Commit 0b07de85a76e1346e675f0e98437378932473df7

Authored by Adel Gadllah
Committed by Jens Axboe
1 parent 6e2401ad6f

allow userspace to modify scsi command filter on per device basis

This patch exports the per-gendisk command filter to user space through
sysfs, so it can be changed by the system administrator.
All users of the old cmd filter have been converted to use the new one.

Original patch from Peter Jones.
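
For illustration (not part of the original changelog): a minimal
user-space sketch of the new interface, assuming the filter's
read_table/write_table attributes land under
/sys/block/<disk>/cmd_filter/ (the kobject registration is not in the
hunks shown here). rcf_cmds_store() parses space-separated two-digit
hex opcodes and replaces the whole table:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; adjust for the disk being configured. */
	const char *path = "/sys/block/sda/cmd_filter/write_table";
	/* Make WRITE(10) (opcode 0x2a) the only write-safe command
	 * for unprivileged, writable opens of this disk. */
	const char table[] = "2a\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, table, strlen(table)) < 0)
		perror("write");
	close(fd);
	return 0;
}

Reading the same attributes back returns the current tables in the
same two-digit hex format (see rcf_cmds_show() below).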

Signed-off-by: Adel Gadllah <adel.gadllah@gmail.com>
Signed-off-by: Peter Jones <pjones@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

Showing 8 changed files with 389 additions and 159 deletions

1 # 1 #
2 # Makefile for the kernel block layer 2 # Makefile for the kernel block layer
3 # 3 #
4 4
5 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ 5 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
6 blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \ 6 blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
7 blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o 7 blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o \
8 cmd-filter.o
8 9
9 obj-$(CONFIG_BLK_DEV_BSG) += bsg.o 10 obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
10 obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o 11 obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
11 obj-$(CONFIG_IOSCHED_AS) += as-iosched.o 12 obj-$(CONFIG_IOSCHED_AS) += as-iosched.o
12 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o 13 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
13 obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o 14 obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
14 15
15 obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o 16 obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
16 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o 17 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
17 obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o 18 obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
18 19
1 /* 1 /*
2 * bsg.c - block layer implementation of the sg v4 interface 2 * bsg.c - block layer implementation of the sg v4 interface
3 * 3 *
4 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs 4 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
5 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com> 5 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General Public 7 * This file is subject to the terms and conditions of the GNU General Public
8 * License version 2. See the file "COPYING" in the main directory of this 8 * License version 2. See the file "COPYING" in the main directory of this
9 * archive for more details. 9 * archive for more details.
10 * 10 *
11 */ 11 */
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/file.h> 14 #include <linux/file.h>
15 #include <linux/blkdev.h> 15 #include <linux/blkdev.h>
16 #include <linux/poll.h> 16 #include <linux/poll.h>
17 #include <linux/cdev.h> 17 #include <linux/cdev.h>
18 #include <linux/percpu.h> 18 #include <linux/percpu.h>
19 #include <linux/uio.h> 19 #include <linux/uio.h>
20 #include <linux/idr.h> 20 #include <linux/idr.h>
21 #include <linux/bsg.h> 21 #include <linux/bsg.h>
22 22
23 #include <scsi/scsi.h> 23 #include <scsi/scsi.h>
24 #include <scsi/scsi_ioctl.h> 24 #include <scsi/scsi_ioctl.h>
25 #include <scsi/scsi_cmnd.h> 25 #include <scsi/scsi_cmnd.h>
26 #include <scsi/scsi_device.h> 26 #include <scsi/scsi_device.h>
27 #include <scsi/scsi_driver.h> 27 #include <scsi/scsi_driver.h>
28 #include <scsi/sg.h> 28 #include <scsi/sg.h>
29 29
30 #define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver" 30 #define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
31 #define BSG_VERSION "0.4" 31 #define BSG_VERSION "0.4"
32 32
33 struct bsg_device { 33 struct bsg_device {
34 struct request_queue *queue; 34 struct request_queue *queue;
35 spinlock_t lock; 35 spinlock_t lock;
36 struct list_head busy_list; 36 struct list_head busy_list;
37 struct list_head done_list; 37 struct list_head done_list;
38 struct hlist_node dev_list; 38 struct hlist_node dev_list;
39 atomic_t ref_count; 39 atomic_t ref_count;
40 int queued_cmds; 40 int queued_cmds;
41 int done_cmds; 41 int done_cmds;
42 wait_queue_head_t wq_done; 42 wait_queue_head_t wq_done;
43 wait_queue_head_t wq_free; 43 wait_queue_head_t wq_free;
44 char name[BUS_ID_SIZE]; 44 char name[BUS_ID_SIZE];
45 int max_queue; 45 int max_queue;
46 unsigned long flags; 46 unsigned long flags;
47 struct blk_scsi_cmd_filter *cmd_filter;
48 mode_t *f_mode;
47 }; 49 };
48 50
49 enum { 51 enum {
50 BSG_F_BLOCK = 1, 52 BSG_F_BLOCK = 1,
51 BSG_F_WRITE_PERM = 2,
52 }; 53 };
53 54
54 #define BSG_DEFAULT_CMDS 64 55 #define BSG_DEFAULT_CMDS 64
55 #define BSG_MAX_DEVS 32768 56 #define BSG_MAX_DEVS 32768
56 57
57 #undef BSG_DEBUG 58 #undef BSG_DEBUG
58 59
59 #ifdef BSG_DEBUG 60 #ifdef BSG_DEBUG
60 #define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args) 61 #define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
61 #else 62 #else
62 #define dprintk(fmt, args...) 63 #define dprintk(fmt, args...)
63 #endif 64 #endif
64 65
65 static DEFINE_MUTEX(bsg_mutex); 66 static DEFINE_MUTEX(bsg_mutex);
66 static DEFINE_IDR(bsg_minor_idr); 67 static DEFINE_IDR(bsg_minor_idr);
67 68
68 #define BSG_LIST_ARRAY_SIZE 8 69 #define BSG_LIST_ARRAY_SIZE 8
69 static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE]; 70 static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
70 71
71 static struct class *bsg_class; 72 static struct class *bsg_class;
72 static int bsg_major; 73 static int bsg_major;
73 74
74 static struct kmem_cache *bsg_cmd_cachep; 75 static struct kmem_cache *bsg_cmd_cachep;
75 76
76 /* 77 /*
77 * our internal command type 78 * our internal command type
78 */ 79 */
79 struct bsg_command { 80 struct bsg_command {
80 struct bsg_device *bd; 81 struct bsg_device *bd;
81 struct list_head list; 82 struct list_head list;
82 struct request *rq; 83 struct request *rq;
83 struct bio *bio; 84 struct bio *bio;
84 struct bio *bidi_bio; 85 struct bio *bidi_bio;
85 int err; 86 int err;
86 struct sg_io_v4 hdr; 87 struct sg_io_v4 hdr;
87 char sense[SCSI_SENSE_BUFFERSIZE]; 88 char sense[SCSI_SENSE_BUFFERSIZE];
88 }; 89 };
89 90
90 static void bsg_free_command(struct bsg_command *bc) 91 static void bsg_free_command(struct bsg_command *bc)
91 { 92 {
92 struct bsg_device *bd = bc->bd; 93 struct bsg_device *bd = bc->bd;
93 unsigned long flags; 94 unsigned long flags;
94 95
95 kmem_cache_free(bsg_cmd_cachep, bc); 96 kmem_cache_free(bsg_cmd_cachep, bc);
96 97
97 spin_lock_irqsave(&bd->lock, flags); 98 spin_lock_irqsave(&bd->lock, flags);
98 bd->queued_cmds--; 99 bd->queued_cmds--;
99 spin_unlock_irqrestore(&bd->lock, flags); 100 spin_unlock_irqrestore(&bd->lock, flags);
100 101
101 wake_up(&bd->wq_free); 102 wake_up(&bd->wq_free);
102 } 103 }
103 104
104 static struct bsg_command *bsg_alloc_command(struct bsg_device *bd) 105 static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
105 { 106 {
106 struct bsg_command *bc = ERR_PTR(-EINVAL); 107 struct bsg_command *bc = ERR_PTR(-EINVAL);
107 108
108 spin_lock_irq(&bd->lock); 109 spin_lock_irq(&bd->lock);
109 110
110 if (bd->queued_cmds >= bd->max_queue) 111 if (bd->queued_cmds >= bd->max_queue)
111 goto out; 112 goto out;
112 113
113 bd->queued_cmds++; 114 bd->queued_cmds++;
114 spin_unlock_irq(&bd->lock); 115 spin_unlock_irq(&bd->lock);
115 116
116 bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL); 117 bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
117 if (unlikely(!bc)) { 118 if (unlikely(!bc)) {
118 spin_lock_irq(&bd->lock); 119 spin_lock_irq(&bd->lock);
119 bd->queued_cmds--; 120 bd->queued_cmds--;
120 bc = ERR_PTR(-ENOMEM); 121 bc = ERR_PTR(-ENOMEM);
121 goto out; 122 goto out;
122 } 123 }
123 124
124 bc->bd = bd; 125 bc->bd = bd;
125 INIT_LIST_HEAD(&bc->list); 126 INIT_LIST_HEAD(&bc->list);
126 dprintk("%s: returning free cmd %p\n", bd->name, bc); 127 dprintk("%s: returning free cmd %p\n", bd->name, bc);
127 return bc; 128 return bc;
128 out: 129 out:
129 spin_unlock_irq(&bd->lock); 130 spin_unlock_irq(&bd->lock);
130 return bc; 131 return bc;
131 } 132 }
132 133
133 static inline struct hlist_head *bsg_dev_idx_hash(int index) 134 static inline struct hlist_head *bsg_dev_idx_hash(int index)
134 { 135 {
135 return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; 136 return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
136 } 137 }
137 138
138 static int bsg_io_schedule(struct bsg_device *bd) 139 static int bsg_io_schedule(struct bsg_device *bd)
139 { 140 {
140 DEFINE_WAIT(wait); 141 DEFINE_WAIT(wait);
141 int ret = 0; 142 int ret = 0;
142 143
143 spin_lock_irq(&bd->lock); 144 spin_lock_irq(&bd->lock);
144 145
145 BUG_ON(bd->done_cmds > bd->queued_cmds); 146 BUG_ON(bd->done_cmds > bd->queued_cmds);
146 147
147 /* 148 /*
148 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no 149 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
149 * work to do", even though we return -ENOSPC after this same test 150 * work to do", even though we return -ENOSPC after this same test
150 * during bsg_write() -- there, it means our buffer can't have more 151 * during bsg_write() -- there, it means our buffer can't have more
151 * bsg_commands added to it, thus has no space left. 152 * bsg_commands added to it, thus has no space left.
152 */ 153 */
153 if (bd->done_cmds == bd->queued_cmds) { 154 if (bd->done_cmds == bd->queued_cmds) {
154 ret = -ENODATA; 155 ret = -ENODATA;
155 goto unlock; 156 goto unlock;
156 } 157 }
157 158
158 if (!test_bit(BSG_F_BLOCK, &bd->flags)) { 159 if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
159 ret = -EAGAIN; 160 ret = -EAGAIN;
160 goto unlock; 161 goto unlock;
161 } 162 }
162 163
163 prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE); 164 prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
164 spin_unlock_irq(&bd->lock); 165 spin_unlock_irq(&bd->lock);
165 io_schedule(); 166 io_schedule();
166 finish_wait(&bd->wq_done, &wait); 167 finish_wait(&bd->wq_done, &wait);
167 168
168 return ret; 169 return ret;
169 unlock: 170 unlock:
170 spin_unlock_irq(&bd->lock); 171 spin_unlock_irq(&bd->lock);
171 return ret; 172 return ret;
172 } 173 }
173 174
174 static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, 175 static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
175 struct sg_io_v4 *hdr, int has_write_perm) 176 struct sg_io_v4 *hdr, struct bsg_device *bd)
176 { 177 {
177 if (hdr->request_len > BLK_MAX_CDB) { 178 if (hdr->request_len > BLK_MAX_CDB) {
178 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); 179 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
179 if (!rq->cmd) 180 if (!rq->cmd)
180 return -ENOMEM; 181 return -ENOMEM;
181 } 182 }
182 183
183 if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request, 184 if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
184 hdr->request_len)) 185 hdr->request_len))
185 return -EFAULT; 186 return -EFAULT;
186 187
187 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { 188 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
188 if (blk_verify_command(rq->cmd, has_write_perm)) 189 if (blk_cmd_filter_verify_command(bd->cmd_filter, rq->cmd,
190 bd->f_mode))
189 return -EPERM; 191 return -EPERM;
190 } else if (!capable(CAP_SYS_RAWIO)) 192 } else if (!capable(CAP_SYS_RAWIO))
191 return -EPERM; 193 return -EPERM;
192 194
193 /* 195 /*
194 * fill in request structure 196 * fill in request structure
195 */ 197 */
196 rq->cmd_len = hdr->request_len; 198 rq->cmd_len = hdr->request_len;
197 rq->cmd_type = REQ_TYPE_BLOCK_PC; 199 rq->cmd_type = REQ_TYPE_BLOCK_PC;
198 200
199 rq->timeout = (hdr->timeout * HZ) / 1000; 201 rq->timeout = (hdr->timeout * HZ) / 1000;
200 if (!rq->timeout) 202 if (!rq->timeout)
201 rq->timeout = q->sg_timeout; 203 rq->timeout = q->sg_timeout;
202 if (!rq->timeout) 204 if (!rq->timeout)
203 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 205 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
204 206
205 return 0; 207 return 0;
206 } 208 }
207 209
208 /* 210 /*
209 * Check if sg_io_v4 from user is allowed and valid 211 * Check if sg_io_v4 from user is allowed and valid
210 */ 212 */
211 static int 213 static int
212 bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw) 214 bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
213 { 215 {
214 int ret = 0; 216 int ret = 0;
215 217
216 if (hdr->guard != 'Q') 218 if (hdr->guard != 'Q')
217 return -EINVAL; 219 return -EINVAL;
218 if (hdr->dout_xfer_len > (q->max_sectors << 9) || 220 if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
219 hdr->din_xfer_len > (q->max_sectors << 9)) 221 hdr->din_xfer_len > (q->max_sectors << 9))
220 return -EIO; 222 return -EIO;
221 223
222 switch (hdr->protocol) { 224 switch (hdr->protocol) {
223 case BSG_PROTOCOL_SCSI: 225 case BSG_PROTOCOL_SCSI:
224 switch (hdr->subprotocol) { 226 switch (hdr->subprotocol) {
225 case BSG_SUB_PROTOCOL_SCSI_CMD: 227 case BSG_SUB_PROTOCOL_SCSI_CMD:
226 case BSG_SUB_PROTOCOL_SCSI_TRANSPORT: 228 case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
227 break; 229 break;
228 default: 230 default:
229 ret = -EINVAL; 231 ret = -EINVAL;
230 } 232 }
231 break; 233 break;
232 default: 234 default:
233 ret = -EINVAL; 235 ret = -EINVAL;
234 } 236 }
235 237
236 *rw = hdr->dout_xfer_len ? WRITE : READ; 238 *rw = hdr->dout_xfer_len ? WRITE : READ;
237 return ret; 239 return ret;
238 } 240 }
239 241
240 /* 242 /*
241 * map sg_io_v4 to a request. 243 * map sg_io_v4 to a request.
242 */ 244 */
243 static struct request * 245 static struct request *
244 bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr) 246 bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
245 { 247 {
246 struct request_queue *q = bd->queue; 248 struct request_queue *q = bd->queue;
247 struct request *rq, *next_rq = NULL; 249 struct request *rq, *next_rq = NULL;
248 int ret, rw; 250 int ret, rw;
249 unsigned int dxfer_len; 251 unsigned int dxfer_len;
250 void *dxferp = NULL; 252 void *dxferp = NULL;
251 253
252 dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp, 254 dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
253 hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp, 255 hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
254 hdr->din_xfer_len); 256 hdr->din_xfer_len);
255 257
256 ret = bsg_validate_sgv4_hdr(q, hdr, &rw); 258 ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
257 if (ret) 259 if (ret)
258 return ERR_PTR(ret); 260 return ERR_PTR(ret);
259 261
260 /* 262 /*
261 * map scatter-gather elements separately and string them to request 263 * map scatter-gather elements separately and string them to request
262 */ 264 */
263 rq = blk_get_request(q, rw, GFP_KERNEL); 265 rq = blk_get_request(q, rw, GFP_KERNEL);
264 if (!rq) 266 if (!rq)
265 return ERR_PTR(-ENOMEM); 267 return ERR_PTR(-ENOMEM);
266 ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM, 268 ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd);
267 &bd->flags));
268 if (ret) 269 if (ret)
269 goto out; 270 goto out;
270 271
271 if (rw == WRITE && hdr->din_xfer_len) { 272 if (rw == WRITE && hdr->din_xfer_len) {
272 if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { 273 if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
273 ret = -EOPNOTSUPP; 274 ret = -EOPNOTSUPP;
274 goto out; 275 goto out;
275 } 276 }
276 277
277 next_rq = blk_get_request(q, READ, GFP_KERNEL); 278 next_rq = blk_get_request(q, READ, GFP_KERNEL);
278 if (!next_rq) { 279 if (!next_rq) {
279 ret = -ENOMEM; 280 ret = -ENOMEM;
280 goto out; 281 goto out;
281 } 282 }
282 rq->next_rq = next_rq; 283 rq->next_rq = next_rq;
283 next_rq->cmd_type = rq->cmd_type; 284 next_rq->cmd_type = rq->cmd_type;
284 285
285 dxferp = (void*)(unsigned long)hdr->din_xferp; 286 dxferp = (void*)(unsigned long)hdr->din_xferp;
286 ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len); 287 ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
287 if (ret) 288 if (ret)
288 goto out; 289 goto out;
289 } 290 }
290 291
291 if (hdr->dout_xfer_len) { 292 if (hdr->dout_xfer_len) {
292 dxfer_len = hdr->dout_xfer_len; 293 dxfer_len = hdr->dout_xfer_len;
293 dxferp = (void*)(unsigned long)hdr->dout_xferp; 294 dxferp = (void*)(unsigned long)hdr->dout_xferp;
294 } else if (hdr->din_xfer_len) { 295 } else if (hdr->din_xfer_len) {
295 dxfer_len = hdr->din_xfer_len; 296 dxfer_len = hdr->din_xfer_len;
296 dxferp = (void*)(unsigned long)hdr->din_xferp; 297 dxferp = (void*)(unsigned long)hdr->din_xferp;
297 } else 298 } else
298 dxfer_len = 0; 299 dxfer_len = 0;
299 300
300 if (dxfer_len) { 301 if (dxfer_len) {
301 ret = blk_rq_map_user(q, rq, dxferp, dxfer_len); 302 ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
302 if (ret) 303 if (ret)
303 goto out; 304 goto out;
304 } 305 }
305 return rq; 306 return rq;
306 out: 307 out:
307 if (rq->cmd != rq->__cmd) 308 if (rq->cmd != rq->__cmd)
308 kfree(rq->cmd); 309 kfree(rq->cmd);
309 blk_put_request(rq); 310 blk_put_request(rq);
310 if (next_rq) { 311 if (next_rq) {
311 blk_rq_unmap_user(next_rq->bio); 312 blk_rq_unmap_user(next_rq->bio);
312 blk_put_request(next_rq); 313 blk_put_request(next_rq);
313 } 314 }
314 return ERR_PTR(ret); 315 return ERR_PTR(ret);
315 } 316 }
316 317
317 /* 318 /*
318 * async completion call-back from the block layer, when scsi/ide/whatever 319 * async completion call-back from the block layer, when scsi/ide/whatever
319 * calls end_that_request_last() on a request 320 * calls end_that_request_last() on a request
320 */ 321 */
321 static void bsg_rq_end_io(struct request *rq, int uptodate) 322 static void bsg_rq_end_io(struct request *rq, int uptodate)
322 { 323 {
323 struct bsg_command *bc = rq->end_io_data; 324 struct bsg_command *bc = rq->end_io_data;
324 struct bsg_device *bd = bc->bd; 325 struct bsg_device *bd = bc->bd;
325 unsigned long flags; 326 unsigned long flags;
326 327
327 dprintk("%s: finished rq %p bc %p, bio %p stat %d\n", 328 dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
328 bd->name, rq, bc, bc->bio, uptodate); 329 bd->name, rq, bc, bc->bio, uptodate);
329 330
330 bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration); 331 bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
331 332
332 spin_lock_irqsave(&bd->lock, flags); 333 spin_lock_irqsave(&bd->lock, flags);
333 list_move_tail(&bc->list, &bd->done_list); 334 list_move_tail(&bc->list, &bd->done_list);
334 bd->done_cmds++; 335 bd->done_cmds++;
335 spin_unlock_irqrestore(&bd->lock, flags); 336 spin_unlock_irqrestore(&bd->lock, flags);
336 337
337 wake_up(&bd->wq_done); 338 wake_up(&bd->wq_done);
338 } 339 }
339 340
340 /* 341 /*
341 * do final setup of a 'bc' and submit the matching 'rq' to the block 342 * do final setup of a 'bc' and submit the matching 'rq' to the block
342 * layer for io 343 * layer for io
343 */ 344 */
344 static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, 345 static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
345 struct bsg_command *bc, struct request *rq) 346 struct bsg_command *bc, struct request *rq)
346 { 347 {
347 rq->sense = bc->sense; 348 rq->sense = bc->sense;
348 rq->sense_len = 0; 349 rq->sense_len = 0;
349 350
350 /* 351 /*
351 * add bc command to busy queue and submit rq for io 352 * add bc command to busy queue and submit rq for io
352 */ 353 */
353 bc->rq = rq; 354 bc->rq = rq;
354 bc->bio = rq->bio; 355 bc->bio = rq->bio;
355 if (rq->next_rq) 356 if (rq->next_rq)
356 bc->bidi_bio = rq->next_rq->bio; 357 bc->bidi_bio = rq->next_rq->bio;
357 bc->hdr.duration = jiffies; 358 bc->hdr.duration = jiffies;
358 spin_lock_irq(&bd->lock); 359 spin_lock_irq(&bd->lock);
359 list_add_tail(&bc->list, &bd->busy_list); 360 list_add_tail(&bc->list, &bd->busy_list);
360 spin_unlock_irq(&bd->lock); 361 spin_unlock_irq(&bd->lock);
361 362
362 dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc); 363 dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
363 364
364 rq->end_io_data = bc; 365 rq->end_io_data = bc;
365 blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io); 366 blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
366 } 367 }
367 368
368 static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd) 369 static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
369 { 370 {
370 struct bsg_command *bc = NULL; 371 struct bsg_command *bc = NULL;
371 372
372 spin_lock_irq(&bd->lock); 373 spin_lock_irq(&bd->lock);
373 if (bd->done_cmds) { 374 if (bd->done_cmds) {
374 bc = list_first_entry(&bd->done_list, struct bsg_command, list); 375 bc = list_first_entry(&bd->done_list, struct bsg_command, list);
375 list_del(&bc->list); 376 list_del(&bc->list);
376 bd->done_cmds--; 377 bd->done_cmds--;
377 } 378 }
378 spin_unlock_irq(&bd->lock); 379 spin_unlock_irq(&bd->lock);
379 380
380 return bc; 381 return bc;
381 } 382 }
382 383
383 /* 384 /*
384 * Get a finished command from the done list 385 * Get a finished command from the done list
385 */ 386 */
386 static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd) 387 static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
387 { 388 {
388 struct bsg_command *bc; 389 struct bsg_command *bc;
389 int ret; 390 int ret;
390 391
391 do { 392 do {
392 bc = bsg_next_done_cmd(bd); 393 bc = bsg_next_done_cmd(bd);
393 if (bc) 394 if (bc)
394 break; 395 break;
395 396
396 if (!test_bit(BSG_F_BLOCK, &bd->flags)) { 397 if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
397 bc = ERR_PTR(-EAGAIN); 398 bc = ERR_PTR(-EAGAIN);
398 break; 399 break;
399 } 400 }
400 401
401 ret = wait_event_interruptible(bd->wq_done, bd->done_cmds); 402 ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
402 if (ret) { 403 if (ret) {
403 bc = ERR_PTR(-ERESTARTSYS); 404 bc = ERR_PTR(-ERESTARTSYS);
404 break; 405 break;
405 } 406 }
406 } while (1); 407 } while (1);
407 408
408 dprintk("%s: returning done %p\n", bd->name, bc); 409 dprintk("%s: returning done %p\n", bd->name, bc);
409 410
410 return bc; 411 return bc;
411 } 412 }
412 413
413 static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, 414 static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
414 struct bio *bio, struct bio *bidi_bio) 415 struct bio *bio, struct bio *bidi_bio)
415 { 416 {
416 int ret = 0; 417 int ret = 0;
417 418
418 dprintk("rq %p bio %p %u\n", rq, bio, rq->errors); 419 dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
419 /* 420 /*
420 * fill in all the output members 421 * fill in all the output members
421 */ 422 */
422 hdr->device_status = status_byte(rq->errors); 423 hdr->device_status = status_byte(rq->errors);
423 hdr->transport_status = host_byte(rq->errors); 424 hdr->transport_status = host_byte(rq->errors);
424 hdr->driver_status = driver_byte(rq->errors); 425 hdr->driver_status = driver_byte(rq->errors);
425 hdr->info = 0; 426 hdr->info = 0;
426 if (hdr->device_status || hdr->transport_status || hdr->driver_status) 427 if (hdr->device_status || hdr->transport_status || hdr->driver_status)
427 hdr->info |= SG_INFO_CHECK; 428 hdr->info |= SG_INFO_CHECK;
428 hdr->response_len = 0; 429 hdr->response_len = 0;
429 430
430 if (rq->sense_len && hdr->response) { 431 if (rq->sense_len && hdr->response) {
431 int len = min_t(unsigned int, hdr->max_response_len, 432 int len = min_t(unsigned int, hdr->max_response_len,
432 rq->sense_len); 433 rq->sense_len);
433 434
434 ret = copy_to_user((void*)(unsigned long)hdr->response, 435 ret = copy_to_user((void*)(unsigned long)hdr->response,
435 rq->sense, len); 436 rq->sense, len);
436 if (!ret) 437 if (!ret)
437 hdr->response_len = len; 438 hdr->response_len = len;
438 else 439 else
439 ret = -EFAULT; 440 ret = -EFAULT;
440 } 441 }
441 442
442 if (rq->next_rq) { 443 if (rq->next_rq) {
443 hdr->dout_resid = rq->data_len; 444 hdr->dout_resid = rq->data_len;
444 hdr->din_resid = rq->next_rq->data_len; 445 hdr->din_resid = rq->next_rq->data_len;
445 blk_rq_unmap_user(bidi_bio); 446 blk_rq_unmap_user(bidi_bio);
446 blk_put_request(rq->next_rq); 447 blk_put_request(rq->next_rq);
447 } else if (rq_data_dir(rq) == READ) 448 } else if (rq_data_dir(rq) == READ)
448 hdr->din_resid = rq->data_len; 449 hdr->din_resid = rq->data_len;
449 else 450 else
450 hdr->dout_resid = rq->data_len; 451 hdr->dout_resid = rq->data_len;
451 452
452 /* 453 /*
453 * If the request generated a negative error number, return it 454 * If the request generated a negative error number, return it
454 * (providing we aren't already returning an error); if it's 455 * (providing we aren't already returning an error); if it's
455 * just a protocol response (i.e. non negative), that gets 456 * just a protocol response (i.e. non negative), that gets
456 * processed above. 457 * processed above.
457 */ 458 */
458 if (!ret && rq->errors < 0) 459 if (!ret && rq->errors < 0)
459 ret = rq->errors; 460 ret = rq->errors;
460 461
461 blk_rq_unmap_user(bio); 462 blk_rq_unmap_user(bio);
462 if (rq->cmd != rq->__cmd) 463 if (rq->cmd != rq->__cmd)
463 kfree(rq->cmd); 464 kfree(rq->cmd);
464 blk_put_request(rq); 465 blk_put_request(rq);
465 466
466 return ret; 467 return ret;
467 } 468 }
468 469
469 static int bsg_complete_all_commands(struct bsg_device *bd) 470 static int bsg_complete_all_commands(struct bsg_device *bd)
470 { 471 {
471 struct bsg_command *bc; 472 struct bsg_command *bc;
472 int ret, tret; 473 int ret, tret;
473 474
474 dprintk("%s: entered\n", bd->name); 475 dprintk("%s: entered\n", bd->name);
475 476
476 /* 477 /*
477 * wait for all commands to complete 478 * wait for all commands to complete
478 */ 479 */
479 ret = 0; 480 ret = 0;
480 do { 481 do {
481 ret = bsg_io_schedule(bd); 482 ret = bsg_io_schedule(bd);
482 /* 483 /*
483 * look for -ENODATA specifically -- we'll sometimes get 484 * look for -ENODATA specifically -- we'll sometimes get
484 * -ERESTARTSYS when we've taken a signal, but we can't 485 * -ERESTARTSYS when we've taken a signal, but we can't
485 * return until we're done freeing the queue, so ignore 486 * return until we're done freeing the queue, so ignore
486 * it. The signal will get handled when we're done freeing 487 * it. The signal will get handled when we're done freeing
487 * the bsg_device. 488 * the bsg_device.
488 */ 489 */
489 } while (ret != -ENODATA); 490 } while (ret != -ENODATA);
490 491
491 /* 492 /*
492 * discard done commands 493 * discard done commands
493 */ 494 */
494 ret = 0; 495 ret = 0;
495 do { 496 do {
496 spin_lock_irq(&bd->lock); 497 spin_lock_irq(&bd->lock);
497 if (!bd->queued_cmds) { 498 if (!bd->queued_cmds) {
498 spin_unlock_irq(&bd->lock); 499 spin_unlock_irq(&bd->lock);
499 break; 500 break;
500 } 501 }
501 spin_unlock_irq(&bd->lock); 502 spin_unlock_irq(&bd->lock);
502 503
503 bc = bsg_get_done_cmd(bd); 504 bc = bsg_get_done_cmd(bd);
504 if (IS_ERR(bc)) 505 if (IS_ERR(bc))
505 break; 506 break;
506 507
507 tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, 508 tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
508 bc->bidi_bio); 509 bc->bidi_bio);
509 if (!ret) 510 if (!ret)
510 ret = tret; 511 ret = tret;
511 512
512 bsg_free_command(bc); 513 bsg_free_command(bc);
513 } while (1); 514 } while (1);
514 515
515 return ret; 516 return ret;
516 } 517 }
517 518
518 static int 519 static int
519 __bsg_read(char __user *buf, size_t count, struct bsg_device *bd, 520 __bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
520 const struct iovec *iov, ssize_t *bytes_read) 521 const struct iovec *iov, ssize_t *bytes_read)
521 { 522 {
522 struct bsg_command *bc; 523 struct bsg_command *bc;
523 int nr_commands, ret; 524 int nr_commands, ret;
524 525
525 if (count % sizeof(struct sg_io_v4)) 526 if (count % sizeof(struct sg_io_v4))
526 return -EINVAL; 527 return -EINVAL;
527 528
528 ret = 0; 529 ret = 0;
529 nr_commands = count / sizeof(struct sg_io_v4); 530 nr_commands = count / sizeof(struct sg_io_v4);
530 while (nr_commands) { 531 while (nr_commands) {
531 bc = bsg_get_done_cmd(bd); 532 bc = bsg_get_done_cmd(bd);
532 if (IS_ERR(bc)) { 533 if (IS_ERR(bc)) {
533 ret = PTR_ERR(bc); 534 ret = PTR_ERR(bc);
534 break; 535 break;
535 } 536 }
536 537
537 /* 538 /*
538 * this is the only case where we need to copy data back 539 * this is the only case where we need to copy data back
539 * after completing the request. so do that here, 540 * after completing the request. so do that here,
540 * bsg_complete_work() cannot do that for us 541 * bsg_complete_work() cannot do that for us
541 */ 542 */
542 ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, 543 ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
543 bc->bidi_bio); 544 bc->bidi_bio);
544 545
545 if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr))) 546 if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
546 ret = -EFAULT; 547 ret = -EFAULT;
547 548
548 bsg_free_command(bc); 549 bsg_free_command(bc);
549 550
550 if (ret) 551 if (ret)
551 break; 552 break;
552 553
553 buf += sizeof(struct sg_io_v4); 554 buf += sizeof(struct sg_io_v4);
554 *bytes_read += sizeof(struct sg_io_v4); 555 *bytes_read += sizeof(struct sg_io_v4);
555 nr_commands--; 556 nr_commands--;
556 } 557 }
557 558
558 return ret; 559 return ret;
559 } 560 }
560 561
561 static inline void bsg_set_block(struct bsg_device *bd, struct file *file) 562 static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
562 { 563 {
563 if (file->f_flags & O_NONBLOCK) 564 if (file->f_flags & O_NONBLOCK)
564 clear_bit(BSG_F_BLOCK, &bd->flags); 565 clear_bit(BSG_F_BLOCK, &bd->flags);
565 else 566 else
566 set_bit(BSG_F_BLOCK, &bd->flags); 567 set_bit(BSG_F_BLOCK, &bd->flags);
567 } 568 }
568 569
569 static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file) 570 static void bsg_set_cmd_filter(struct bsg_device *bd,
571 struct file *file)
570 { 572 {
571 if (file->f_mode & FMODE_WRITE) 573 struct inode *inode;
572 set_bit(BSG_F_WRITE_PERM, &bd->flags); 574 struct gendisk *disk;
573 else 575
574 clear_bit(BSG_F_WRITE_PERM, &bd->flags); 576 if (!file)
577 return;
578
579 inode = file->f_dentry->d_inode;
580 if (!inode)
581 return;
582
583 disk = inode->i_bdev->bd_disk;
584
585 bd->cmd_filter = &disk->cmd_filter;
586 bd->f_mode = &file->f_mode;
575 } 587 }
576 588
577 /* 589 /*
578 * Check if the error is a "real" error that we should return. 590 * Check if the error is a "real" error that we should return.
579 */ 591 */
580 static inline int err_block_err(int ret) 592 static inline int err_block_err(int ret)
581 { 593 {
582 if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN) 594 if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
583 return 1; 595 return 1;
584 596
585 return 0; 597 return 0;
586 } 598 }
587 599
588 static ssize_t 600 static ssize_t
589 bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 601 bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
590 { 602 {
591 struct bsg_device *bd = file->private_data; 603 struct bsg_device *bd = file->private_data;
592 int ret; 604 int ret;
593 ssize_t bytes_read; 605 ssize_t bytes_read;
594 606
595 dprintk("%s: read %Zd bytes\n", bd->name, count); 607 dprintk("%s: read %Zd bytes\n", bd->name, count);
596 608
597 bsg_set_block(bd, file); 609 bsg_set_block(bd, file);
610 bsg_set_cmd_filter(bd, file);
611
598 bytes_read = 0; 612 bytes_read = 0;
599 ret = __bsg_read(buf, count, bd, NULL, &bytes_read); 613 ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
600 *ppos = bytes_read; 614 *ppos = bytes_read;
601 615
602 if (!bytes_read || (bytes_read && err_block_err(ret))) 616 if (!bytes_read || (bytes_read && err_block_err(ret)))
603 bytes_read = ret; 617 bytes_read = ret;
604 618
605 return bytes_read; 619 return bytes_read;
606 } 620 }
607 621
608 static int __bsg_write(struct bsg_device *bd, const char __user *buf, 622 static int __bsg_write(struct bsg_device *bd, const char __user *buf,
609 size_t count, ssize_t *bytes_written) 623 size_t count, ssize_t *bytes_written)
610 { 624 {
611 struct bsg_command *bc; 625 struct bsg_command *bc;
612 struct request *rq; 626 struct request *rq;
613 int ret, nr_commands; 627 int ret, nr_commands;
614 628
615 if (count % sizeof(struct sg_io_v4)) 629 if (count % sizeof(struct sg_io_v4))
616 return -EINVAL; 630 return -EINVAL;
617 631
618 nr_commands = count / sizeof(struct sg_io_v4); 632 nr_commands = count / sizeof(struct sg_io_v4);
619 rq = NULL; 633 rq = NULL;
620 bc = NULL; 634 bc = NULL;
621 ret = 0; 635 ret = 0;
622 while (nr_commands) { 636 while (nr_commands) {
623 struct request_queue *q = bd->queue; 637 struct request_queue *q = bd->queue;
624 638
625 bc = bsg_alloc_command(bd); 639 bc = bsg_alloc_command(bd);
626 if (IS_ERR(bc)) { 640 if (IS_ERR(bc)) {
627 ret = PTR_ERR(bc); 641 ret = PTR_ERR(bc);
628 bc = NULL; 642 bc = NULL;
629 break; 643 break;
630 } 644 }
631 645
632 if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) { 646 if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
633 ret = -EFAULT; 647 ret = -EFAULT;
634 break; 648 break;
635 } 649 }
636 650
637 /* 651 /*
638 * get a request, fill in the blanks, and add to request queue 652 * get a request, fill in the blanks, and add to request queue
639 */ 653 */
640 rq = bsg_map_hdr(bd, &bc->hdr); 654 rq = bsg_map_hdr(bd, &bc->hdr);
641 if (IS_ERR(rq)) { 655 if (IS_ERR(rq)) {
642 ret = PTR_ERR(rq); 656 ret = PTR_ERR(rq);
643 rq = NULL; 657 rq = NULL;
644 break; 658 break;
645 } 659 }
646 660
647 bsg_add_command(bd, q, bc, rq); 661 bsg_add_command(bd, q, bc, rq);
648 bc = NULL; 662 bc = NULL;
649 rq = NULL; 663 rq = NULL;
650 nr_commands--; 664 nr_commands--;
651 buf += sizeof(struct sg_io_v4); 665 buf += sizeof(struct sg_io_v4);
652 *bytes_written += sizeof(struct sg_io_v4); 666 *bytes_written += sizeof(struct sg_io_v4);
653 } 667 }
654 668
655 if (bc) 669 if (bc)
656 bsg_free_command(bc); 670 bsg_free_command(bc);
657 671
658 return ret; 672 return ret;
659 } 673 }
660 674
661 static ssize_t 675 static ssize_t
662 bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 676 bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
663 { 677 {
664 struct bsg_device *bd = file->private_data; 678 struct bsg_device *bd = file->private_data;
665 ssize_t bytes_written; 679 ssize_t bytes_written;
666 int ret; 680 int ret;
667 681
668 dprintk("%s: write %Zd bytes\n", bd->name, count); 682 dprintk("%s: write %Zd bytes\n", bd->name, count);
669 683
670 bsg_set_block(bd, file); 684 bsg_set_block(bd, file);
671 bsg_set_write_perm(bd, file); 685 bsg_set_cmd_filter(bd, file);
672 686
673 bytes_written = 0; 687 bytes_written = 0;
674 ret = __bsg_write(bd, buf, count, &bytes_written); 688 ret = __bsg_write(bd, buf, count, &bytes_written);
675 *ppos = bytes_written; 689 *ppos = bytes_written;
676 690
677 /* 691 /*
678 * return bytes written on non-fatal errors 692 * return bytes written on non-fatal errors
679 */ 693 */
680 if (!bytes_written || (bytes_written && err_block_err(ret))) 694 if (!bytes_written || (bytes_written && err_block_err(ret)))
681 bytes_written = ret; 695 bytes_written = ret;
682 696
683 dprintk("%s: returning %Zd\n", bd->name, bytes_written); 697 dprintk("%s: returning %Zd\n", bd->name, bytes_written);
684 return bytes_written; 698 return bytes_written;
685 } 699 }
686 700
687 static struct bsg_device *bsg_alloc_device(void) 701 static struct bsg_device *bsg_alloc_device(void)
688 { 702 {
689 struct bsg_device *bd; 703 struct bsg_device *bd;
690 704
691 bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL); 705 bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
692 if (unlikely(!bd)) 706 if (unlikely(!bd))
693 return NULL; 707 return NULL;
694 708
695 spin_lock_init(&bd->lock); 709 spin_lock_init(&bd->lock);
696 710
697 bd->max_queue = BSG_DEFAULT_CMDS; 711 bd->max_queue = BSG_DEFAULT_CMDS;
698 712
699 INIT_LIST_HEAD(&bd->busy_list); 713 INIT_LIST_HEAD(&bd->busy_list);
700 INIT_LIST_HEAD(&bd->done_list); 714 INIT_LIST_HEAD(&bd->done_list);
701 INIT_HLIST_NODE(&bd->dev_list); 715 INIT_HLIST_NODE(&bd->dev_list);
702 716
703 init_waitqueue_head(&bd->wq_free); 717 init_waitqueue_head(&bd->wq_free);
704 init_waitqueue_head(&bd->wq_done); 718 init_waitqueue_head(&bd->wq_done);
705 return bd; 719 return bd;
706 } 720 }
707 721
708 static void bsg_kref_release_function(struct kref *kref) 722 static void bsg_kref_release_function(struct kref *kref)
709 { 723 {
710 struct bsg_class_device *bcd = 724 struct bsg_class_device *bcd =
711 container_of(kref, struct bsg_class_device, ref); 725 container_of(kref, struct bsg_class_device, ref);
712 726
713 if (bcd->release) 727 if (bcd->release)
714 bcd->release(bcd->parent); 728 bcd->release(bcd->parent);
715 729
716 put_device(bcd->parent); 730 put_device(bcd->parent);
717 } 731 }
718 732
719 static int bsg_put_device(struct bsg_device *bd) 733 static int bsg_put_device(struct bsg_device *bd)
720 { 734 {
721 int ret = 0, do_free; 735 int ret = 0, do_free;
722 struct request_queue *q = bd->queue; 736 struct request_queue *q = bd->queue;
723 737
724 mutex_lock(&bsg_mutex); 738 mutex_lock(&bsg_mutex);
725 739
726 do_free = atomic_dec_and_test(&bd->ref_count); 740 do_free = atomic_dec_and_test(&bd->ref_count);
727 if (!do_free) 741 if (!do_free)
728 goto out; 742 goto out;
729 743
730 dprintk("%s: tearing down\n", bd->name); 744 dprintk("%s: tearing down\n", bd->name);
731 745
732 /* 746 /*
733 * close can always block 747 * close can always block
734 */ 748 */
735 set_bit(BSG_F_BLOCK, &bd->flags); 749 set_bit(BSG_F_BLOCK, &bd->flags);
736 750
737 /* 751 /*
738 * correct error detection baddies here again. it's the responsibility 752 * correct error detection baddies here again. it's the responsibility
739 * of the app to properly reap commands before close() if it wants 753 * of the app to properly reap commands before close() if it wants
740 * fool-proof error detection 754 * fool-proof error detection
741 */ 755 */
742 ret = bsg_complete_all_commands(bd); 756 ret = bsg_complete_all_commands(bd);
743 757
744 hlist_del(&bd->dev_list); 758 hlist_del(&bd->dev_list);
745 kfree(bd); 759 kfree(bd);
746 out: 760 out:
747 mutex_unlock(&bsg_mutex); 761 mutex_unlock(&bsg_mutex);
748 kref_put(&q->bsg_dev.ref, bsg_kref_release_function); 762 kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
749 if (do_free) 763 if (do_free)
750 blk_put_queue(q); 764 blk_put_queue(q);
751 return ret; 765 return ret;
752 } 766 }
753 767
754 static struct bsg_device *bsg_add_device(struct inode *inode, 768 static struct bsg_device *bsg_add_device(struct inode *inode,
755 struct request_queue *rq, 769 struct request_queue *rq,
756 struct file *file) 770 struct file *file)
757 { 771 {
758 struct bsg_device *bd; 772 struct bsg_device *bd;
759 int ret; 773 int ret;
760 #ifdef BSG_DEBUG 774 #ifdef BSG_DEBUG
761 unsigned char buf[32]; 775 unsigned char buf[32];
762 #endif 776 #endif
763 ret = blk_get_queue(rq); 777 ret = blk_get_queue(rq);
764 if (ret) 778 if (ret)
765 return ERR_PTR(-ENXIO); 779 return ERR_PTR(-ENXIO);
766 780
767 bd = bsg_alloc_device(); 781 bd = bsg_alloc_device();
768 if (!bd) { 782 if (!bd) {
769 blk_put_queue(rq); 783 blk_put_queue(rq);
770 return ERR_PTR(-ENOMEM); 784 return ERR_PTR(-ENOMEM);
771 } 785 }
772 786
773 bd->queue = rq; 787 bd->queue = rq;
788
774 bsg_set_block(bd, file); 789 bsg_set_block(bd, file);
790 bsg_set_cmd_filter(bd, file);
775 791
776 atomic_set(&bd->ref_count, 1); 792 atomic_set(&bd->ref_count, 1);
777 mutex_lock(&bsg_mutex); 793 mutex_lock(&bsg_mutex);
778 hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); 794 hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
779 795
780 strncpy(bd->name, rq->bsg_dev.class_dev->bus_id, sizeof(bd->name) - 1); 796 strncpy(bd->name, rq->bsg_dev.class_dev->bus_id, sizeof(bd->name) - 1);
781 dprintk("bound to <%s>, max queue %d\n", 797 dprintk("bound to <%s>, max queue %d\n",
782 format_dev_t(buf, inode->i_rdev), bd->max_queue); 798 format_dev_t(buf, inode->i_rdev), bd->max_queue);
783 799
784 mutex_unlock(&bsg_mutex); 800 mutex_unlock(&bsg_mutex);
785 return bd; 801 return bd;
786 } 802 }
787 803
788 static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) 804 static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
789 { 805 {
790 struct bsg_device *bd; 806 struct bsg_device *bd;
791 struct hlist_node *entry; 807 struct hlist_node *entry;
792 808
793 mutex_lock(&bsg_mutex); 809 mutex_lock(&bsg_mutex);
794 810
795 hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) { 811 hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
796 if (bd->queue == q) { 812 if (bd->queue == q) {
797 atomic_inc(&bd->ref_count); 813 atomic_inc(&bd->ref_count);
798 goto found; 814 goto found;
799 } 815 }
800 } 816 }
801 bd = NULL; 817 bd = NULL;
802 found: 818 found:
803 mutex_unlock(&bsg_mutex); 819 mutex_unlock(&bsg_mutex);
804 return bd; 820 return bd;
805 } 821 }
806 822
807 static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file) 823 static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
808 { 824 {
809 struct bsg_device *bd; 825 struct bsg_device *bd;
810 struct bsg_class_device *bcd; 826 struct bsg_class_device *bcd;
811 827
812 /* 828 /*
813 * find the class device 829 * find the class device
814 */ 830 */
815 mutex_lock(&bsg_mutex); 831 mutex_lock(&bsg_mutex);
816 bcd = idr_find(&bsg_minor_idr, iminor(inode)); 832 bcd = idr_find(&bsg_minor_idr, iminor(inode));
817 if (bcd) 833 if (bcd)
818 kref_get(&bcd->ref); 834 kref_get(&bcd->ref);
819 mutex_unlock(&bsg_mutex); 835 mutex_unlock(&bsg_mutex);
820 836
821 if (!bcd) 837 if (!bcd)
822 return ERR_PTR(-ENODEV); 838 return ERR_PTR(-ENODEV);
823 839
824 bd = __bsg_get_device(iminor(inode), bcd->queue); 840 bd = __bsg_get_device(iminor(inode), bcd->queue);
825 if (bd) 841 if (bd)
826 return bd; 842 return bd;
827 843
828 bd = bsg_add_device(inode, bcd->queue, file); 844 bd = bsg_add_device(inode, bcd->queue, file);
829 if (IS_ERR(bd)) 845 if (IS_ERR(bd))
830 kref_put(&bcd->ref, bsg_kref_release_function); 846 kref_put(&bcd->ref, bsg_kref_release_function);
831 847
832 return bd; 848 return bd;
833 } 849 }
834 850
835 static int bsg_open(struct inode *inode, struct file *file) 851 static int bsg_open(struct inode *inode, struct file *file)
836 { 852 {
837 struct bsg_device *bd = bsg_get_device(inode, file); 853 struct bsg_device *bd = bsg_get_device(inode, file);
838 854
839 if (IS_ERR(bd)) 855 if (IS_ERR(bd))
840 return PTR_ERR(bd); 856 return PTR_ERR(bd);
841 857
842 file->private_data = bd; 858 file->private_data = bd;
843 return 0; 859 return 0;
844 } 860 }
845 861
846 static int bsg_release(struct inode *inode, struct file *file) 862 static int bsg_release(struct inode *inode, struct file *file)
847 { 863 {
848 struct bsg_device *bd = file->private_data; 864 struct bsg_device *bd = file->private_data;
849 865
850 file->private_data = NULL; 866 file->private_data = NULL;
851 return bsg_put_device(bd); 867 return bsg_put_device(bd);
852 } 868 }
853 869
854 static unsigned int bsg_poll(struct file *file, poll_table *wait) 870 static unsigned int bsg_poll(struct file *file, poll_table *wait)
855 { 871 {
856 struct bsg_device *bd = file->private_data; 872 struct bsg_device *bd = file->private_data;
857 unsigned int mask = 0; 873 unsigned int mask = 0;
858 874
859 poll_wait(file, &bd->wq_done, wait); 875 poll_wait(file, &bd->wq_done, wait);
860 poll_wait(file, &bd->wq_free, wait); 876 poll_wait(file, &bd->wq_free, wait);
861 877
862 spin_lock_irq(&bd->lock); 878 spin_lock_irq(&bd->lock);
863 if (!list_empty(&bd->done_list)) 879 if (!list_empty(&bd->done_list))
864 mask |= POLLIN | POLLRDNORM; 880 mask |= POLLIN | POLLRDNORM;
865 if (bd->queued_cmds >= bd->max_queue) 881 if (bd->queued_cmds >= bd->max_queue)
866 mask |= POLLOUT; 882 mask |= POLLOUT;
867 spin_unlock_irq(&bd->lock); 883 spin_unlock_irq(&bd->lock);
868 884
869 return mask; 885 return mask;
870 } 886 }
871 887
872 static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 888 static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
873 { 889 {
874 struct bsg_device *bd = file->private_data; 890 struct bsg_device *bd = file->private_data;
875 int __user *uarg = (int __user *) arg; 891 int __user *uarg = (int __user *) arg;
876 int ret; 892 int ret;
877 893
878 switch (cmd) { 894 switch (cmd) {
879 /* 895 /*
880 * our own ioctls 896 * our own ioctls
881 */ 897 */
882 case SG_GET_COMMAND_Q: 898 case SG_GET_COMMAND_Q:
883 return put_user(bd->max_queue, uarg); 899 return put_user(bd->max_queue, uarg);
884 case SG_SET_COMMAND_Q: { 900 case SG_SET_COMMAND_Q: {
885 int queue; 901 int queue;
886 902
887 if (get_user(queue, uarg)) 903 if (get_user(queue, uarg))
888 return -EFAULT; 904 return -EFAULT;
889 if (queue < 1) 905 if (queue < 1)
890 return -EINVAL; 906 return -EINVAL;
891 907
892 spin_lock_irq(&bd->lock); 908 spin_lock_irq(&bd->lock);
893 bd->max_queue = queue; 909 bd->max_queue = queue;
894 spin_unlock_irq(&bd->lock); 910 spin_unlock_irq(&bd->lock);
895 return 0; 911 return 0;
896 } 912 }
897 913
898 /* 914 /*
899 * SCSI/sg ioctls 915 * SCSI/sg ioctls
900 */ 916 */
901 case SG_GET_VERSION_NUM: 917 case SG_GET_VERSION_NUM:
902 case SCSI_IOCTL_GET_IDLUN: 918 case SCSI_IOCTL_GET_IDLUN:
903 case SCSI_IOCTL_GET_BUS_NUMBER: 919 case SCSI_IOCTL_GET_BUS_NUMBER:
904 case SG_SET_TIMEOUT: 920 case SG_SET_TIMEOUT:
905 case SG_GET_TIMEOUT: 921 case SG_GET_TIMEOUT:
906 case SG_GET_RESERVED_SIZE: 922 case SG_GET_RESERVED_SIZE:
907 case SG_SET_RESERVED_SIZE: 923 case SG_SET_RESERVED_SIZE:
908 case SG_EMULATED_HOST: 924 case SG_EMULATED_HOST:
909 case SCSI_IOCTL_SEND_COMMAND: { 925 case SCSI_IOCTL_SEND_COMMAND: {
910 void __user *uarg = (void __user *) arg; 926 void __user *uarg = (void __user *) arg;
911 return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg); 927 return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
912 } 928 }
913 case SG_IO: { 929 case SG_IO: {
914 struct request *rq; 930 struct request *rq;
915 struct bio *bio, *bidi_bio = NULL; 931 struct bio *bio, *bidi_bio = NULL;
916 struct sg_io_v4 hdr; 932 struct sg_io_v4 hdr;
917 933
918 if (copy_from_user(&hdr, uarg, sizeof(hdr))) 934 if (copy_from_user(&hdr, uarg, sizeof(hdr)))
919 return -EFAULT; 935 return -EFAULT;
920 936
921 rq = bsg_map_hdr(bd, &hdr); 937 rq = bsg_map_hdr(bd, &hdr);
922 if (IS_ERR(rq)) 938 if (IS_ERR(rq))
923 return PTR_ERR(rq); 939 return PTR_ERR(rq);
924 940
925 bio = rq->bio; 941 bio = rq->bio;
926 if (rq->next_rq) 942 if (rq->next_rq)
927 bidi_bio = rq->next_rq->bio; 943 bidi_bio = rq->next_rq->bio;
928 blk_execute_rq(bd->queue, NULL, rq, 0); 944 blk_execute_rq(bd->queue, NULL, rq, 0);
929 ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio); 945 ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
930 946
931 if (copy_to_user(uarg, &hdr, sizeof(hdr))) 947 if (copy_to_user(uarg, &hdr, sizeof(hdr)))
932 return -EFAULT; 948 return -EFAULT;
933 949
934 return ret; 950 return ret;
935 } 951 }
936 /* 952 /*
937 * block device ioctls 953 * block device ioctls
938 */ 954 */
939 default: 955 default:
940 #if 0 956 #if 0
941 return ioctl_by_bdev(bd->bdev, cmd, arg); 957 return ioctl_by_bdev(bd->bdev, cmd, arg);
942 #else 958 #else
943 return -ENOTTY; 959 return -ENOTTY;
944 #endif 960 #endif
945 } 961 }
946 } 962 }
947 963
948 static const struct file_operations bsg_fops = { 964 static const struct file_operations bsg_fops = {
949 .read = bsg_read, 965 .read = bsg_read,
950 .write = bsg_write, 966 .write = bsg_write,
951 .poll = bsg_poll, 967 .poll = bsg_poll,
952 .open = bsg_open, 968 .open = bsg_open,
953 .release = bsg_release, 969 .release = bsg_release,
954 .unlocked_ioctl = bsg_ioctl, 970 .unlocked_ioctl = bsg_ioctl,
955 .owner = THIS_MODULE, 971 .owner = THIS_MODULE,
956 }; 972 };
957 973
958 void bsg_unregister_queue(struct request_queue *q) 974 void bsg_unregister_queue(struct request_queue *q)
959 { 975 {
960 struct bsg_class_device *bcd = &q->bsg_dev; 976 struct bsg_class_device *bcd = &q->bsg_dev;
961 977
962 if (!bcd->class_dev) 978 if (!bcd->class_dev)
963 return; 979 return;
964 980
965 mutex_lock(&bsg_mutex); 981 mutex_lock(&bsg_mutex);
966 idr_remove(&bsg_minor_idr, bcd->minor); 982 idr_remove(&bsg_minor_idr, bcd->minor);
967 sysfs_remove_link(&q->kobj, "bsg"); 983 sysfs_remove_link(&q->kobj, "bsg");
968 device_unregister(bcd->class_dev); 984 device_unregister(bcd->class_dev);
969 bcd->class_dev = NULL; 985 bcd->class_dev = NULL;
970 kref_put(&bcd->ref, bsg_kref_release_function); 986 kref_put(&bcd->ref, bsg_kref_release_function);
971 mutex_unlock(&bsg_mutex); 987 mutex_unlock(&bsg_mutex);
972 } 988 }
973 EXPORT_SYMBOL_GPL(bsg_unregister_queue); 989 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
974 990
975 int bsg_register_queue(struct request_queue *q, struct device *parent, 991 int bsg_register_queue(struct request_queue *q, struct device *parent,
976 const char *name, void (*release)(struct device *)) 992 const char *name, void (*release)(struct device *))
977 { 993 {
978 struct bsg_class_device *bcd; 994 struct bsg_class_device *bcd;
979 dev_t dev; 995 dev_t dev;
980 int ret, minor; 996 int ret, minor;
981 struct device *class_dev = NULL; 997 struct device *class_dev = NULL;
982 const char *devname; 998 const char *devname;
983 999
984 if (name) 1000 if (name)
985 devname = name; 1001 devname = name;
986 else 1002 else
987 devname = parent->bus_id; 1003 devname = parent->bus_id;
988 1004
989 /* 1005 /*
990 * we need a proper transport to send commands, not a stacked device 1006 * we need a proper transport to send commands, not a stacked device
991 */ 1007 */
992 if (!q->request_fn) 1008 if (!q->request_fn)
993 return 0; 1009 return 0;
994 1010
995 bcd = &q->bsg_dev; 1011 bcd = &q->bsg_dev;
996 memset(bcd, 0, sizeof(*bcd)); 1012 memset(bcd, 0, sizeof(*bcd));
997 1013
998 mutex_lock(&bsg_mutex); 1014 mutex_lock(&bsg_mutex);
999 1015
1000 ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL); 1016 ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
1001 if (!ret) { 1017 if (!ret) {
1002 ret = -ENOMEM; 1018 ret = -ENOMEM;
1003 goto unlock; 1019 goto unlock;
1004 } 1020 }
1005 1021
1006 ret = idr_get_new(&bsg_minor_idr, bcd, &minor); 1022 ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
1007 if (ret < 0) 1023 if (ret < 0)
1008 goto unlock; 1024 goto unlock;
1009 1025
1010 if (minor >= BSG_MAX_DEVS) { 1026 if (minor >= BSG_MAX_DEVS) {
1011 printk(KERN_ERR "bsg: too many bsg devices\n"); 1027 printk(KERN_ERR "bsg: too many bsg devices\n");
1012 ret = -EINVAL; 1028 ret = -EINVAL;
1013 goto remove_idr; 1029 goto remove_idr;
1014 } 1030 }
1015 1031
1016 bcd->minor = minor; 1032 bcd->minor = minor;
1017 bcd->queue = q; 1033 bcd->queue = q;
1018 bcd->parent = get_device(parent); 1034 bcd->parent = get_device(parent);
1019 bcd->release = release; 1035 bcd->release = release;
1020 kref_init(&bcd->ref); 1036 kref_init(&bcd->ref);
1021 dev = MKDEV(bsg_major, bcd->minor); 1037 dev = MKDEV(bsg_major, bcd->minor);
1022 class_dev = device_create(bsg_class, parent, dev, "%s", devname); 1038 class_dev = device_create(bsg_class, parent, dev, "%s", devname);
1023 if (IS_ERR(class_dev)) { 1039 if (IS_ERR(class_dev)) {
1024 ret = PTR_ERR(class_dev); 1040 ret = PTR_ERR(class_dev);
1025 goto put_dev; 1041 goto put_dev;
1026 } 1042 }
1027 bcd->class_dev = class_dev; 1043 bcd->class_dev = class_dev;
1028 1044
1029 if (q->kobj.sd) { 1045 if (q->kobj.sd) {
1030 ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg"); 1046 ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
1031 if (ret) 1047 if (ret)
1032 goto unregister_class_dev; 1048 goto unregister_class_dev;
1033 } 1049 }
1034 1050
1035 mutex_unlock(&bsg_mutex); 1051 mutex_unlock(&bsg_mutex);
1036 return 0; 1052 return 0;
1037 1053
1038 unregister_class_dev: 1054 unregister_class_dev:
1039 device_unregister(class_dev); 1055 device_unregister(class_dev);
1040 put_dev: 1056 put_dev:
1041 put_device(parent); 1057 put_device(parent);
1042 remove_idr: 1058 remove_idr:
1043 idr_remove(&bsg_minor_idr, minor); 1059 idr_remove(&bsg_minor_idr, minor);
1044 unlock: 1060 unlock:
1045 mutex_unlock(&bsg_mutex); 1061 mutex_unlock(&bsg_mutex);
1046 return ret; 1062 return ret;
1047 } 1063 }
1048 EXPORT_SYMBOL_GPL(bsg_register_queue); 1064 EXPORT_SYMBOL_GPL(bsg_register_queue);
1049 1065
1050 static struct cdev bsg_cdev; 1066 static struct cdev bsg_cdev;
1051 1067
1052 static int __init bsg_init(void) 1068 static int __init bsg_init(void)
1053 { 1069 {
1054 int ret, i; 1070 int ret, i;
1055 dev_t devid; 1071 dev_t devid;
1056 1072
1057 bsg_cmd_cachep = kmem_cache_create("bsg_cmd", 1073 bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
1058 sizeof(struct bsg_command), 0, 0, NULL); 1074 sizeof(struct bsg_command), 0, 0, NULL);
1059 if (!bsg_cmd_cachep) { 1075 if (!bsg_cmd_cachep) {
1060 printk(KERN_ERR "bsg: failed creating slab cache\n"); 1076 printk(KERN_ERR "bsg: failed creating slab cache\n");
1061 return -ENOMEM; 1077 return -ENOMEM;
1062 } 1078 }
1063 1079
1064 for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++) 1080 for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
1065 INIT_HLIST_HEAD(&bsg_device_list[i]); 1081 INIT_HLIST_HEAD(&bsg_device_list[i]);
1066 1082
1067 bsg_class = class_create(THIS_MODULE, "bsg"); 1083 bsg_class = class_create(THIS_MODULE, "bsg");
1068 if (IS_ERR(bsg_class)) { 1084 if (IS_ERR(bsg_class)) {
1069 ret = PTR_ERR(bsg_class); 1085 ret = PTR_ERR(bsg_class);
1070 goto destroy_kmemcache; 1086 goto destroy_kmemcache;
1071 } 1087 }
1072 1088
1073 ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg"); 1089 ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
1074 if (ret) 1090 if (ret)
1075 goto destroy_bsg_class; 1091 goto destroy_bsg_class;
1076 1092
1077 bsg_major = MAJOR(devid); 1093 bsg_major = MAJOR(devid);
1078 1094
1079 cdev_init(&bsg_cdev, &bsg_fops); 1095 cdev_init(&bsg_cdev, &bsg_fops);
1080 ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS); 1096 ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
1081 if (ret) 1097 if (ret)
1082 goto unregister_chrdev; 1098 goto unregister_chrdev;
1083 1099
1084 printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION 1100 printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
1085 " loaded (major %d)\n", bsg_major); 1101 " loaded (major %d)\n", bsg_major);
1086 return 0; 1102 return 0;
1087 unregister_chrdev: 1103 unregister_chrdev:
1088 unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS); 1104 unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
1089 destroy_bsg_class: 1105 destroy_bsg_class:
1090 class_destroy(bsg_class); 1106 class_destroy(bsg_class);
1091 destroy_kmemcache: 1107 destroy_kmemcache:
1092 kmem_cache_destroy(bsg_cmd_cachep); 1108 kmem_cache_destroy(bsg_cmd_cachep);
1093 return ret; 1109 return ret;
1094 } 1110 }
1095 1111
1096 MODULE_AUTHOR("Jens Axboe"); 1112 MODULE_AUTHOR("Jens Axboe");
1097 MODULE_DESCRIPTION(BSG_DESCRIPTION); 1113 MODULE_DESCRIPTION(BSG_DESCRIPTION);
1098 MODULE_LICENSE("GPL"); 1114 MODULE_LICENSE("GPL");
1099 1115
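
A hedged sketch (not from this diff) of the call-site conversion
mentioned in the changelog: the old helper took a precomputed
write-permission flag, while the replacement blk_verify_command() in
cmd-filter.c below takes the struct file and resolves the per-disk
filter and f_mode itself. example_submit_cdb() is a hypothetical
caller, for illustration only:

static int example_submit_cdb(struct file *file, struct request *rq)
{
	int err;

	/* old interface: blk_verify_command(rq->cmd, has_write_perm) */
	err = blk_verify_command(file, rq->cmd);
	if (err)
		return err;

	/* ...map user data and queue the request as before... */
	return 0;
}
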
File was created 1 /*
2 * Copyright 2004 Peter M. Jones <pjones@redhat.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
16 * 02111-1307, USA.
17 *
18 */
19
20 #include <linux/list.h>
21 #include <linux/genhd.h>
22 #include <linux/spinlock.h>
23 #include <linux/parser.h>
24 #include <linux/capability.h>
25 #include <linux/bitops.h>
26
27 #include <scsi/scsi.h>
28 #include <linux/cdrom.h>
29
30 int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
31 unsigned char *cmd, mode_t *f_mode)
32 {
33 /* root can do any command. */
34 if (capable(CAP_SYS_RAWIO))
35 return 0;
36
37 /* if there's no filter set, assume we're filtering everything out */
38 if (!filter)
39 return -EPERM;
40
41 /* Anybody who can open the device can do a read-safe command */
42 if (test_bit(cmd[0], filter->read_ok))
43 return 0;
44
45 /* Write-safe commands require a writable open */
46 if (test_bit(cmd[0], filter->write_ok) && (*f_mode & FMODE_WRITE))
47 return 0;
48
49 return -EPERM;
50 }
51 EXPORT_SYMBOL(blk_cmd_filter_verify_command);
52
53 int blk_verify_command(struct file *file, unsigned char *cmd)
54 {
55 struct gendisk *disk;
56 struct inode *inode;
57
58 if (!file)
59 return -EINVAL;
60
61 inode = file->f_dentry->d_inode;
62 if (!inode)
63 return -EINVAL;
64
65 disk = inode->i_bdev->bd_disk;
66
67 return blk_cmd_filter_verify_command(&disk->cmd_filter,
68 cmd, &file->f_mode);
69 }
70 EXPORT_SYMBOL(blk_verify_command);
71
72 /* and now, the sysfs stuff */
73 static ssize_t rcf_cmds_show(struct blk_scsi_cmd_filter *filter, char *page,
74 int rw)
75 {
76 char *npage = page;
77 unsigned long *okbits;
78 int i;
79
80 if (rw == READ)
81 okbits = filter->read_ok;
82 else
83 okbits = filter->write_ok;
84
85 for (i = 0; i < BLK_SCSI_MAX_CMDS; i++) {
86 if (test_bit(i, okbits)) {
87 sprintf(npage, "%02x", i);
88 npage += 2;
89 if (i < BLK_SCSI_MAX_CMDS - 1)
90 sprintf(npage++, " ");
91 }
92 }
93
94 if (npage != page)
95 npage += sprintf(npage, "\n");
96
97 return npage - page;
98 }
99
100 static ssize_t rcf_readcmds_show(struct blk_scsi_cmd_filter *filter, char *page)
101 {
102 return rcf_cmds_show(filter, page, READ);
103 }
104
105 static ssize_t rcf_writecmds_show(struct blk_scsi_cmd_filter *filter,
106 char *page)
107 {
108 return rcf_cmds_show(filter, page, WRITE);
109 }
110
111 static ssize_t rcf_cmds_store(struct blk_scsi_cmd_filter *filter,
112 const char *page, size_t count, int rw)
113 {
114 ssize_t ret = 0;
115 unsigned long okbits[BLK_SCSI_CMD_PER_LONG], *target_okbits;
116 int cmd, status, len;
117 substring_t ss;
118
119 memset(&okbits, 0, sizeof(okbits));
120
121 for (len = strlen(page); len > 0; len -= 3) {
122 if (len < 2)
123 break;
124 ss.from = (char *) page + ret;
125 ss.to = (char *) page + ret + 2;
126 ret += 3;
127 status = match_hex(&ss, &cmd);
128 /* either of these cases means invalid input, so reject the write. */
129 if (status || cmd >= BLK_SCSI_MAX_CMDS)
130 return -EINVAL;
131
132 __set_bit(cmd, okbits);
133 }
134
135 if (rw == READ)
136 target_okbits = filter->read_ok;
137 else
138 target_okbits = filter->write_ok;
139
140 memmove(target_okbits, okbits, sizeof(okbits));
141 return count;
142 }
143
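Note that rcf_cmds_store() parses its input in fixed three-character strides, two hex digits followed by one separator, so the expected format is a space-separated list such as "04 12 25" (a trailing newline is fine). Any stride that match_hex() cannot parse as an opcode below BLK_SCSI_MAX_CMDS rejects the whole write with -EINVAL, and a successful write replaces the entire bitmap rather than appending to it, since the local okbits copy starts out zeroed.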
144 static ssize_t rcf_readcmds_store(struct blk_scsi_cmd_filter *filter,
145 const char *page, size_t count)
146 {
147 return rcf_cmds_store(filter, page, count, READ);
148 }
149
150 static ssize_t rcf_writecmds_store(struct blk_scsi_cmd_filter *filter,
151 const char *page, size_t count)
152 {
153 return rcf_cmds_store(filter, page, count, WRITE);
154 }
155
156 struct rcf_sysfs_entry {
157 struct attribute attr;
158 ssize_t (*show)(struct blk_scsi_cmd_filter *, char *);
159 ssize_t (*store)(struct blk_scsi_cmd_filter *, const char *, size_t);
160 };
161
162 static struct rcf_sysfs_entry rcf_readcmds_entry = {
163 .attr = { .name = "read_table", .mode = S_IRUGO | S_IWUSR },
164 .show = rcf_readcmds_show,
165 .store = rcf_readcmds_store,
166 };
167
168 static struct rcf_sysfs_entry rcf_writecmds_entry = {
169 .attr = { .name = "write_table", .mode = S_IRUGO | S_IWUSR },
170 .show = rcf_writecmds_show,
171 .store = rcf_writecmds_store,
172 };
173
174 static struct attribute *default_attrs[] = {
175 &rcf_readcmds_entry.attr,
176 &rcf_writecmds_entry.attr,
177 NULL,
178 };
179
180 #define to_rcf(atr) container_of((atr), struct rcf_sysfs_entry, attr)
181
182 static ssize_t
183 rcf_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
184 {
185 struct rcf_sysfs_entry *entry = to_rcf(attr);
186 struct blk_scsi_cmd_filter *filter;
187
188 filter = container_of(kobj, struct blk_scsi_cmd_filter, kobj);
189 if (entry->show)
190 return entry->show(filter, page);
191
192 return 0;
193 }
194
195 static ssize_t
196 rcf_attr_store(struct kobject *kobj, struct attribute *attr,
197 const char *page, size_t length)
198 {
199 struct rcf_sysfs_entry *entry = to_rcf(attr);
200 struct blk_scsi_cmd_filter *filter;
201
202 if (!capable(CAP_SYS_RAWIO))
203 return -EPERM;
204
205 if (!entry->store)
206 return -EINVAL;
207
208 filter = container_of(kobj, struct blk_scsi_cmd_filter, kobj);
209 return entry->store(filter, page, length);
210 }
211
212 static struct sysfs_ops rcf_sysfs_ops = {
213 .show = rcf_attr_show,
214 .store = rcf_attr_store,
215 };
216
217 static struct kobj_type rcf_ktype = {
218 .sysfs_ops = &rcf_sysfs_ops,
219 .default_attrs = default_attrs,
220 };
221
222 static void rcf_set_defaults(struct blk_scsi_cmd_filter *filter)
223 {
224 /* Basic read-only commands */
225 __set_bit(TEST_UNIT_READY, filter->read_ok);
226 __set_bit(REQUEST_SENSE, filter->read_ok);
227 __set_bit(READ_6, filter->read_ok);
228 __set_bit(READ_10, filter->read_ok);
229 __set_bit(READ_12, filter->read_ok);
230 __set_bit(READ_16, filter->read_ok);
231 __set_bit(READ_BUFFER, filter->read_ok);
232 __set_bit(READ_DEFECT_DATA, filter->read_ok);
233 __set_bit(READ_LONG, filter->read_ok);
234 __set_bit(INQUIRY, filter->read_ok);
235 __set_bit(MODE_SENSE, filter->read_ok);
236 __set_bit(MODE_SENSE_10, filter->read_ok);
237 __set_bit(LOG_SENSE, filter->read_ok);
238 __set_bit(START_STOP, filter->read_ok);
239 __set_bit(GPCMD_VERIFY_10, filter->read_ok);
240 __set_bit(VERIFY_16, filter->read_ok);
241 __set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
242
243 /* Audio CD commands */
244 __set_bit(GPCMD_PLAY_CD, filter->read_ok);
245 __set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
246 __set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
247 __set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
248 __set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);
249
250 /* CD/DVD data reading */
251 __set_bit(GPCMD_READ_CD, filter->read_ok);
252 __set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
253 __set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
254 __set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
255 __set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
256 __set_bit(GPCMD_READ_HEADER, filter->read_ok);
257 __set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
258 __set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
259 __set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
260 __set_bit(GPCMD_REPORT_KEY, filter->read_ok);
261 __set_bit(GPCMD_SCAN, filter->read_ok);
262 __set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
263 __set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
264 __set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
265 __set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
266 __set_bit(GPCMD_SEEK, filter->read_ok);
267 __set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);
268
269 /* Basic writing commands */
270 __set_bit(WRITE_6, filter->write_ok);
271 __set_bit(WRITE_10, filter->write_ok);
272 __set_bit(WRITE_VERIFY, filter->write_ok);
273 __set_bit(WRITE_12, filter->write_ok);
274 __set_bit(WRITE_VERIFY_12, filter->write_ok);
275 __set_bit(WRITE_16, filter->write_ok);
276 __set_bit(WRITE_LONG, filter->write_ok);
277 __set_bit(WRITE_LONG_2, filter->write_ok);
278 __set_bit(ERASE, filter->write_ok);
279 __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
280 __set_bit(MODE_SELECT, filter->write_ok);
281 __set_bit(LOG_SELECT, filter->write_ok);
282 __set_bit(GPCMD_BLANK, filter->write_ok);
283 __set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
284 __set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
285 __set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
286 __set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
287 __set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
288 __set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
289 __set_bit(GPCMD_SEND_EVENT, filter->write_ok);
290 __set_bit(GPCMD_SEND_KEY, filter->write_ok);
291 __set_bit(GPCMD_SEND_OPC, filter->write_ok);
292 __set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
293 __set_bit(GPCMD_SET_SPEED, filter->write_ok);
294 __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
295 __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
296 __set_bit(GPCMD_SET_STREAMING, filter->write_ok);
297 }
298
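rcf_set_defaults() reproduces, opcode for opcode, the static cmd_type[] table that the scsi_ioctl.c hunk below deletes, so a freshly registered disk starts out with the same policy as before. The differences are that the bitmaps are now per-disk and writable, that the CAP_SYS_RAWIO bypass is checked before the table lookup rather than after it, and that the old "unknown opcode" warning is gone.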
299 int blk_register_filter(struct gendisk *disk)
300 {
301 int ret;
302 struct blk_scsi_cmd_filter *filter = &disk->cmd_filter;
303 struct kobject *parent = kobject_get(disk->holder_dir->parent);
304
305 if (!parent)
306 return -ENODEV;
307
308 ret = kobject_init_and_add(&filter->kobj, &rcf_ktype, parent,
309 "%s", "cmd_filter");
310
311 if (ret < 0)
312 return ret;
313
314 rcf_set_defaults(filter);
315 return 0;
316 }
317
318 void blk_unregister_filter(struct gendisk *disk)
319 {
320 struct blk_scsi_cmd_filter *filter = &disk->cmd_filter;
321
322 kobject_put(&filter->kobj);
323 kobject_put(disk->holder_dir->parent);
324 }
325
326
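Because blk_register_filter() attaches the "cmd_filter" kobject to the disk's parent kobject, the two attributes should appear as /sys/block/<disk>/cmd_filter/{read_table,write_table}. Below is a minimal user-space sketch of driving them; the path, the "sda" device name, and the choice of READ CAPACITY (0x25) as the opcode to add are illustrative assumptions, not part of the commit.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* assumed path; "sda" is only an example device */
	const char *path = "/sys/block/sda/cmd_filter/read_table";
	char buf[1024];
	size_t n;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("read_table");
		return 1;
	}
	if (!fgets(buf, sizeof(buf), f))
		buf[0] = '\0';
	fclose(f);
	printf("read-safe opcodes: %s", buf);

	/* rcf_cmds_store() replaces the whole bitmap, so write the full
	 * list back plus the addition; this needs CAP_SYS_RAWIO, per
	 * rcf_attr_store() above. */
	n = strlen(buf);
	while (n && (buf[n - 1] == '\n' || buf[n - 1] == ' '))
		buf[--n] = '\0';
	f = fopen(path, "w");
	if (!f) {
		perror("read_table");
		return 1;
	}
	if (n)
		fprintf(f, "%s 25\n", buf);	/* allow READ CAPACITY */
	else
		fprintf(f, "25\n");
	fclose(f);		/* small string, flushed as a single write() */
	return 0;
}

write_table works the same way; only the attribute name differs.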
1 /* 1 /*
2 * gendisk handling 2 * gendisk handling
3 */ 3 */
4 4
5 #include <linux/module.h> 5 #include <linux/module.h>
6 #include <linux/fs.h> 6 #include <linux/fs.h>
7 #include <linux/genhd.h> 7 #include <linux/genhd.h>
8 #include <linux/kdev_t.h> 8 #include <linux/kdev_t.h>
9 #include <linux/kernel.h> 9 #include <linux/kernel.h>
10 #include <linux/blkdev.h> 10 #include <linux/blkdev.h>
11 #include <linux/init.h> 11 #include <linux/init.h>
12 #include <linux/spinlock.h> 12 #include <linux/spinlock.h>
13 #include <linux/seq_file.h> 13 #include <linux/seq_file.h>
14 #include <linux/slab.h> 14 #include <linux/slab.h>
15 #include <linux/kmod.h> 15 #include <linux/kmod.h>
16 #include <linux/kobj_map.h> 16 #include <linux/kobj_map.h>
17 #include <linux/buffer_head.h> 17 #include <linux/buffer_head.h>
18 #include <linux/mutex.h> 18 #include <linux/mutex.h>
19 19
20 #include "blk.h" 20 #include "blk.h"
21 21
22 static DEFINE_MUTEX(block_class_lock); 22 static DEFINE_MUTEX(block_class_lock);
23 #ifndef CONFIG_SYSFS_DEPRECATED 23 #ifndef CONFIG_SYSFS_DEPRECATED
24 struct kobject *block_depr; 24 struct kobject *block_depr;
25 #endif 25 #endif
26 26
27 static struct device_type disk_type; 27 static struct device_type disk_type;
28 28
29 /* 29 /*
30 * Can be deleted altogether. Later. 30 * Can be deleted altogether. Later.
31 * 31 *
32 */ 32 */
33 static struct blk_major_name { 33 static struct blk_major_name {
34 struct blk_major_name *next; 34 struct blk_major_name *next;
35 int major; 35 int major;
36 char name[16]; 36 char name[16];
37 } *major_names[BLKDEV_MAJOR_HASH_SIZE]; 37 } *major_names[BLKDEV_MAJOR_HASH_SIZE];
38 38
39 /* index in the above - for now: assume no multimajor ranges */ 39 /* index in the above - for now: assume no multimajor ranges */
40 static inline int major_to_index(int major) 40 static inline int major_to_index(int major)
41 { 41 {
42 return major % BLKDEV_MAJOR_HASH_SIZE; 42 return major % BLKDEV_MAJOR_HASH_SIZE;
43 } 43 }
44 44
45 #ifdef CONFIG_PROC_FS 45 #ifdef CONFIG_PROC_FS
46 void blkdev_show(struct seq_file *f, off_t offset) 46 void blkdev_show(struct seq_file *f, off_t offset)
47 { 47 {
48 struct blk_major_name *dp; 48 struct blk_major_name *dp;
49 49
50 if (offset < BLKDEV_MAJOR_HASH_SIZE) { 50 if (offset < BLKDEV_MAJOR_HASH_SIZE) {
51 mutex_lock(&block_class_lock); 51 mutex_lock(&block_class_lock);
52 for (dp = major_names[offset]; dp; dp = dp->next) 52 for (dp = major_names[offset]; dp; dp = dp->next)
53 seq_printf(f, "%3d %s\n", dp->major, dp->name); 53 seq_printf(f, "%3d %s\n", dp->major, dp->name);
54 mutex_unlock(&block_class_lock); 54 mutex_unlock(&block_class_lock);
55 } 55 }
56 } 56 }
57 #endif /* CONFIG_PROC_FS */ 57 #endif /* CONFIG_PROC_FS */
58 58
59 int register_blkdev(unsigned int major, const char *name) 59 int register_blkdev(unsigned int major, const char *name)
60 { 60 {
61 struct blk_major_name **n, *p; 61 struct blk_major_name **n, *p;
62 int index, ret = 0; 62 int index, ret = 0;
63 63
64 mutex_lock(&block_class_lock); 64 mutex_lock(&block_class_lock);
65 65
66 /* temporary */ 66 /* temporary */
67 if (major == 0) { 67 if (major == 0) {
68 for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) { 68 for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
69 if (major_names[index] == NULL) 69 if (major_names[index] == NULL)
70 break; 70 break;
71 } 71 }
72 72
73 if (index == 0) { 73 if (index == 0) {
74 printk("register_blkdev: failed to get major for %s\n", 74 printk("register_blkdev: failed to get major for %s\n",
75 name); 75 name);
76 ret = -EBUSY; 76 ret = -EBUSY;
77 goto out; 77 goto out;
78 } 78 }
79 major = index; 79 major = index;
80 ret = major; 80 ret = major;
81 } 81 }
82 82
83 p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL); 83 p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
84 if (p == NULL) { 84 if (p == NULL) {
85 ret = -ENOMEM; 85 ret = -ENOMEM;
86 goto out; 86 goto out;
87 } 87 }
88 88
89 p->major = major; 89 p->major = major;
90 strlcpy(p->name, name, sizeof(p->name)); 90 strlcpy(p->name, name, sizeof(p->name));
91 p->next = NULL; 91 p->next = NULL;
92 index = major_to_index(major); 92 index = major_to_index(major);
93 93
94 for (n = &major_names[index]; *n; n = &(*n)->next) { 94 for (n = &major_names[index]; *n; n = &(*n)->next) {
95 if ((*n)->major == major) 95 if ((*n)->major == major)
96 break; 96 break;
97 } 97 }
98 if (!*n) 98 if (!*n)
99 *n = p; 99 *n = p;
100 else 100 else
101 ret = -EBUSY; 101 ret = -EBUSY;
102 102
103 if (ret < 0) { 103 if (ret < 0) {
104 printk("register_blkdev: cannot get major %d for %s\n", 104 printk("register_blkdev: cannot get major %d for %s\n",
105 major, name); 105 major, name);
106 kfree(p); 106 kfree(p);
107 } 107 }
108 out: 108 out:
109 mutex_unlock(&block_class_lock); 109 mutex_unlock(&block_class_lock);
110 return ret; 110 return ret;
111 } 111 }
112 112
113 EXPORT_SYMBOL(register_blkdev); 113 EXPORT_SYMBOL(register_blkdev);
114 114
115 void unregister_blkdev(unsigned int major, const char *name) 115 void unregister_blkdev(unsigned int major, const char *name)
116 { 116 {
117 struct blk_major_name **n; 117 struct blk_major_name **n;
118 struct blk_major_name *p = NULL; 118 struct blk_major_name *p = NULL;
119 int index = major_to_index(major); 119 int index = major_to_index(major);
120 120
121 mutex_lock(&block_class_lock); 121 mutex_lock(&block_class_lock);
122 for (n = &major_names[index]; *n; n = &(*n)->next) 122 for (n = &major_names[index]; *n; n = &(*n)->next)
123 if ((*n)->major == major) 123 if ((*n)->major == major)
124 break; 124 break;
125 if (!*n || strcmp((*n)->name, name)) { 125 if (!*n || strcmp((*n)->name, name)) {
126 WARN_ON(1); 126 WARN_ON(1);
127 } else { 127 } else {
128 p = *n; 128 p = *n;
129 *n = p->next; 129 *n = p->next;
130 } 130 }
131 mutex_unlock(&block_class_lock); 131 mutex_unlock(&block_class_lock);
132 kfree(p); 132 kfree(p);
133 } 133 }
134 134
135 EXPORT_SYMBOL(unregister_blkdev); 135 EXPORT_SYMBOL(unregister_blkdev);
136 136
137 static struct kobj_map *bdev_map; 137 static struct kobj_map *bdev_map;
138 138
139 /* 139 /*
140 * Register device numbers dev..(dev+range-1) 140 * Register device numbers dev..(dev+range-1)
141 * range must be nonzero 141 * range must be nonzero
142 * The hash chain is sorted on range, so that subranges can override. 142 * The hash chain is sorted on range, so that subranges can override.
143 */ 143 */
144 void blk_register_region(dev_t devt, unsigned long range, struct module *module, 144 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
145 struct kobject *(*probe)(dev_t, int *, void *), 145 struct kobject *(*probe)(dev_t, int *, void *),
146 int (*lock)(dev_t, void *), void *data) 146 int (*lock)(dev_t, void *), void *data)
147 { 147 {
148 kobj_map(bdev_map, devt, range, module, probe, lock, data); 148 kobj_map(bdev_map, devt, range, module, probe, lock, data);
149 } 149 }
150 150
151 EXPORT_SYMBOL(blk_register_region); 151 EXPORT_SYMBOL(blk_register_region);
152 152
153 void blk_unregister_region(dev_t devt, unsigned long range) 153 void blk_unregister_region(dev_t devt, unsigned long range)
154 { 154 {
155 kobj_unmap(bdev_map, devt, range); 155 kobj_unmap(bdev_map, devt, range);
156 } 156 }
157 157
158 EXPORT_SYMBOL(blk_unregister_region); 158 EXPORT_SYMBOL(blk_unregister_region);
159 159
160 static struct kobject *exact_match(dev_t devt, int *part, void *data) 160 static struct kobject *exact_match(dev_t devt, int *part, void *data)
161 { 161 {
162 struct gendisk *p = data; 162 struct gendisk *p = data;
163 163
164 return &p->dev.kobj; 164 return &p->dev.kobj;
165 } 165 }
166 166
167 static int exact_lock(dev_t devt, void *data) 167 static int exact_lock(dev_t devt, void *data)
168 { 168 {
169 struct gendisk *p = data; 169 struct gendisk *p = data;
170 170
171 if (!get_disk(p)) 171 if (!get_disk(p))
172 return -1; 172 return -1;
173 return 0; 173 return 0;
174 } 174 }
175 175
176 /** 176 /**
177 * add_disk - add partitioning information to kernel list 177 * add_disk - add partitioning information to kernel list
178 * @disk: per-device partitioning information 178 * @disk: per-device partitioning information
179 * 179 *
180 * This function registers the partitioning information in @disk 180 * This function registers the partitioning information in @disk
181 * with the kernel. 181 * with the kernel.
182 */ 182 */
183 void add_disk(struct gendisk *disk) 183 void add_disk(struct gendisk *disk)
184 { 184 {
185 struct backing_dev_info *bdi; 185 struct backing_dev_info *bdi;
186 186
187 disk->flags |= GENHD_FL_UP; 187 disk->flags |= GENHD_FL_UP;
188 blk_register_region(MKDEV(disk->major, disk->first_minor), 188 blk_register_region(MKDEV(disk->major, disk->first_minor),
189 disk->minors, NULL, exact_match, exact_lock, disk); 189 disk->minors, NULL, exact_match, exact_lock, disk);
190 register_disk(disk); 190 register_disk(disk);
191 blk_register_queue(disk); 191 blk_register_queue(disk);
192 blk_register_filter(disk);
192 193
193 bdi = &disk->queue->backing_dev_info; 194 bdi = &disk->queue->backing_dev_info;
194 bdi_register_dev(bdi, MKDEV(disk->major, disk->first_minor)); 195 bdi_register_dev(bdi, MKDEV(disk->major, disk->first_minor));
195 sysfs_create_link(&disk->dev.kobj, &bdi->dev->kobj, "bdi"); 196 sysfs_create_link(&disk->dev.kobj, &bdi->dev->kobj, "bdi");
196 } 197 }
197 198
198 EXPORT_SYMBOL(add_disk); 199 EXPORT_SYMBOL(add_disk);
199 EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */ 200 EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */
200 201
201 void unlink_gendisk(struct gendisk *disk) 202 void unlink_gendisk(struct gendisk *disk)
202 { 203 {
204 blk_unregister_filter(disk);
203 sysfs_remove_link(&disk->dev.kobj, "bdi"); 205 sysfs_remove_link(&disk->dev.kobj, "bdi");
204 bdi_unregister(&disk->queue->backing_dev_info); 206 bdi_unregister(&disk->queue->backing_dev_info);
205 blk_unregister_queue(disk); 207 blk_unregister_queue(disk);
206 blk_unregister_region(MKDEV(disk->major, disk->first_minor), 208 blk_unregister_region(MKDEV(disk->major, disk->first_minor),
207 disk->minors); 209 disk->minors);
208 } 210 }
209 211
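The two one-line additions above bracket the disk's registered lifetime: blk_register_filter() runs in add_disk() after the queue is registered, and blk_unregister_filter() is the first step of unlink_gendisk(), so the cmd_filter directory exists for as long as the disk itself is visible in sysfs.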
210 /** 212 /**
211 * get_gendisk - get partitioning information for a given device 213 * get_gendisk - get partitioning information for a given device
212 * @dev: device to get partitioning information for 214 * @dev: device to get partitioning information for
213 * 215 *
214 * This function gets the structure containing partitioning 216 * This function gets the structure containing partitioning
215 * information for the given device @dev. 217 * information for the given device @dev.
216 */ 218 */
217 struct gendisk *get_gendisk(dev_t devt, int *part) 219 struct gendisk *get_gendisk(dev_t devt, int *part)
218 { 220 {
219 struct kobject *kobj = kobj_lookup(bdev_map, devt, part); 221 struct kobject *kobj = kobj_lookup(bdev_map, devt, part);
220 struct device *dev = kobj_to_dev(kobj); 222 struct device *dev = kobj_to_dev(kobj);
221 223
222 return kobj ? dev_to_disk(dev) : NULL; 224 return kobj ? dev_to_disk(dev) : NULL;
223 } 225 }
224 226
225 /* 227 /*
226 * print a full list of all partitions - intended for places where the root 228 * print a full list of all partitions - intended for places where the root
227 * filesystem can't be mounted and thus to give the victim some idea of what 229 * filesystem can't be mounted and thus to give the victim some idea of what
228 * went wrong 230 * went wrong
229 */ 231 */
230 void __init printk_all_partitions(void) 232 void __init printk_all_partitions(void)
231 { 233 {
232 struct device *dev; 234 struct device *dev;
233 struct gendisk *sgp; 235 struct gendisk *sgp;
234 char buf[BDEVNAME_SIZE]; 236 char buf[BDEVNAME_SIZE];
235 int n; 237 int n;
236 238
237 mutex_lock(&block_class_lock); 239 mutex_lock(&block_class_lock);
238 /* For each block device... */ 240 /* For each block device... */
239 list_for_each_entry(dev, &block_class.devices, node) { 241 list_for_each_entry(dev, &block_class.devices, node) {
240 if (dev->type != &disk_type) 242 if (dev->type != &disk_type)
241 continue; 243 continue;
242 sgp = dev_to_disk(dev); 244 sgp = dev_to_disk(dev);
243 /* 245 /*
244 * Don't show empty devices or things that have been suppressed 246 * Don't show empty devices or things that have been suppressed
245 */ 247 */
246 if (get_capacity(sgp) == 0 || 248 if (get_capacity(sgp) == 0 ||
247 (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)) 249 (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
248 continue; 250 continue;
249 251
250 /* 252 /*
251 * Note, unlike /proc/partitions, I am showing the numbers in 253 * Note, unlike /proc/partitions, I am showing the numbers in
252 * hex - the same format as the root= option takes. 254 * hex - the same format as the root= option takes.
253 */ 255 */
254 printk("%02x%02x %10llu %s", 256 printk("%02x%02x %10llu %s",
255 sgp->major, sgp->first_minor, 257 sgp->major, sgp->first_minor,
256 (unsigned long long)get_capacity(sgp) >> 1, 258 (unsigned long long)get_capacity(sgp) >> 1,
257 disk_name(sgp, 0, buf)); 259 disk_name(sgp, 0, buf));
258 if (sgp->driverfs_dev != NULL && 260 if (sgp->driverfs_dev != NULL &&
259 sgp->driverfs_dev->driver != NULL) 261 sgp->driverfs_dev->driver != NULL)
260 printk(" driver: %s\n", 262 printk(" driver: %s\n",
261 sgp->driverfs_dev->driver->name); 263 sgp->driverfs_dev->driver->name);
262 else 264 else
263 printk(" (driver?)\n"); 265 printk(" (driver?)\n");
264 266
265 /* now show the partitions */ 267 /* now show the partitions */
266 for (n = 0; n < sgp->minors - 1; ++n) { 268 for (n = 0; n < sgp->minors - 1; ++n) {
267 if (sgp->part[n] == NULL) 269 if (sgp->part[n] == NULL)
268 continue; 270 continue;
269 if (sgp->part[n]->nr_sects == 0) 271 if (sgp->part[n]->nr_sects == 0)
270 continue; 272 continue;
271 printk(" %02x%02x %10llu %s\n", 273 printk(" %02x%02x %10llu %s\n",
272 sgp->major, n + 1 + sgp->first_minor, 274 sgp->major, n + 1 + sgp->first_minor,
273 (unsigned long long)sgp->part[n]->nr_sects >> 1, 275 (unsigned long long)sgp->part[n]->nr_sects >> 1,
274 disk_name(sgp, n + 1, buf)); 276 disk_name(sgp, n + 1, buf));
275 } 277 }
276 } 278 }
277 279
278 mutex_unlock(&block_class_lock); 280 mutex_unlock(&block_class_lock);
279 } 281 }
280 282
281 #ifdef CONFIG_PROC_FS 283 #ifdef CONFIG_PROC_FS
282 /* iterator */ 284 /* iterator */
283 static void *part_start(struct seq_file *part, loff_t *pos) 285 static void *part_start(struct seq_file *part, loff_t *pos)
284 { 286 {
285 loff_t k = *pos; 287 loff_t k = *pos;
286 struct device *dev; 288 struct device *dev;
287 289
288 mutex_lock(&block_class_lock); 290 mutex_lock(&block_class_lock);
289 list_for_each_entry(dev, &block_class.devices, node) { 291 list_for_each_entry(dev, &block_class.devices, node) {
290 if (dev->type != &disk_type) 292 if (dev->type != &disk_type)
291 continue; 293 continue;
292 if (!k--) 294 if (!k--)
293 return dev_to_disk(dev); 295 return dev_to_disk(dev);
294 } 296 }
295 return NULL; 297 return NULL;
296 } 298 }
297 299
298 static void *part_next(struct seq_file *part, void *v, loff_t *pos) 300 static void *part_next(struct seq_file *part, void *v, loff_t *pos)
299 { 301 {
300 struct gendisk *gp = v; 302 struct gendisk *gp = v;
301 struct device *dev; 303 struct device *dev;
302 ++*pos; 304 ++*pos;
303 list_for_each_entry(dev, &gp->dev.node, node) { 305 list_for_each_entry(dev, &gp->dev.node, node) {
304 if (&dev->node == &block_class.devices) 306 if (&dev->node == &block_class.devices)
305 return NULL; 307 return NULL;
306 if (dev->type == &disk_type) 308 if (dev->type == &disk_type)
307 return dev_to_disk(dev); 309 return dev_to_disk(dev);
308 } 310 }
309 return NULL; 311 return NULL;
310 } 312 }
311 313
312 static void part_stop(struct seq_file *part, void *v) 314 static void part_stop(struct seq_file *part, void *v)
313 { 315 {
314 mutex_unlock(&block_class_lock); 316 mutex_unlock(&block_class_lock);
315 } 317 }
316 318
317 static int show_partition(struct seq_file *part, void *v) 319 static int show_partition(struct seq_file *part, void *v)
318 { 320 {
319 struct gendisk *sgp = v; 321 struct gendisk *sgp = v;
320 int n; 322 int n;
321 char buf[BDEVNAME_SIZE]; 323 char buf[BDEVNAME_SIZE];
322 324
323 if (&sgp->dev.node == block_class.devices.next) 325 if (&sgp->dev.node == block_class.devices.next)
324 seq_puts(part, "major minor #blocks name\n\n"); 326 seq_puts(part, "major minor #blocks name\n\n");
325 327
326 /* Don't show non-partitionable removable devices or empty devices */ 328 /* Don't show non-partitionable removable devices or empty devices */
327 if (!get_capacity(sgp) || 329 if (!get_capacity(sgp) ||
328 (sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE))) 330 (sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE)))
329 return 0; 331 return 0;
330 if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO) 332 if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
331 return 0; 333 return 0;
332 334
333 /* show the full disk and all non-0 size partitions of it */ 335 /* show the full disk and all non-0 size partitions of it */
334 seq_printf(part, "%4d %4d %10llu %s\n", 336 seq_printf(part, "%4d %4d %10llu %s\n",
335 sgp->major, sgp->first_minor, 337 sgp->major, sgp->first_minor,
336 (unsigned long long)get_capacity(sgp) >> 1, 338 (unsigned long long)get_capacity(sgp) >> 1,
337 disk_name(sgp, 0, buf)); 339 disk_name(sgp, 0, buf));
338 for (n = 0; n < sgp->minors - 1; n++) { 340 for (n = 0; n < sgp->minors - 1; n++) {
339 if (!sgp->part[n]) 341 if (!sgp->part[n])
340 continue; 342 continue;
341 if (sgp->part[n]->nr_sects == 0) 343 if (sgp->part[n]->nr_sects == 0)
342 continue; 344 continue;
343 seq_printf(part, "%4d %4d %10llu %s\n", 345 seq_printf(part, "%4d %4d %10llu %s\n",
344 sgp->major, n + 1 + sgp->first_minor, 346 sgp->major, n + 1 + sgp->first_minor,
345 (unsigned long long)sgp->part[n]->nr_sects >> 1, 347 (unsigned long long)sgp->part[n]->nr_sects >> 1,
346 disk_name(sgp, n + 1, buf)); 348 disk_name(sgp, n + 1, buf));
347 } 349 }
348 350
349 return 0; 351 return 0;
350 } 352 }
351 353
352 const struct seq_operations partitions_op = { 354 const struct seq_operations partitions_op = {
353 .start = part_start, 355 .start = part_start,
354 .next = part_next, 356 .next = part_next,
355 .stop = part_stop, 357 .stop = part_stop,
356 .show = show_partition 358 .show = show_partition
357 }; 359 };
358 #endif 360 #endif
359 361
360 362
361 static struct kobject *base_probe(dev_t devt, int *part, void *data) 363 static struct kobject *base_probe(dev_t devt, int *part, void *data)
362 { 364 {
363 if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0) 365 if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
364 /* Make old-style 2.4 aliases work */ 366 /* Make old-style 2.4 aliases work */
365 request_module("block-major-%d", MAJOR(devt)); 367 request_module("block-major-%d", MAJOR(devt));
366 return NULL; 368 return NULL;
367 } 369 }
368 370
369 static int __init genhd_device_init(void) 371 static int __init genhd_device_init(void)
370 { 372 {
371 int error = class_register(&block_class); 373 int error = class_register(&block_class);
372 if (unlikely(error)) 374 if (unlikely(error))
373 return error; 375 return error;
374 bdev_map = kobj_map_init(base_probe, &block_class_lock); 376 bdev_map = kobj_map_init(base_probe, &block_class_lock);
375 blk_dev_init(); 377 blk_dev_init();
376 378
377 #ifndef CONFIG_SYSFS_DEPRECATED 379 #ifndef CONFIG_SYSFS_DEPRECATED
378 /* create top-level block dir */ 380 /* create top-level block dir */
379 block_depr = kobject_create_and_add("block", NULL); 381 block_depr = kobject_create_and_add("block", NULL);
380 #endif 382 #endif
381 return 0; 383 return 0;
382 } 384 }
383 385
384 subsys_initcall(genhd_device_init); 386 subsys_initcall(genhd_device_init);
385 387
386 static ssize_t disk_range_show(struct device *dev, 388 static ssize_t disk_range_show(struct device *dev,
387 struct device_attribute *attr, char *buf) 389 struct device_attribute *attr, char *buf)
388 { 390 {
389 struct gendisk *disk = dev_to_disk(dev); 391 struct gendisk *disk = dev_to_disk(dev);
390 392
391 return sprintf(buf, "%d\n", disk->minors); 393 return sprintf(buf, "%d\n", disk->minors);
392 } 394 }
393 395
394 static ssize_t disk_removable_show(struct device *dev, 396 static ssize_t disk_removable_show(struct device *dev,
395 struct device_attribute *attr, char *buf) 397 struct device_attribute *attr, char *buf)
396 { 398 {
397 struct gendisk *disk = dev_to_disk(dev); 399 struct gendisk *disk = dev_to_disk(dev);
398 400
399 return sprintf(buf, "%d\n", 401 return sprintf(buf, "%d\n",
400 (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0)); 402 (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
401 } 403 }
402 404
403 static ssize_t disk_ro_show(struct device *dev, 405 static ssize_t disk_ro_show(struct device *dev,
404 struct device_attribute *attr, char *buf) 406 struct device_attribute *attr, char *buf)
405 { 407 {
406 struct gendisk *disk = dev_to_disk(dev); 408 struct gendisk *disk = dev_to_disk(dev);
407 409
408 return sprintf(buf, "%d\n", disk->policy ? 1 : 0); 410 return sprintf(buf, "%d\n", disk->policy ? 1 : 0);
409 } 411 }
410 412
411 static ssize_t disk_size_show(struct device *dev, 413 static ssize_t disk_size_show(struct device *dev,
412 struct device_attribute *attr, char *buf) 414 struct device_attribute *attr, char *buf)
413 { 415 {
414 struct gendisk *disk = dev_to_disk(dev); 416 struct gendisk *disk = dev_to_disk(dev);
415 417
416 return sprintf(buf, "%llu\n", (unsigned long long)get_capacity(disk)); 418 return sprintf(buf, "%llu\n", (unsigned long long)get_capacity(disk));
417 } 419 }
418 420
419 static ssize_t disk_capability_show(struct device *dev, 421 static ssize_t disk_capability_show(struct device *dev,
420 struct device_attribute *attr, char *buf) 422 struct device_attribute *attr, char *buf)
421 { 423 {
422 struct gendisk *disk = dev_to_disk(dev); 424 struct gendisk *disk = dev_to_disk(dev);
423 425
424 return sprintf(buf, "%x\n", disk->flags); 426 return sprintf(buf, "%x\n", disk->flags);
425 } 427 }
426 428
427 static ssize_t disk_stat_show(struct device *dev, 429 static ssize_t disk_stat_show(struct device *dev,
428 struct device_attribute *attr, char *buf) 430 struct device_attribute *attr, char *buf)
429 { 431 {
430 struct gendisk *disk = dev_to_disk(dev); 432 struct gendisk *disk = dev_to_disk(dev);
431 433
432 preempt_disable(); 434 preempt_disable();
433 disk_round_stats(disk); 435 disk_round_stats(disk);
434 preempt_enable(); 436 preempt_enable();
435 return sprintf(buf, 437 return sprintf(buf,
436 "%8lu %8lu %8llu %8u " 438 "%8lu %8lu %8llu %8u "
437 "%8lu %8lu %8llu %8u " 439 "%8lu %8lu %8llu %8u "
438 "%8u %8u %8u" 440 "%8u %8u %8u"
439 "\n", 441 "\n",
440 disk_stat_read(disk, ios[READ]), 442 disk_stat_read(disk, ios[READ]),
441 disk_stat_read(disk, merges[READ]), 443 disk_stat_read(disk, merges[READ]),
442 (unsigned long long)disk_stat_read(disk, sectors[READ]), 444 (unsigned long long)disk_stat_read(disk, sectors[READ]),
443 jiffies_to_msecs(disk_stat_read(disk, ticks[READ])), 445 jiffies_to_msecs(disk_stat_read(disk, ticks[READ])),
444 disk_stat_read(disk, ios[WRITE]), 446 disk_stat_read(disk, ios[WRITE]),
445 disk_stat_read(disk, merges[WRITE]), 447 disk_stat_read(disk, merges[WRITE]),
446 (unsigned long long)disk_stat_read(disk, sectors[WRITE]), 448 (unsigned long long)disk_stat_read(disk, sectors[WRITE]),
447 jiffies_to_msecs(disk_stat_read(disk, ticks[WRITE])), 449 jiffies_to_msecs(disk_stat_read(disk, ticks[WRITE])),
448 disk->in_flight, 450 disk->in_flight,
449 jiffies_to_msecs(disk_stat_read(disk, io_ticks)), 451 jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
450 jiffies_to_msecs(disk_stat_read(disk, time_in_queue))); 452 jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
451 } 453 }
452 454
453 #ifdef CONFIG_FAIL_MAKE_REQUEST 455 #ifdef CONFIG_FAIL_MAKE_REQUEST
454 static ssize_t disk_fail_show(struct device *dev, 456 static ssize_t disk_fail_show(struct device *dev,
455 struct device_attribute *attr, char *buf) 457 struct device_attribute *attr, char *buf)
456 { 458 {
457 struct gendisk *disk = dev_to_disk(dev); 459 struct gendisk *disk = dev_to_disk(dev);
458 460
459 return sprintf(buf, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0); 461 return sprintf(buf, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
460 } 462 }
461 463
462 static ssize_t disk_fail_store(struct device *dev, 464 static ssize_t disk_fail_store(struct device *dev,
463 struct device_attribute *attr, 465 struct device_attribute *attr,
464 const char *buf, size_t count) 466 const char *buf, size_t count)
465 { 467 {
466 struct gendisk *disk = dev_to_disk(dev); 468 struct gendisk *disk = dev_to_disk(dev);
467 int i; 469 int i;
468 470
469 if (count > 0 && sscanf(buf, "%d", &i) > 0) { 471 if (count > 0 && sscanf(buf, "%d", &i) > 0) {
470 if (i == 0) 472 if (i == 0)
471 disk->flags &= ~GENHD_FL_FAIL; 473 disk->flags &= ~GENHD_FL_FAIL;
472 else 474 else
473 disk->flags |= GENHD_FL_FAIL; 475 disk->flags |= GENHD_FL_FAIL;
474 } 476 }
475 477
476 return count; 478 return count;
477 } 479 }
478 480
479 #endif 481 #endif
480 482
481 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); 483 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
482 static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); 484 static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
483 static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); 485 static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
484 static DEVICE_ATTR(size, S_IRUGO, disk_size_show, NULL); 486 static DEVICE_ATTR(size, S_IRUGO, disk_size_show, NULL);
485 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); 487 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
486 static DEVICE_ATTR(stat, S_IRUGO, disk_stat_show, NULL); 488 static DEVICE_ATTR(stat, S_IRUGO, disk_stat_show, NULL);
487 #ifdef CONFIG_FAIL_MAKE_REQUEST 489 #ifdef CONFIG_FAIL_MAKE_REQUEST
488 static struct device_attribute dev_attr_fail = 490 static struct device_attribute dev_attr_fail =
489 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, disk_fail_show, disk_fail_store); 491 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, disk_fail_show, disk_fail_store);
490 #endif 492 #endif
491 493
492 static struct attribute *disk_attrs[] = { 494 static struct attribute *disk_attrs[] = {
493 &dev_attr_range.attr, 495 &dev_attr_range.attr,
494 &dev_attr_removable.attr, 496 &dev_attr_removable.attr,
495 &dev_attr_ro.attr, 497 &dev_attr_ro.attr,
496 &dev_attr_size.attr, 498 &dev_attr_size.attr,
497 &dev_attr_capability.attr, 499 &dev_attr_capability.attr,
498 &dev_attr_stat.attr, 500 &dev_attr_stat.attr,
499 #ifdef CONFIG_FAIL_MAKE_REQUEST 501 #ifdef CONFIG_FAIL_MAKE_REQUEST
500 &dev_attr_fail.attr, 502 &dev_attr_fail.attr,
501 #endif 503 #endif
502 NULL 504 NULL
503 }; 505 };
504 506
505 static struct attribute_group disk_attr_group = { 507 static struct attribute_group disk_attr_group = {
506 .attrs = disk_attrs, 508 .attrs = disk_attrs,
507 }; 509 };
508 510
509 static struct attribute_group *disk_attr_groups[] = { 511 static struct attribute_group *disk_attr_groups[] = {
510 &disk_attr_group, 512 &disk_attr_group,
511 NULL 513 NULL
512 }; 514 };
513 515
514 static void disk_release(struct device *dev) 516 static void disk_release(struct device *dev)
515 { 517 {
516 struct gendisk *disk = dev_to_disk(dev); 518 struct gendisk *disk = dev_to_disk(dev);
517 519
518 kfree(disk->random); 520 kfree(disk->random);
519 kfree(disk->part); 521 kfree(disk->part);
520 free_disk_stats(disk); 522 free_disk_stats(disk);
521 kfree(disk); 523 kfree(disk);
522 } 524 }
523 struct class block_class = { 525 struct class block_class = {
524 .name = "block", 526 .name = "block",
525 }; 527 };
526 528
527 static struct device_type disk_type = { 529 static struct device_type disk_type = {
528 .name = "disk", 530 .name = "disk",
529 .groups = disk_attr_groups, 531 .groups = disk_attr_groups,
530 .release = disk_release, 532 .release = disk_release,
531 }; 533 };
532 534
533 /* 535 /*
534 * aggregate disk stat collector. Uses the same stats that the sysfs 536 * aggregate disk stat collector. Uses the same stats that the sysfs
535 * entries do, above, but makes them available through one seq_file. 537 * entries do, above, but makes them available through one seq_file.
536 * 538 *
537 * The output looks suspiciously like /proc/partitions with a bunch of 539 * The output looks suspiciously like /proc/partitions with a bunch of
538 * extra fields. 540 * extra fields.
539 */ 541 */
540 542
541 static void *diskstats_start(struct seq_file *part, loff_t *pos) 543 static void *diskstats_start(struct seq_file *part, loff_t *pos)
542 { 544 {
543 loff_t k = *pos; 545 loff_t k = *pos;
544 struct device *dev; 546 struct device *dev;
545 547
546 mutex_lock(&block_class_lock); 548 mutex_lock(&block_class_lock);
547 list_for_each_entry(dev, &block_class.devices, node) { 549 list_for_each_entry(dev, &block_class.devices, node) {
548 if (dev->type != &disk_type) 550 if (dev->type != &disk_type)
549 continue; 551 continue;
550 if (!k--) 552 if (!k--)
551 return dev_to_disk(dev); 553 return dev_to_disk(dev);
552 } 554 }
553 return NULL; 555 return NULL;
554 } 556 }
555 557
556 static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos) 558 static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
557 { 559 {
558 struct gendisk *gp = v; 560 struct gendisk *gp = v;
559 struct device *dev; 561 struct device *dev;
560 562
561 ++*pos; 563 ++*pos;
562 list_for_each_entry(dev, &gp->dev.node, node) { 564 list_for_each_entry(dev, &gp->dev.node, node) {
563 if (&dev->node == &block_class.devices) 565 if (&dev->node == &block_class.devices)
564 return NULL; 566 return NULL;
565 if (dev->type == &disk_type) 567 if (dev->type == &disk_type)
566 return dev_to_disk(dev); 568 return dev_to_disk(dev);
567 } 569 }
568 return NULL; 570 return NULL;
569 } 571 }
570 572
571 static void diskstats_stop(struct seq_file *part, void *v) 573 static void diskstats_stop(struct seq_file *part, void *v)
572 { 574 {
573 mutex_unlock(&block_class_lock); 575 mutex_unlock(&block_class_lock);
574 } 576 }
575 577
576 static int diskstats_show(struct seq_file *s, void *v) 578 static int diskstats_show(struct seq_file *s, void *v)
577 { 579 {
578 struct gendisk *gp = v; 580 struct gendisk *gp = v;
579 char buf[BDEVNAME_SIZE]; 581 char buf[BDEVNAME_SIZE];
580 int n = 0; 582 int n = 0;
581 583
582 /* 584 /*
583 if (&gp->dev.kobj.entry == block_class.devices.next) 585 if (&gp->dev.kobj.entry == block_class.devices.next)
584 seq_puts(s, "major minor name" 586 seq_puts(s, "major minor name"
585 " rio rmerge rsect ruse wio wmerge " 587 " rio rmerge rsect ruse wio wmerge "
586 "wsect wuse running use aveq" 588 "wsect wuse running use aveq"
587 "\n\n"); 589 "\n\n");
588 */ 590 */
589 591
590 preempt_disable(); 592 preempt_disable();
591 disk_round_stats(gp); 593 disk_round_stats(gp);
592 preempt_enable(); 594 preempt_enable();
593 seq_printf(s, "%4d %4d %s %lu %lu %llu %u %lu %lu %llu %u %u %u %u\n", 595 seq_printf(s, "%4d %4d %s %lu %lu %llu %u %lu %lu %llu %u %u %u %u\n",
594 gp->major, n + gp->first_minor, disk_name(gp, n, buf), 596 gp->major, n + gp->first_minor, disk_name(gp, n, buf),
595 disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]), 597 disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]),
596 (unsigned long long)disk_stat_read(gp, sectors[0]), 598 (unsigned long long)disk_stat_read(gp, sectors[0]),
597 jiffies_to_msecs(disk_stat_read(gp, ticks[0])), 599 jiffies_to_msecs(disk_stat_read(gp, ticks[0])),
598 disk_stat_read(gp, ios[1]), disk_stat_read(gp, merges[1]), 600 disk_stat_read(gp, ios[1]), disk_stat_read(gp, merges[1]),
599 (unsigned long long)disk_stat_read(gp, sectors[1]), 601 (unsigned long long)disk_stat_read(gp, sectors[1]),
600 jiffies_to_msecs(disk_stat_read(gp, ticks[1])), 602 jiffies_to_msecs(disk_stat_read(gp, ticks[1])),
601 gp->in_flight, 603 gp->in_flight,
602 jiffies_to_msecs(disk_stat_read(gp, io_ticks)), 604 jiffies_to_msecs(disk_stat_read(gp, io_ticks)),
603 jiffies_to_msecs(disk_stat_read(gp, time_in_queue))); 605 jiffies_to_msecs(disk_stat_read(gp, time_in_queue)));
604 606
605 /* now show all non-0 size partitions of it */ 607 /* now show all non-0 size partitions of it */
606 for (n = 0; n < gp->minors - 1; n++) { 608 for (n = 0; n < gp->minors - 1; n++) {
607 struct hd_struct *hd = gp->part[n]; 609 struct hd_struct *hd = gp->part[n];
608 610
609 if (!hd || !hd->nr_sects) 611 if (!hd || !hd->nr_sects)
610 continue; 612 continue;
611 613
612 preempt_disable(); 614 preempt_disable();
613 part_round_stats(hd); 615 part_round_stats(hd);
614 preempt_enable(); 616 preempt_enable();
615 seq_printf(s, "%4d %4d %s %lu %lu %llu " 617 seq_printf(s, "%4d %4d %s %lu %lu %llu "
616 "%u %lu %lu %llu %u %u %u %u\n", 618 "%u %lu %lu %llu %u %u %u %u\n",
617 gp->major, n + gp->first_minor + 1, 619 gp->major, n + gp->first_minor + 1,
618 disk_name(gp, n + 1, buf), 620 disk_name(gp, n + 1, buf),
619 part_stat_read(hd, ios[0]), 621 part_stat_read(hd, ios[0]),
620 part_stat_read(hd, merges[0]), 622 part_stat_read(hd, merges[0]),
621 (unsigned long long)part_stat_read(hd, sectors[0]), 623 (unsigned long long)part_stat_read(hd, sectors[0]),
622 jiffies_to_msecs(part_stat_read(hd, ticks[0])), 624 jiffies_to_msecs(part_stat_read(hd, ticks[0])),
623 part_stat_read(hd, ios[1]), 625 part_stat_read(hd, ios[1]),
624 part_stat_read(hd, merges[1]), 626 part_stat_read(hd, merges[1]),
625 (unsigned long long)part_stat_read(hd, sectors[1]), 627 (unsigned long long)part_stat_read(hd, sectors[1]),
626 jiffies_to_msecs(part_stat_read(hd, ticks[1])), 628 jiffies_to_msecs(part_stat_read(hd, ticks[1])),
627 hd->in_flight, 629 hd->in_flight,
628 jiffies_to_msecs(part_stat_read(hd, io_ticks)), 630 jiffies_to_msecs(part_stat_read(hd, io_ticks)),
629 jiffies_to_msecs(part_stat_read(hd, time_in_queue)) 631 jiffies_to_msecs(part_stat_read(hd, time_in_queue))
630 ); 632 );
631 } 633 }
632 634
633 return 0; 635 return 0;
634 } 636 }
635 637
636 const struct seq_operations diskstats_op = { 638 const struct seq_operations diskstats_op = {
637 .start = diskstats_start, 639 .start = diskstats_start,
638 .next = diskstats_next, 640 .next = diskstats_next,
639 .stop = diskstats_stop, 641 .stop = diskstats_stop,
640 .show = diskstats_show 642 .show = diskstats_show
641 }; 643 };
642 644
643 static void media_change_notify_thread(struct work_struct *work) 645 static void media_change_notify_thread(struct work_struct *work)
644 { 646 {
645 struct gendisk *gd = container_of(work, struct gendisk, async_notify); 647 struct gendisk *gd = container_of(work, struct gendisk, async_notify);
646 char event[] = "MEDIA_CHANGE=1"; 648 char event[] = "MEDIA_CHANGE=1";
647 char *envp[] = { event, NULL }; 649 char *envp[] = { event, NULL };
648 650
649 /* 651 /*
650 * set environment vars to indicate which event this is for 652 * set environment vars to indicate which event this is for
651 * so that user space will know to go check the media status. 653 * so that user space will know to go check the media status.
652 */ 654 */
653 kobject_uevent_env(&gd->dev.kobj, KOBJ_CHANGE, envp); 655 kobject_uevent_env(&gd->dev.kobj, KOBJ_CHANGE, envp);
654 put_device(gd->driverfs_dev); 656 put_device(gd->driverfs_dev);
655 } 657 }
656 658
657 #if 0 659 #if 0
658 void genhd_media_change_notify(struct gendisk *disk) 660 void genhd_media_change_notify(struct gendisk *disk)
659 { 661 {
660 get_device(disk->driverfs_dev); 662 get_device(disk->driverfs_dev);
661 schedule_work(&disk->async_notify); 663 schedule_work(&disk->async_notify);
662 } 664 }
663 EXPORT_SYMBOL_GPL(genhd_media_change_notify); 665 EXPORT_SYMBOL_GPL(genhd_media_change_notify);
664 #endif /* 0 */ 666 #endif /* 0 */
665 667
666 dev_t blk_lookup_devt(const char *name, int part) 668 dev_t blk_lookup_devt(const char *name, int part)
667 { 669 {
668 struct device *dev; 670 struct device *dev;
669 dev_t devt = MKDEV(0, 0); 671 dev_t devt = MKDEV(0, 0);
670 672
671 mutex_lock(&block_class_lock); 673 mutex_lock(&block_class_lock);
672 list_for_each_entry(dev, &block_class.devices, node) { 674 list_for_each_entry(dev, &block_class.devices, node) {
673 if (dev->type != &disk_type) 675 if (dev->type != &disk_type)
674 continue; 676 continue;
675 if (strcmp(dev->bus_id, name) == 0) { 677 if (strcmp(dev->bus_id, name) == 0) {
676 struct gendisk *disk = dev_to_disk(dev); 678 struct gendisk *disk = dev_to_disk(dev);
677 679
678 if (part < disk->minors) 680 if (part < disk->minors)
679 devt = MKDEV(MAJOR(dev->devt), 681 devt = MKDEV(MAJOR(dev->devt),
680 MINOR(dev->devt) + part); 682 MINOR(dev->devt) + part);
681 break; 683 break;
682 } 684 }
683 } 685 }
684 mutex_unlock(&block_class_lock); 686 mutex_unlock(&block_class_lock);
685 687
686 return devt; 688 return devt;
687 } 689 }
688 EXPORT_SYMBOL(blk_lookup_devt); 690 EXPORT_SYMBOL(blk_lookup_devt);
689 691
690 struct gendisk *alloc_disk(int minors) 692 struct gendisk *alloc_disk(int minors)
691 { 693 {
692 return alloc_disk_node(minors, -1); 694 return alloc_disk_node(minors, -1);
693 } 695 }
694 696
695 struct gendisk *alloc_disk_node(int minors, int node_id) 697 struct gendisk *alloc_disk_node(int minors, int node_id)
696 { 698 {
697 struct gendisk *disk; 699 struct gendisk *disk;
698 700
699 disk = kmalloc_node(sizeof(struct gendisk), 701 disk = kmalloc_node(sizeof(struct gendisk),
700 GFP_KERNEL | __GFP_ZERO, node_id); 702 GFP_KERNEL | __GFP_ZERO, node_id);
701 if (disk) { 703 if (disk) {
702 if (!init_disk_stats(disk)) { 704 if (!init_disk_stats(disk)) {
703 kfree(disk); 705 kfree(disk);
704 return NULL; 706 return NULL;
705 } 707 }
706 if (minors > 1) { 708 if (minors > 1) {
707 int size = (minors - 1) * sizeof(struct hd_struct *); 709 int size = (minors - 1) * sizeof(struct hd_struct *);
708 disk->part = kmalloc_node(size, 710 disk->part = kmalloc_node(size,
709 GFP_KERNEL | __GFP_ZERO, node_id); 711 GFP_KERNEL | __GFP_ZERO, node_id);
710 if (!disk->part) { 712 if (!disk->part) {
711 free_disk_stats(disk); 713 free_disk_stats(disk);
712 kfree(disk); 714 kfree(disk);
713 return NULL; 715 return NULL;
714 } 716 }
715 } 717 }
716 disk->minors = minors; 718 disk->minors = minors;
717 rand_initialize_disk(disk); 719 rand_initialize_disk(disk);
718 disk->dev.class = &block_class; 720 disk->dev.class = &block_class;
719 disk->dev.type = &disk_type; 721 disk->dev.type = &disk_type;
720 device_initialize(&disk->dev); 722 device_initialize(&disk->dev);
721 INIT_WORK(&disk->async_notify, 723 INIT_WORK(&disk->async_notify,
722 media_change_notify_thread); 724 media_change_notify_thread);
723 } 725 }
724 return disk; 726 return disk;
725 } 727 }
726 728
727 EXPORT_SYMBOL(alloc_disk); 729 EXPORT_SYMBOL(alloc_disk);
728 EXPORT_SYMBOL(alloc_disk_node); 730 EXPORT_SYMBOL(alloc_disk_node);
729 731
730 struct kobject *get_disk(struct gendisk *disk) 732 struct kobject *get_disk(struct gendisk *disk)
731 { 733 {
732 struct module *owner; 734 struct module *owner;
733 struct kobject *kobj; 735 struct kobject *kobj;
734 736
735 if (!disk->fops) 737 if (!disk->fops)
736 return NULL; 738 return NULL;
737 owner = disk->fops->owner; 739 owner = disk->fops->owner;
738 if (owner && !try_module_get(owner)) 740 if (owner && !try_module_get(owner))
739 return NULL; 741 return NULL;
740 kobj = kobject_get(&disk->dev.kobj); 742 kobj = kobject_get(&disk->dev.kobj);
741 if (kobj == NULL) { 743 if (kobj == NULL) {
742 module_put(owner); 744 module_put(owner);
743 return NULL; 745 return NULL;
744 } 746 }
745 return kobj; 747 return kobj;
746 748
747 } 749 }
748 750
749 EXPORT_SYMBOL(get_disk); 751 EXPORT_SYMBOL(get_disk);
750 752
751 void put_disk(struct gendisk *disk) 753 void put_disk(struct gendisk *disk)
752 { 754 {
753 if (disk) 755 if (disk)
754 kobject_put(&disk->dev.kobj); 756 kobject_put(&disk->dev.kobj);
755 } 757 }
756 758
757 EXPORT_SYMBOL(put_disk); 759 EXPORT_SYMBOL(put_disk);
758 760
759 void set_device_ro(struct block_device *bdev, int flag) 761 void set_device_ro(struct block_device *bdev, int flag)
760 { 762 {
761 if (bdev->bd_contains != bdev) 763 if (bdev->bd_contains != bdev)
762 bdev->bd_part->policy = flag; 764 bdev->bd_part->policy = flag;
763 else 765 else
764 bdev->bd_disk->policy = flag; 766 bdev->bd_disk->policy = flag;
765 } 767 }
766 768
767 EXPORT_SYMBOL(set_device_ro); 769 EXPORT_SYMBOL(set_device_ro);
768 770
769 void set_disk_ro(struct gendisk *disk, int flag) 771 void set_disk_ro(struct gendisk *disk, int flag)
770 { 772 {
771 int i; 773 int i;
772 disk->policy = flag; 774 disk->policy = flag;
773 for (i = 0; i < disk->minors - 1; i++) 775 for (i = 0; i < disk->minors - 1; i++)
774 if (disk->part[i]) disk->part[i]->policy = flag; 776 if (disk->part[i]) disk->part[i]->policy = flag;
775 } 777 }
776 778
777 EXPORT_SYMBOL(set_disk_ro); 779 EXPORT_SYMBOL(set_disk_ro);
778 780
779 int bdev_read_only(struct block_device *bdev) 781 int bdev_read_only(struct block_device *bdev)
780 { 782 {
781 if (!bdev) 783 if (!bdev)
782 return 0; 784 return 0;
783 else if (bdev->bd_contains != bdev) 785 else if (bdev->bd_contains != bdev)
784 return bdev->bd_part->policy; 786 return bdev->bd_part->policy;
785 else 787 else
786 return bdev->bd_disk->policy; 788 return bdev->bd_disk->policy;
787 } 789 }
788 790
789 EXPORT_SYMBOL(bdev_read_only); 791 EXPORT_SYMBOL(bdev_read_only);
790 792
791 int invalidate_partition(struct gendisk *disk, int index) 793 int invalidate_partition(struct gendisk *disk, int index)
792 { 794 {
793 int res = 0; 795 int res = 0;
794 struct block_device *bdev = bdget_disk(disk, index); 796 struct block_device *bdev = bdget_disk(disk, index);
795 if (bdev) { 797 if (bdev) {
796 fsync_bdev(bdev); 798 fsync_bdev(bdev);
797 res = __invalidate_device(bdev); 799 res = __invalidate_device(bdev);
798 bdput(bdev); 800 bdput(bdev);
799 } 801 }
800 return res; 802 return res;
801 } 803 }
802 804
803 EXPORT_SYMBOL(invalidate_partition); 805 EXPORT_SYMBOL(invalidate_partition);
804 806
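The final hunk, in block/scsi_ioctl.c, removes the old global implementation: the static cmd_type[] table and the old two-argument blk_verify_command() go away, and the caller shown in this hunk, blk_fill_sghdr_rq(), now passes the struct file down so the per-disk filter can be found. Schematically, the call-site conversion looks like this (the surrounding lines are illustrative):

	/* before: write permission passed as a flag */
	if (blk_verify_command(rq->cmd, has_write_perm))
		return -EPERM;

	/* after: the file identifies the disk whose filter applies */
	if (blk_verify_command(file, rq->cmd))
		return -EPERM;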
1 /* 1 /*
2 * Copyright (C) 2001 Jens Axboe <axboe@suse.de> 2 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * This program is distributed in the hope that it will be useful, 8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * 10 *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
17 * 17 *
18 */ 18 */
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/errno.h> 20 #include <linux/errno.h>
21 #include <linux/string.h> 21 #include <linux/string.h>
22 #include <linux/module.h> 22 #include <linux/module.h>
23 #include <linux/blkdev.h> 23 #include <linux/blkdev.h>
24 #include <linux/capability.h> 24 #include <linux/capability.h>
25 #include <linux/completion.h> 25 #include <linux/completion.h>
26 #include <linux/cdrom.h> 26 #include <linux/cdrom.h>
27 #include <linux/slab.h> 27 #include <linux/slab.h>
28 #include <linux/times.h> 28 #include <linux/times.h>
29 #include <asm/uaccess.h> 29 #include <asm/uaccess.h>
30 30
31 #include <scsi/scsi.h> 31 #include <scsi/scsi.h>
32 #include <scsi/scsi_ioctl.h> 32 #include <scsi/scsi_ioctl.h>
33 #include <scsi/scsi_cmnd.h> 33 #include <scsi/scsi_cmnd.h>
34 34
35 /* Command group 3 is reserved and should never be used. */ 35 /* Command group 3 is reserved and should never be used. */
36 const unsigned char scsi_command_size_tbl[8] = 36 const unsigned char scsi_command_size_tbl[8] =
37 { 37 {
38 6, 10, 10, 12, 38 6, 10, 10, 12,
39 16, 12, 10, 10 39 16, 12, 10, 10
40 }; 40 };
41 EXPORT_SYMBOL(scsi_command_size_tbl); 41 EXPORT_SYMBOL(scsi_command_size_tbl);
42 42
43 #include <scsi/sg.h> 43 #include <scsi/sg.h>
44 44
45 static int sg_get_version(int __user *p) 45 static int sg_get_version(int __user *p)
46 { 46 {
47 static const int sg_version_num = 30527; 47 static const int sg_version_num = 30527;
48 return put_user(sg_version_num, p); 48 return put_user(sg_version_num, p);
49 } 49 }
50 50
51 static int scsi_get_idlun(struct request_queue *q, int __user *p) 51 static int scsi_get_idlun(struct request_queue *q, int __user *p)
52 { 52 {
53 return put_user(0, p); 53 return put_user(0, p);
54 } 54 }
55 55
56 static int scsi_get_bus(struct request_queue *q, int __user *p) 56 static int scsi_get_bus(struct request_queue *q, int __user *p)
57 { 57 {
58 return put_user(0, p); 58 return put_user(0, p);
59 } 59 }
60 60
61 static int sg_get_timeout(struct request_queue *q) 61 static int sg_get_timeout(struct request_queue *q)
62 { 62 {
63 return q->sg_timeout / (HZ / USER_HZ); 63 return q->sg_timeout / (HZ / USER_HZ);
64 } 64 }
65 65
66 static int sg_set_timeout(struct request_queue *q, int __user *p) 66 static int sg_set_timeout(struct request_queue *q, int __user *p)
67 { 67 {
68 int timeout, err = get_user(timeout, p); 68 int timeout, err = get_user(timeout, p);
69 69
70 if (!err) 70 if (!err)
71 q->sg_timeout = timeout * (HZ / USER_HZ); 71 q->sg_timeout = timeout * (HZ / USER_HZ);
72 72
73 return err; 73 return err;
74 } 74 }
75 75
76 static int sg_get_reserved_size(struct request_queue *q, int __user *p) 76 static int sg_get_reserved_size(struct request_queue *q, int __user *p)
77 { 77 {
78 unsigned val = min(q->sg_reserved_size, q->max_sectors << 9); 78 unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
79 79
80 return put_user(val, p); 80 return put_user(val, p);
81 } 81 }
82 82
83 static int sg_set_reserved_size(struct request_queue *q, int __user *p) 83 static int sg_set_reserved_size(struct request_queue *q, int __user *p)
84 { 84 {
85 int size, err = get_user(size, p); 85 int size, err = get_user(size, p);
86 86
87 if (err) 87 if (err)
88 return err; 88 return err;
89 89
90 if (size < 0) 90 if (size < 0)
91 return -EINVAL; 91 return -EINVAL;
92 if (size > (q->max_sectors << 9)) 92 if (size > (q->max_sectors << 9))
93 size = q->max_sectors << 9; 93 size = q->max_sectors << 9;
94 94
95 q->sg_reserved_size = size; 95 q->sg_reserved_size = size;
96 return 0; 96 return 0;
97 } 97 }
98 98
99 /* 99 /*
100 * will always return that we are ATAPI even for a real SCSI drive, I'm not 100 * will always return that we are ATAPI even for a real SCSI drive, I'm not
101 * so sure this is worth doing anything about (why would you care??) 101 * so sure this is worth doing anything about (why would you care??)
102 */ 102 */
103 static int sg_emulated_host(struct request_queue *q, int __user *p) 103 static int sg_emulated_host(struct request_queue *q, int __user *p)
104 { 104 {
105 return put_user(1, p); 105 return put_user(1, p);
106 } 106 }
107 107
108 #define CMD_READ_SAFE 0x01
109 #define CMD_WRITE_SAFE 0x02
110 #define CMD_WARNED 0x04
111 #define safe_for_read(cmd) [cmd] = CMD_READ_SAFE
112 #define safe_for_write(cmd) [cmd] = CMD_WRITE_SAFE
113
114 int blk_verify_command(unsigned char *cmd, int has_write_perm)
115 {
116 static unsigned char cmd_type[256] = {
117
118 /* Basic read-only commands */
119 safe_for_read(TEST_UNIT_READY),
120 safe_for_read(REQUEST_SENSE),
121 safe_for_read(READ_6),
122 safe_for_read(READ_10),
123 safe_for_read(READ_12),
124 safe_for_read(READ_16),
125 safe_for_read(READ_BUFFER),
126 safe_for_read(READ_DEFECT_DATA),
127 safe_for_read(READ_LONG),
128 safe_for_read(INQUIRY),
129 safe_for_read(MODE_SENSE),
130 safe_for_read(MODE_SENSE_10),
131 safe_for_read(LOG_SENSE),
132 safe_for_read(START_STOP),
133 safe_for_read(GPCMD_VERIFY_10),
134 safe_for_read(VERIFY_16),
135
136 /* Audio CD commands */
137 safe_for_read(GPCMD_PLAY_CD),
138 safe_for_read(GPCMD_PLAY_AUDIO_10),
139 safe_for_read(GPCMD_PLAY_AUDIO_MSF),
140 safe_for_read(GPCMD_PLAY_AUDIO_TI),
141 safe_for_read(GPCMD_PAUSE_RESUME),
142
143 /* CD/DVD data reading */
144 safe_for_read(GPCMD_READ_BUFFER_CAPACITY),
145 safe_for_read(GPCMD_READ_CD),
146 safe_for_read(GPCMD_READ_CD_MSF),
147 safe_for_read(GPCMD_READ_DISC_INFO),
148 safe_for_read(GPCMD_READ_CDVD_CAPACITY),
149 safe_for_read(GPCMD_READ_DVD_STRUCTURE),
150 safe_for_read(GPCMD_READ_HEADER),
151 safe_for_read(GPCMD_READ_TRACK_RZONE_INFO),
152 safe_for_read(GPCMD_READ_SUBCHANNEL),
153 safe_for_read(GPCMD_READ_TOC_PMA_ATIP),
154 safe_for_read(GPCMD_REPORT_KEY),
155 safe_for_read(GPCMD_SCAN),
156 safe_for_read(GPCMD_GET_CONFIGURATION),
157 safe_for_read(GPCMD_READ_FORMAT_CAPACITIES),
158 safe_for_read(GPCMD_GET_EVENT_STATUS_NOTIFICATION),
159 safe_for_read(GPCMD_GET_PERFORMANCE),
160 safe_for_read(GPCMD_SEEK),
161 safe_for_read(GPCMD_STOP_PLAY_SCAN),
162
163 /* Basic writing commands */
164 safe_for_write(WRITE_6),
165 safe_for_write(WRITE_10),
166 safe_for_write(WRITE_VERIFY),
167 safe_for_write(WRITE_12),
168 safe_for_write(WRITE_VERIFY_12),
169 safe_for_write(WRITE_16),
170 safe_for_write(WRITE_LONG),
171 safe_for_write(WRITE_LONG_2),
172 safe_for_write(ERASE),
173 safe_for_write(GPCMD_MODE_SELECT_10),
174 safe_for_write(MODE_SELECT),
175 safe_for_write(LOG_SELECT),
176 safe_for_write(GPCMD_BLANK),
177 safe_for_write(GPCMD_CLOSE_TRACK),
178 safe_for_write(GPCMD_FLUSH_CACHE),
179 safe_for_write(GPCMD_FORMAT_UNIT),
180 safe_for_write(GPCMD_REPAIR_RZONE_TRACK),
181 safe_for_write(GPCMD_RESERVE_RZONE_TRACK),
182 safe_for_write(GPCMD_SEND_DVD_STRUCTURE),
183 safe_for_write(GPCMD_SEND_EVENT),
184 safe_for_write(GPCMD_SEND_KEY),
185 safe_for_write(GPCMD_SEND_OPC),
186 safe_for_write(GPCMD_SEND_CUE_SHEET),
187 safe_for_write(GPCMD_SET_SPEED),
188 safe_for_write(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL),
189 safe_for_write(GPCMD_LOAD_UNLOAD),
190 safe_for_write(GPCMD_SET_STREAMING),
191 };
192 unsigned char type = cmd_type[cmd[0]];
193
194 /* Anybody who can open the device can do a read-safe command */
195 if (type & CMD_READ_SAFE)
196 return 0;
197
198 /* Write-safe commands just require a writable open.. */
199 if ((type & CMD_WRITE_SAFE) && has_write_perm)
200 return 0;
201
202 /* And root can do any command.. */
203 if (capable(CAP_SYS_RAWIO))
204 return 0;
205
206 if (!type) {
207 cmd_type[cmd[0]] = CMD_WARNED;
208 printk(KERN_WARNING "scsi: unknown opcode 0x%02x\n", cmd[0]);
209 }
210
211 /* Otherwise fail it with an "Operation not permitted" */
212 return -EPERM;
213 }
214 EXPORT_SYMBOL_GPL(blk_verify_command);
215
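
The table and checks removed here encode a three-tier policy: read-safe CDBs for anyone who can open the node, write-safe CDBs additionally for writable opens, and everything for CAP_SYS_RAWIO. A hedged userspace illustration of its effect, assuming an unprivileged read-only open of an example /dev/sg0 node:

/*
 * Hedged illustration: an unprivileged, read-only open may issue
 * INQUIRY (read-safe), but a WRITE_10 CDB is rejected at the filter.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

static int send_cdb(int fd, unsigned char *cdb, int len)
{
	struct sg_io_hdr hdr;
	unsigned char sense[32];

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmdp = cdb;
	hdr.cmd_len = len;
	hdr.dxfer_direction = SG_DXFER_NONE;	/* zero-length transfers */
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;			/* milliseconds */
	return ioctl(fd, SG_IO, &hdr);
}

int main(void)
{
	unsigned char inquiry[6]  = { 0x12, 0, 0, 0, 0, 0 };	/* INQUIRY, alloc len 0 */
	unsigned char write10[10] = { 0x2a, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int fd = open("/dev/sg0", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	if (send_cdb(fd, inquiry, 6) < 0)
		perror("INQUIRY");	/* read-safe: expected to pass the filter */
	if (send_cdb(fd, write10, 10) < 0)
		perror("WRITE_10");	/* expected: rejected for an unprivileged RO open */
	close(fd);
	return 0;
}
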
216 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, 108 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
217 struct sg_io_hdr *hdr, int has_write_perm) 109 struct sg_io_hdr *hdr, struct file *file)
218 { 110 {
219 if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) 111 if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
220 return -EFAULT; 112 return -EFAULT;
221 if (blk_verify_command(rq->cmd, has_write_perm)) 113 if (blk_verify_command(file, rq->cmd))
222 return -EPERM; 114 return -EPERM;
223 115
224 /* 116 /*
225 * fill in request structure 117 * fill in request structure
226 */ 118 */
227 rq->cmd_len = hdr->cmd_len; 119 rq->cmd_len = hdr->cmd_len;
228 rq->cmd_type = REQ_TYPE_BLOCK_PC; 120 rq->cmd_type = REQ_TYPE_BLOCK_PC;
229 121
230 rq->timeout = msecs_to_jiffies(hdr->timeout); 122 rq->timeout = msecs_to_jiffies(hdr->timeout);
231 if (!rq->timeout) 123 if (!rq->timeout)
232 rq->timeout = q->sg_timeout; 124 rq->timeout = q->sg_timeout;
233 if (!rq->timeout) 125 if (!rq->timeout)
234 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 126 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
235 127
236 return 0; 128 return 0;
237 } 129 }
238 130
239 /* 131 /*
240 * unmap a request that was previously mapped to this sg_io_hdr. handles 132 * unmap a request that was previously mapped to this sg_io_hdr. handles
241 * both sg and non-sg sg_io_hdr. 133 * both sg and non-sg sg_io_hdr.
242 */ 134 */
243 static int blk_unmap_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr) 135 static int blk_unmap_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr)
244 { 136 {
245 blk_rq_unmap_user(rq->bio); 137 blk_rq_unmap_user(rq->bio);
246 blk_put_request(rq); 138 blk_put_request(rq);
247 return 0; 139 return 0;
248 } 140 }
249 141
250 static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, 142 static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
251 struct bio *bio) 143 struct bio *bio)
252 { 144 {
253 int r, ret = 0; 145 int r, ret = 0;
254 146
255 /* 147 /*
256 * fill in all the output members 148 * fill in all the output members
257 */ 149 */
258 hdr->status = rq->errors & 0xff; 150 hdr->status = rq->errors & 0xff;
259 hdr->masked_status = status_byte(rq->errors); 151 hdr->masked_status = status_byte(rq->errors);
260 hdr->msg_status = msg_byte(rq->errors); 152 hdr->msg_status = msg_byte(rq->errors);
261 hdr->host_status = host_byte(rq->errors); 153 hdr->host_status = host_byte(rq->errors);
262 hdr->driver_status = driver_byte(rq->errors); 154 hdr->driver_status = driver_byte(rq->errors);
263 hdr->info = 0; 155 hdr->info = 0;
264 if (hdr->masked_status || hdr->host_status || hdr->driver_status) 156 if (hdr->masked_status || hdr->host_status || hdr->driver_status)
265 hdr->info |= SG_INFO_CHECK; 157 hdr->info |= SG_INFO_CHECK;
266 hdr->resid = rq->data_len; 158 hdr->resid = rq->data_len;
267 hdr->sb_len_wr = 0; 159 hdr->sb_len_wr = 0;
268 160
269 if (rq->sense_len && hdr->sbp) { 161 if (rq->sense_len && hdr->sbp) {
270 int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len); 162 int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
271 163
272 if (!copy_to_user(hdr->sbp, rq->sense, len)) 164 if (!copy_to_user(hdr->sbp, rq->sense, len))
273 hdr->sb_len_wr = len; 165 hdr->sb_len_wr = len;
274 else 166 else
275 ret = -EFAULT; 167 ret = -EFAULT;
276 } 168 }
277 169
278 rq->bio = bio; 170 rq->bio = bio;
279 r = blk_unmap_sghdr_rq(rq, hdr); 171 r = blk_unmap_sghdr_rq(rq, hdr);
280 if (ret) 172 if (ret)
281 r = ret; 173 r = ret;
282 174
283 return r; 175 return r;
284 } 176 }
285 177
286 static int sg_io(struct file *file, struct request_queue *q, 178 static int sg_io(struct file *file, struct request_queue *q,
287 struct gendisk *bd_disk, struct sg_io_hdr *hdr) 179 struct gendisk *bd_disk, struct sg_io_hdr *hdr)
288 { 180 {
289 unsigned long start_time; 181 unsigned long start_time;
290 int writing = 0, ret = 0, has_write_perm = 0; 182 int writing = 0, ret = 0;
291 struct request *rq; 183 struct request *rq;
292 char sense[SCSI_SENSE_BUFFERSIZE]; 184 char sense[SCSI_SENSE_BUFFERSIZE];
293 struct bio *bio; 185 struct bio *bio;
294 186
295 if (hdr->interface_id != 'S') 187 if (hdr->interface_id != 'S')
296 return -EINVAL; 188 return -EINVAL;
297 if (hdr->cmd_len > BLK_MAX_CDB) 189 if (hdr->cmd_len > BLK_MAX_CDB)
298 return -EINVAL; 190 return -EINVAL;
299 191
300 if (hdr->dxfer_len > (q->max_hw_sectors << 9)) 192 if (hdr->dxfer_len > (q->max_hw_sectors << 9))
301 return -EIO; 193 return -EIO;
302 194
303 if (hdr->dxfer_len) 195 if (hdr->dxfer_len)
304 switch (hdr->dxfer_direction) { 196 switch (hdr->dxfer_direction) {
305 default: 197 default:
306 return -EINVAL; 198 return -EINVAL;
307 case SG_DXFER_TO_DEV: 199 case SG_DXFER_TO_DEV:
308 writing = 1; 200 writing = 1;
309 break; 201 break;
310 case SG_DXFER_TO_FROM_DEV: 202 case SG_DXFER_TO_FROM_DEV:
311 case SG_DXFER_FROM_DEV: 203 case SG_DXFER_FROM_DEV:
312 break; 204 break;
313 } 205 }
314 206
315 rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); 207 rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
316 if (!rq) 208 if (!rq)
317 return -ENOMEM; 209 return -ENOMEM;
318 210
319 if (file) 211 if (blk_fill_sghdr_rq(q, rq, hdr, file)) {
320 has_write_perm = file->f_mode & FMODE_WRITE;
321
322 if (blk_fill_sghdr_rq(q, rq, hdr, has_write_perm)) {
323 blk_put_request(rq); 212 blk_put_request(rq);
324 return -EFAULT; 213 return -EFAULT;
325 } 214 }
326 215
327 if (hdr->iovec_count) { 216 if (hdr->iovec_count) {
328 const int size = sizeof(struct sg_iovec) * hdr->iovec_count; 217 const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
329 struct sg_iovec *iov; 218 struct sg_iovec *iov;
330 219
331 iov = kmalloc(size, GFP_KERNEL); 220 iov = kmalloc(size, GFP_KERNEL);
332 if (!iov) { 221 if (!iov) {
333 ret = -ENOMEM; 222 ret = -ENOMEM;
334 goto out; 223 goto out;
335 } 224 }
336 225
337 if (copy_from_user(iov, hdr->dxferp, size)) { 226 if (copy_from_user(iov, hdr->dxferp, size)) {
338 kfree(iov); 227 kfree(iov);
339 ret = -EFAULT; 228 ret = -EFAULT;
340 goto out; 229 goto out;
341 } 230 }
342 231
343 ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count, 232 ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
344 hdr->dxfer_len); 233 hdr->dxfer_len);
345 kfree(iov); 234 kfree(iov);
346 } else if (hdr->dxfer_len) 235 } else if (hdr->dxfer_len)
347 ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len); 236 ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
348 237
349 if (ret) 238 if (ret)
350 goto out; 239 goto out;
351 240
352 bio = rq->bio; 241 bio = rq->bio;
353 memset(sense, 0, sizeof(sense)); 242 memset(sense, 0, sizeof(sense));
354 rq->sense = sense; 243 rq->sense = sense;
355 rq->sense_len = 0; 244 rq->sense_len = 0;
356 rq->retries = 0; 245 rq->retries = 0;
357 246
358 start_time = jiffies; 247 start_time = jiffies;
359 248
360 /* ignore return value. All information is passed back to caller 249 /* ignore return value. All information is passed back to caller
361 * (if he doesn't check, that is his problem). 250 * (if he doesn't check, that is his problem).
362 * N.B. a non-zero SCSI status is _not_ necessarily an error. 251 * N.B. a non-zero SCSI status is _not_ necessarily an error.
363 */ 252 */
364 blk_execute_rq(q, bd_disk, rq, 0); 253 blk_execute_rq(q, bd_disk, rq, 0);
365 254
366 hdr->duration = jiffies_to_msecs(jiffies - start_time); 255 hdr->duration = jiffies_to_msecs(jiffies - start_time);
367 256
368 return blk_complete_sghdr_rq(rq, hdr, bio); 257 return blk_complete_sghdr_rq(rq, hdr, bio);
369 out: 258 out:
370 blk_put_request(rq); 259 blk_put_request(rq);
371 return ret; 260 return ret;
372 } 261 }
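
For reference, a hedged userspace counterpart to sg_io() above: a 36-byte INQUIRY via SG_IO, checking the residual count and the decomposed status fields that blk_complete_sghdr_rq() fills in. /dev/sg0 is an example node.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 };	/* INQUIRY */
	unsigned char buf[36], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sg0", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;				/* milliseconds */
	if (ioctl(fd, SG_IO, &hdr) < 0) {
		perror("SG_IO");
		return 1;
	}
	if (hdr.info & SG_INFO_CHECK)	/* some status byte was non-zero */
		fprintf(stderr, "status=%#x host=%#x driver=%#x\n",
			hdr.masked_status, hdr.host_status, hdr.driver_status);
	else
		printf("vendor: %.8s (%d of %d bytes)\n", (char *)buf + 8,
		       (int)sizeof(buf) - hdr.resid, (int)sizeof(buf));
	close(fd);
	return 0;
}
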
373 262
374 /** 263 /**
375 * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl 264 * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
376 * @file: file this ioctl operates on (optional) 265 * @file: file this ioctl operates on (optional)
377 * @q: request queue to send scsi commands down 266 * @q: request queue to send scsi commands down
378 * @disk: gendisk to operate on (optional) 267 * @disk: gendisk to operate on (optional)
379 * @sic: userspace structure describing the command to perform 268 * @sic: userspace structure describing the command to perform
380 * 269 *
381 * Send down the scsi command described by @sic to the device below 270 * Send down the scsi command described by @sic to the device below
382 * the request queue @q. If @file is non-NULL it's used to perform 271 * the request queue @q. If @file is non-NULL it's used to perform
383 * fine-grained permission checks that allow users to send down 272 * fine-grained permission checks that allow users to send down
384 * non-destructive SCSI commands. If the caller has a struct gendisk 273 * non-destructive SCSI commands. If the caller has a struct gendisk
385 * available it should be passed in as @disk to allow the low level 274 * available it should be passed in as @disk to allow the low level
386 * driver to use the information contained in it. A NULL @disk 275 * driver to use the information contained in it. A NULL @disk
387 * is only allowed if the caller knows that the low level driver doesn't 276 * is only allowed if the caller knows that the low level driver doesn't
388 * need it (e.g. in the scsi subsystem). 277 * need it (e.g. in the scsi subsystem).
389 * 278 *
390 * Notes: 279 * Notes:
391 * - This interface is deprecated - users should use the SG_IO 280 * - This interface is deprecated - users should use the SG_IO
392 * interface instead, as this is a more flexible approach to 281 * interface instead, as this is a more flexible approach to
393 * performing SCSI commands on a device. 282 * performing SCSI commands on a device.
394 * - The SCSI command length is determined by examining the 1st byte 283 * - The SCSI command length is determined by examining the 1st byte
395 * of the given command. There is no way to override this. 284 * of the given command. There is no way to override this.
396 * - Data transfers are limited to PAGE_SIZE 285 * - Data transfers are limited to PAGE_SIZE
397 * - The length (x + y) must be at least OMAX_SB_LEN bytes long to 286 * - The length (x + y) must be at least OMAX_SB_LEN bytes long to
398 * accommodate the sense buffer when an error occurs. 287 * accommodate the sense buffer when an error occurs.
399 * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that 288 * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
400 * old code will not be surprised. 289 * old code will not be surprised.
401 * - If a Unix error occurs (e.g. ENOMEM) then the user will receive 290 * - If a Unix error occurs (e.g. ENOMEM) then the user will receive
402 * a negative return and the Unix error code in 'errno'. 291 * a negative return and the Unix error code in 'errno'.
403 * If the SCSI command succeeds then 0 is returned. 292 * If the SCSI command succeeds then 0 is returned.
404 * Positive numbers returned are the compacted SCSI error codes (4 293 * Positive numbers returned are the compacted SCSI error codes (4
405 * bytes in one int) where the lowest byte is the SCSI status. 294 * bytes in one int) where the lowest byte is the SCSI status.
406 */ 295 */
407 #define OMAX_SB_LEN 16 /* For backward compatibility */ 296 #define OMAX_SB_LEN 16 /* For backward compatibility */
408 int sg_scsi_ioctl(struct file *file, struct request_queue *q, 297 int sg_scsi_ioctl(struct file *file, struct request_queue *q,
409 struct gendisk *disk, struct scsi_ioctl_command __user *sic) 298 struct gendisk *disk, struct scsi_ioctl_command __user *sic)
410 { 299 {
411 struct request *rq; 300 struct request *rq;
412 int err; 301 int err;
413 unsigned int in_len, out_len, bytes, opcode, cmdlen; 302 unsigned int in_len, out_len, bytes, opcode, cmdlen;
414 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE]; 303 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
415 304
416 if (!sic) 305 if (!sic)
417 return -EINVAL; 306 return -EINVAL;
418 307
419 /* 308 /*
420 * get in and out lengths, verify they don't exceed a page worth of data 309 * get in and out lengths, verify they don't exceed a page worth of data
421 */ 310 */
422 if (get_user(in_len, &sic->inlen)) 311 if (get_user(in_len, &sic->inlen))
423 return -EFAULT; 312 return -EFAULT;
424 if (get_user(out_len, &sic->outlen)) 313 if (get_user(out_len, &sic->outlen))
425 return -EFAULT; 314 return -EFAULT;
426 if (in_len > PAGE_SIZE || out_len > PAGE_SIZE) 315 if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
427 return -EINVAL; 316 return -EINVAL;
428 if (get_user(opcode, sic->data)) 317 if (get_user(opcode, sic->data))
429 return -EFAULT; 318 return -EFAULT;
430 319
431 bytes = max(in_len, out_len); 320 bytes = max(in_len, out_len);
432 if (bytes) { 321 if (bytes) {
433 buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN); 322 buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
434 if (!buffer) 323 if (!buffer)
435 return -ENOMEM; 324 return -ENOMEM;
436 325
437 } 326 }
438 327
439 rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); 328 rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
440 329
441 cmdlen = COMMAND_SIZE(opcode); 330 cmdlen = COMMAND_SIZE(opcode);
442 331
443 /* 332 /*
444 * get command and data to send to device, if any 333 * get command and data to send to device, if any
445 */ 334 */
446 err = -EFAULT; 335 err = -EFAULT;
447 rq->cmd_len = cmdlen; 336 rq->cmd_len = cmdlen;
448 if (copy_from_user(rq->cmd, sic->data, cmdlen)) 337 if (copy_from_user(rq->cmd, sic->data, cmdlen))
449 goto error; 338 goto error;
450 339
451 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) 340 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
452 goto error; 341 goto error;
453 342
454 err = blk_verify_command(rq->cmd, file->f_mode & FMODE_WRITE); 343 err = blk_verify_command(file, rq->cmd);
455 if (err) 344 if (err)
456 goto error; 345 goto error;
457 346
458 /* default; possibly overridden later */ 347 /* default; possibly overridden later */
459 rq->retries = 5; 348 rq->retries = 5;
460 349
461 switch (opcode) { 350 switch (opcode) {
462 case SEND_DIAGNOSTIC: 351 case SEND_DIAGNOSTIC:
463 case FORMAT_UNIT: 352 case FORMAT_UNIT:
464 rq->timeout = FORMAT_UNIT_TIMEOUT; 353 rq->timeout = FORMAT_UNIT_TIMEOUT;
465 rq->retries = 1; 354 rq->retries = 1;
466 break; 355 break;
467 case START_STOP: 356 case START_STOP:
468 rq->timeout = START_STOP_TIMEOUT; 357 rq->timeout = START_STOP_TIMEOUT;
469 break; 358 break;
470 case MOVE_MEDIUM: 359 case MOVE_MEDIUM:
471 rq->timeout = MOVE_MEDIUM_TIMEOUT; 360 rq->timeout = MOVE_MEDIUM_TIMEOUT;
472 break; 361 break;
473 case READ_ELEMENT_STATUS: 362 case READ_ELEMENT_STATUS:
474 rq->timeout = READ_ELEMENT_STATUS_TIMEOUT; 363 rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
475 break; 364 break;
476 case READ_DEFECT_DATA: 365 case READ_DEFECT_DATA:
477 rq->timeout = READ_DEFECT_DATA_TIMEOUT; 366 rq->timeout = READ_DEFECT_DATA_TIMEOUT;
478 rq->retries = 1; 367 rq->retries = 1;
479 break; 368 break;
480 default: 369 default:
481 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 370 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
482 break; 371 break;
483 } 372 }
484 373
485 if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) { 374 if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
486 err = DRIVER_ERROR << 24; 375 err = DRIVER_ERROR << 24;
487 goto out; 376 goto out;
488 } 377 }
489 378
490 memset(sense, 0, sizeof(sense)); 379 memset(sense, 0, sizeof(sense));
491 rq->sense = sense; 380 rq->sense = sense;
492 rq->sense_len = 0; 381 rq->sense_len = 0;
493 rq->cmd_type = REQ_TYPE_BLOCK_PC; 382 rq->cmd_type = REQ_TYPE_BLOCK_PC;
494 383
495 blk_execute_rq(q, disk, rq, 0); 384 blk_execute_rq(q, disk, rq, 0);
496 385
497 out: 386 out:
498 err = rq->errors & 0xff; /* only 8 bit SCSI status */ 387 err = rq->errors & 0xff; /* only 8 bit SCSI status */
499 if (err) { 388 if (err) {
500 if (rq->sense_len && rq->sense) { 389 if (rq->sense_len && rq->sense) {
501 bytes = (OMAX_SB_LEN > rq->sense_len) ? 390 bytes = (OMAX_SB_LEN > rq->sense_len) ?
502 rq->sense_len : OMAX_SB_LEN; 391 rq->sense_len : OMAX_SB_LEN;
503 if (copy_to_user(sic->data, rq->sense, bytes)) 392 if (copy_to_user(sic->data, rq->sense, bytes))
504 err = -EFAULT; 393 err = -EFAULT;
505 } 394 }
506 } else { 395 } else {
507 if (copy_to_user(sic->data, buffer, out_len)) 396 if (copy_to_user(sic->data, buffer, out_len))
508 err = -EFAULT; 397 err = -EFAULT;
509 } 398 }
510 399
511 error: 400 error:
512 kfree(buffer); 401 kfree(buffer);
513 blk_put_request(rq); 402 blk_put_request(rq);
514 return err; 403 return err;
515 } 404 }
516 EXPORT_SYMBOL_GPL(sg_scsi_ioctl); 405 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
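
A hedged sketch of the deprecated layout documented in the notes above: inlen and outlen come first, then the CDB followed by any write data, and on success the response overwrites the data area. The local struct mirrors the kernel's scsi_ioctl_command layout, /dev/sg0 is an example node, and new code should use SG_IO instead.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/scsi_ioctl.h>		/* SCSI_IOCTL_SEND_COMMAND */

struct send_cmd {
	unsigned int inlen;		/* bytes sent to the device */
	unsigned int outlen;		/* bytes expected back */
	unsigned char data[6 + 36];	/* CDB, then in/out data */
};

int main(void)
{
	struct send_cmd c;
	int fd = open("/dev/sg0", O_RDWR), err;

	if (fd < 0)
		return 1;
	memset(&c, 0, sizeof(c));
	c.outlen = 36;				/* INQUIRY response length */
	c.data[0] = 0x12;			/* INQUIRY opcode; cmdlen derived from it */
	c.data[4] = 36;				/* allocation length */
	err = ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &c);
	if (err < 0)
		perror("SCSI_IOCTL_SEND_COMMAND");
	else if (err > 0)			/* compacted status; low byte = SCSI status */
		fprintf(stderr, "scsi status %#x\n", err & 0xff);
	else
		printf("vendor: %.8s\n", (char *)c.data + 8);
	close(fd);
	return 0;
}
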
517 406
518 /* Send basic block requests */ 407 /* Send basic block requests */
519 static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, 408 static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
520 int cmd, int data) 409 int cmd, int data)
521 { 410 {
522 struct request *rq; 411 struct request *rq;
523 int err; 412 int err;
524 413
525 rq = blk_get_request(q, WRITE, __GFP_WAIT); 414 rq = blk_get_request(q, WRITE, __GFP_WAIT);
526 rq->cmd_type = REQ_TYPE_BLOCK_PC; 415 rq->cmd_type = REQ_TYPE_BLOCK_PC;
527 rq->data = NULL; 416 rq->data = NULL;
528 rq->data_len = 0; 417 rq->data_len = 0;
529 rq->extra_len = 0; 418 rq->extra_len = 0;
530 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 419 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
531 rq->cmd[0] = cmd; 420 rq->cmd[0] = cmd;
532 rq->cmd[4] = data; 421 rq->cmd[4] = data;
533 rq->cmd_len = 6; 422 rq->cmd_len = 6;
534 err = blk_execute_rq(q, bd_disk, rq, 0); 423 err = blk_execute_rq(q, bd_disk, rq, 0);
535 blk_put_request(rq); 424 blk_put_request(rq);
536 425
537 return err; 426 return err;
538 } 427 }
539 428
540 static inline int blk_send_start_stop(struct request_queue *q, 429 static inline int blk_send_start_stop(struct request_queue *q,
541 struct gendisk *bd_disk, int data) 430 struct gendisk *bd_disk, int data)
542 { 431 {
543 return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); 432 return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
544 } 433 }
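
For reference, cmd[4] above carries the START STOP UNIT flag bits (a standard SBC/MMC encoding, not spelled out in this file): bit 1 is LoEj and bit 0 is Start, hence 0x02 for CDROMEJECT and 0x03 for CDROMCLOSETRAY below. An annotated sketch of the resulting 6-byte CDB:

#include <stdio.h>

int main(void)
{
	unsigned char cdb[6] = {
		0x1b,		/* GPCMD_START_STOP_UNIT opcode */
		0x00,		/* IMMED = 0: wait for completion */
		0x00, 0x00,	/* reserved */
		0x02,		/* LoEj=1, Start=0 -> eject; 0x03 closes the tray */
		0x00,		/* control byte */
	};
	printf("START STOP UNIT, byte 4 = %#x\n", cdb[4]);
	return 0;
}
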
545 434
546 int scsi_cmd_ioctl(struct file *file, struct request_queue *q, 435 int scsi_cmd_ioctl(struct file *file, struct request_queue *q,
547 struct gendisk *bd_disk, unsigned int cmd, void __user *arg) 436 struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
548 { 437 {
549 int err; 438 int err;
550 439
551 if (!q || blk_get_queue(q)) 440 if (!q || blk_get_queue(q))
552 return -ENXIO; 441 return -ENXIO;
553 442
554 switch (cmd) { 443 switch (cmd) {
555 /* 444 /*
556 * new sgv3 interface 445 * new sgv3 interface
557 */ 446 */
558 case SG_GET_VERSION_NUM: 447 case SG_GET_VERSION_NUM:
559 err = sg_get_version(arg); 448 err = sg_get_version(arg);
560 break; 449 break;
561 case SCSI_IOCTL_GET_IDLUN: 450 case SCSI_IOCTL_GET_IDLUN:
562 err = scsi_get_idlun(q, arg); 451 err = scsi_get_idlun(q, arg);
563 break; 452 break;
564 case SCSI_IOCTL_GET_BUS_NUMBER: 453 case SCSI_IOCTL_GET_BUS_NUMBER:
565 err = scsi_get_bus(q, arg); 454 err = scsi_get_bus(q, arg);
566 break; 455 break;
567 case SG_SET_TIMEOUT: 456 case SG_SET_TIMEOUT:
568 err = sg_set_timeout(q, arg); 457 err = sg_set_timeout(q, arg);
569 break; 458 break;
570 case SG_GET_TIMEOUT: 459 case SG_GET_TIMEOUT:
571 err = sg_get_timeout(q); 460 err = sg_get_timeout(q);
572 break; 461 break;
573 case SG_GET_RESERVED_SIZE: 462 case SG_GET_RESERVED_SIZE:
574 err = sg_get_reserved_size(q, arg); 463 err = sg_get_reserved_size(q, arg);
575 break; 464 break;
576 case SG_SET_RESERVED_SIZE: 465 case SG_SET_RESERVED_SIZE:
577 err = sg_set_reserved_size(q, arg); 466 err = sg_set_reserved_size(q, arg);
578 break; 467 break;
579 case SG_EMULATED_HOST: 468 case SG_EMULATED_HOST:
580 err = sg_emulated_host(q, arg); 469 err = sg_emulated_host(q, arg);
581 break; 470 break;
582 case SG_IO: { 471 case SG_IO: {
583 struct sg_io_hdr hdr; 472 struct sg_io_hdr hdr;
584 473
585 err = -EFAULT; 474 err = -EFAULT;
586 if (copy_from_user(&hdr, arg, sizeof(hdr))) 475 if (copy_from_user(&hdr, arg, sizeof(hdr)))
587 break; 476 break;
588 err = sg_io(file, q, bd_disk, &hdr); 477 err = sg_io(file, q, bd_disk, &hdr);
589 if (err == -EFAULT) 478 if (err == -EFAULT)
590 break; 479 break;
591 480
592 if (copy_to_user(arg, &hdr, sizeof(hdr))) 481 if (copy_to_user(arg, &hdr, sizeof(hdr)))
593 err = -EFAULT; 482 err = -EFAULT;
594 break; 483 break;
595 } 484 }
596 case CDROM_SEND_PACKET: { 485 case CDROM_SEND_PACKET: {
597 struct cdrom_generic_command cgc; 486 struct cdrom_generic_command cgc;
598 struct sg_io_hdr hdr; 487 struct sg_io_hdr hdr;
599 488
600 err = -EFAULT; 489 err = -EFAULT;
601 if (copy_from_user(&cgc, arg, sizeof(cgc))) 490 if (copy_from_user(&cgc, arg, sizeof(cgc)))
602 break; 491 break;
603 cgc.timeout = clock_t_to_jiffies(cgc.timeout); 492 cgc.timeout = clock_t_to_jiffies(cgc.timeout);
604 memset(&hdr, 0, sizeof(hdr)); 493 memset(&hdr, 0, sizeof(hdr));
605 hdr.interface_id = 'S'; 494 hdr.interface_id = 'S';
606 hdr.cmd_len = sizeof(cgc.cmd); 495 hdr.cmd_len = sizeof(cgc.cmd);
607 hdr.dxfer_len = cgc.buflen; 496 hdr.dxfer_len = cgc.buflen;
608 err = 0; 497 err = 0;
609 switch (cgc.data_direction) { 498 switch (cgc.data_direction) {
610 case CGC_DATA_UNKNOWN: 499 case CGC_DATA_UNKNOWN:
611 hdr.dxfer_direction = SG_DXFER_UNKNOWN; 500 hdr.dxfer_direction = SG_DXFER_UNKNOWN;
612 break; 501 break;
613 case CGC_DATA_WRITE: 502 case CGC_DATA_WRITE:
614 hdr.dxfer_direction = SG_DXFER_TO_DEV; 503 hdr.dxfer_direction = SG_DXFER_TO_DEV;
615 break; 504 break;
616 case CGC_DATA_READ: 505 case CGC_DATA_READ:
617 hdr.dxfer_direction = SG_DXFER_FROM_DEV; 506 hdr.dxfer_direction = SG_DXFER_FROM_DEV;
618 break; 507 break;
619 case CGC_DATA_NONE: 508 case CGC_DATA_NONE:
620 hdr.dxfer_direction = SG_DXFER_NONE; 509 hdr.dxfer_direction = SG_DXFER_NONE;
621 break; 510 break;
622 default: 511 default:
623 err = -EINVAL; 512 err = -EINVAL;
624 } 513 }
625 if (err) 514 if (err)
626 break; 515 break;
627 516
628 hdr.dxferp = cgc.buffer; 517 hdr.dxferp = cgc.buffer;
629 hdr.sbp = cgc.sense; 518 hdr.sbp = cgc.sense;
630 if (hdr.sbp) 519 if (hdr.sbp)
631 hdr.mx_sb_len = sizeof(struct request_sense); 520 hdr.mx_sb_len = sizeof(struct request_sense);
632 hdr.timeout = cgc.timeout; 521 hdr.timeout = cgc.timeout;
633 hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd; 522 hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
634 hdr.cmd_len = sizeof(cgc.cmd); 523 hdr.cmd_len = sizeof(cgc.cmd);
635 524
636 err = sg_io(file, q, bd_disk, &hdr); 525 err = sg_io(file, q, bd_disk, &hdr);
637 if (err == -EFAULT) 526 if (err == -EFAULT)
638 break; 527 break;
639 528
640 if (hdr.status) 529 if (hdr.status)
641 err = -EIO; 530 err = -EIO;
642 531
643 cgc.stat = err; 532 cgc.stat = err;
644 cgc.buflen = hdr.resid; 533 cgc.buflen = hdr.resid;
645 if (copy_to_user(arg, &cgc, sizeof(cgc))) 534 if (copy_to_user(arg, &cgc, sizeof(cgc)))
646 err = -EFAULT; 535 err = -EFAULT;
647 536
648 break; 537 break;
649 } 538 }
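
A hedged sketch of the userspace ABI this case adapts onto sg_io(): a READ DISC INFORMATION packet via CDROM_SEND_PACKET. /dev/sr0 is an example node, and the timeout is in clock ticks, matching the clock_t_to_jiffies() conversion above.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cdrom.h>

int main(void)
{
	unsigned char buf[34];
	struct cdrom_generic_command cgc;
	int fd = open("/dev/sr0", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	memset(&cgc, 0, sizeof(cgc));
	memset(buf, 0, sizeof(buf));
	cgc.cmd[0] = 0x51;			/* READ DISC INFORMATION */
	cgc.cmd[8] = sizeof(buf);		/* allocation length (low byte) */
	cgc.buffer = buf;
	cgc.buflen = sizeof(buf);
	cgc.data_direction = CGC_DATA_READ;
	cgc.timeout = 5 * sysconf(_SC_CLK_TCK);	/* ~5 s in clock ticks */
	if (ioctl(fd, CDROM_SEND_PACKET, &cgc) < 0)
		perror("CDROM_SEND_PACKET");
	else
		printf("disc status byte: %#x\n", buf[2]);
	close(fd);
	return 0;
}
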
650 539
651 /* 540 /*
652 * old junk scsi send command ioctl 541 * old junk scsi send command ioctl
653 */ 542 */
654 case SCSI_IOCTL_SEND_COMMAND: 543 case SCSI_IOCTL_SEND_COMMAND:
655 printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm); 544 printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
656 err = -EINVAL; 545 err = -EINVAL;
657 if (!arg) 546 if (!arg)
658 break; 547 break;
659 548
660 err = sg_scsi_ioctl(file, q, bd_disk, arg); 549 err = sg_scsi_ioctl(file, q, bd_disk, arg);
661 break; 550 break;
662 case CDROMCLOSETRAY: 551 case CDROMCLOSETRAY:
663 err = blk_send_start_stop(q, bd_disk, 0x03); 552 err = blk_send_start_stop(q, bd_disk, 0x03);
664 break; 553 break;
665 case CDROMEJECT: 554 case CDROMEJECT:
666 err = blk_send_start_stop(q, bd_disk, 0x02); 555 err = blk_send_start_stop(q, bd_disk, 0x02);
667 break; 556 break;
668 default: 557 default:
669 err = -ENOTTY; 558 err = -ENOTTY;
670 } 559 }
671 560
672 blk_put_queue(q); 561 blk_put_queue(q);
673 return err; 562 return err;
674 } 563 }
675 564
676 EXPORT_SYMBOL(scsi_cmd_ioctl); 565 EXPORT_SYMBOL(scsi_cmd_ioctl);
677 566
1 /* 1 /*
2 * History: 2 * History:
3 * Started: Aug 9 by Lawrence Foard (entropy@world.std.com), 3 * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
4 * to allow user process control of SCSI devices. 4 * to allow user process control of SCSI devices.
5 * Development Sponsored by Killy Corp. NY NY 5 * Development Sponsored by Killy Corp. NY NY
6 * 6 *
7 * Original driver (sg.c): 7 * Original driver (sg.c):
8 * Copyright (C) 1992 Lawrence Foard 8 * Copyright (C) 1992 Lawrence Foard
9 * Version 2 and 3 extensions to driver: 9 * Version 2 and 3 extensions to driver:
10 * Copyright (C) 1998 - 2005 Douglas Gilbert 10 * Copyright (C) 1998 - 2005 Douglas Gilbert
11 * 11 *
12 * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 12 * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option) 16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version. 17 * any later version.
18 * 18 *
19 */ 19 */
20 20
21 static int sg_version_num = 30534; /* 2 digits for each component */ 21 static int sg_version_num = 30534; /* 2 digits for each component */
22 #define SG_VERSION_STR "3.5.34" 22 #define SG_VERSION_STR "3.5.34"
23 23
24 /* 24 /*
25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: 25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
26 * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First 26 * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
27 * the kernel/module needs to be built with CONFIG_SCSI_LOGGING 27 * the kernel/module needs to be built with CONFIG_SCSI_LOGGING
28 * (otherwise the macros compile to empty statements). 28 * (otherwise the macros compile to empty statements).
29 * 29 *
30 */ 30 */
31 #include <linux/module.h> 31 #include <linux/module.h>
32 32
33 #include <linux/fs.h> 33 #include <linux/fs.h>
34 #include <linux/kernel.h> 34 #include <linux/kernel.h>
35 #include <linux/sched.h> 35 #include <linux/sched.h>
36 #include <linux/string.h> 36 #include <linux/string.h>
37 #include <linux/mm.h> 37 #include <linux/mm.h>
38 #include <linux/errno.h> 38 #include <linux/errno.h>
39 #include <linux/mtio.h> 39 #include <linux/mtio.h>
40 #include <linux/ioctl.h> 40 #include <linux/ioctl.h>
41 #include <linux/fcntl.h> 41 #include <linux/fcntl.h>
42 #include <linux/init.h> 42 #include <linux/init.h>
43 #include <linux/poll.h> 43 #include <linux/poll.h>
44 #include <linux/moduleparam.h> 44 #include <linux/moduleparam.h>
45 #include <linux/cdev.h> 45 #include <linux/cdev.h>
46 #include <linux/idr.h> 46 #include <linux/idr.h>
47 #include <linux/seq_file.h> 47 #include <linux/seq_file.h>
48 #include <linux/blkdev.h> 48 #include <linux/blkdev.h>
49 #include <linux/delay.h> 49 #include <linux/delay.h>
50 #include <linux/scatterlist.h> 50 #include <linux/scatterlist.h>
51 #include <linux/blktrace_api.h> 51 #include <linux/blktrace_api.h>
52 52
53 #include "scsi.h" 53 #include "scsi.h"
54 #include <scsi/scsi_dbg.h> 54 #include <scsi/scsi_dbg.h>
55 #include <scsi/scsi_host.h> 55 #include <scsi/scsi_host.h>
56 #include <scsi/scsi_driver.h> 56 #include <scsi/scsi_driver.h>
57 #include <scsi/scsi_ioctl.h> 57 #include <scsi/scsi_ioctl.h>
58 #include <scsi/sg.h> 58 #include <scsi/sg.h>
59 59
60 #include "scsi_logging.h" 60 #include "scsi_logging.h"
61 61
62 #ifdef CONFIG_SCSI_PROC_FS 62 #ifdef CONFIG_SCSI_PROC_FS
63 #include <linux/proc_fs.h> 63 #include <linux/proc_fs.h>
64 static char *sg_version_date = "20061027"; 64 static char *sg_version_date = "20061027";
65 65
66 static int sg_proc_init(void); 66 static int sg_proc_init(void);
67 static void sg_proc_cleanup(void); 67 static void sg_proc_cleanup(void);
68 #endif 68 #endif
69 69
70 #define SG_ALLOW_DIO_DEF 0 70 #define SG_ALLOW_DIO_DEF 0
71 #define SG_ALLOW_DIO_CODE /* compile out by commenting this define */ 71 #define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
72 72
73 #define SG_MAX_DEVS 32768 73 #define SG_MAX_DEVS 32768
74 74
75 /* 75 /*
76 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d) 76 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
77 * Then when using 32 bit integers x * m may overflow during the calculation. 77 * Then when using 32 bit integers x * m may overflow during the calculation.
78 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m 78 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
79 * calculates the same, but prevents the overflow when both m and d 79 * calculates the same, but prevents the overflow when both m and d
80 * are "small" numbers (like HZ and USER_HZ). 80 * are "small" numbers (like HZ and USER_HZ).
81 * Of course an overflow is unavoidable if the result of muldiv doesn't fit 81 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
82 * in 32 bits. 82 * in 32 bits.
83 */ 83 */
84 #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL)) 84 #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
85 85
86 #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ) 86 #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
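
A hedged check of the identity above: writing x = q*d + r, the value x*m/d equals q*m + (r*m)/d exactly under integer division (q*d*m is divisible by d), and the split form never computes x*m in full.

#include <assert.h>
#include <stdio.h>

#define MULDIV(X, MUL, DIV) ((((X) % (DIV)) * (MUL)) / (DIV) + ((X) / (DIV)) * (MUL))

int main(void)
{
	assert(MULDIV(7u, 3u, 2u) == 7u * 3u / 2u);	/* both equal 10 */

	/* jiffies -> user ticks with HZ=1000, USER_HZ=100: the naive
	 * 4000000000u * 100u wraps in 32 bits, the split form does not. */
	unsigned int jiffies = 4000000000u;
	printf("%u user ticks\n", MULDIV(jiffies, 100u, 1000u));	/* 400000000 */
	return 0;
}
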
87 87
88 int sg_big_buff = SG_DEF_RESERVED_SIZE; 88 int sg_big_buff = SG_DEF_RESERVED_SIZE;
89 /* N.B. This variable is readable and writeable via 89 /* N.B. This variable is readable and writeable via
90 /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer 90 /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
91 of this size (or less if there is not enough memory) will be reserved 91 of this size (or less if there is not enough memory) will be reserved
92 for use by this file descriptor. [Deprecated usage: this variable is also 92 for use by this file descriptor. [Deprecated usage: this variable is also
93 readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into 93 readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
94 the kernel (i.e. it is not a module).] */ 94 the kernel (i.e. it is not a module).] */
95 static int def_reserved_size = -1; /* picks up init parameter */ 95 static int def_reserved_size = -1; /* picks up init parameter */
96 static int sg_allow_dio = SG_ALLOW_DIO_DEF; 96 static int sg_allow_dio = SG_ALLOW_DIO_DEF;
97 97
98 static int scatter_elem_sz = SG_SCATTER_SZ; 98 static int scatter_elem_sz = SG_SCATTER_SZ;
99 static int scatter_elem_sz_prev = SG_SCATTER_SZ; 99 static int scatter_elem_sz_prev = SG_SCATTER_SZ;
100 100
101 #define SG_SECTOR_SZ 512 101 #define SG_SECTOR_SZ 512
102 #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1) 102 #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
103 103
104 static int sg_add(struct device *, struct class_interface *); 104 static int sg_add(struct device *, struct class_interface *);
105 static void sg_remove(struct device *, struct class_interface *); 105 static void sg_remove(struct device *, struct class_interface *);
106 106
107 static DEFINE_IDR(sg_index_idr); 107 static DEFINE_IDR(sg_index_idr);
108 static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock 108 static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
109 file descriptor list for device */ 109 file descriptor list for device */
110 110
111 static struct class_interface sg_interface = { 111 static struct class_interface sg_interface = {
112 .add_dev = sg_add, 112 .add_dev = sg_add,
113 .remove_dev = sg_remove, 113 .remove_dev = sg_remove,
114 }; 114 };
115 115
116 typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ 116 typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
117 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ 117 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
118 unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ 118 unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
119 unsigned bufflen; /* Size of (aggregate) data buffer */ 119 unsigned bufflen; /* Size of (aggregate) data buffer */
120 unsigned b_malloc_len; /* actual len malloc'ed in buffer */ 120 unsigned b_malloc_len; /* actual len malloc'ed in buffer */
121 struct scatterlist *buffer;/* scatter list */ 121 struct scatterlist *buffer;/* scatter list */
122 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ 122 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
123 unsigned char cmd_opcode; /* first byte of command */ 123 unsigned char cmd_opcode; /* first byte of command */
124 } Sg_scatter_hold; 124 } Sg_scatter_hold;
125 125
126 struct sg_device; /* forward declarations */ 126 struct sg_device; /* forward declarations */
127 struct sg_fd; 127 struct sg_fd;
128 128
129 typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ 129 typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
130 struct sg_request *nextrp; /* NULL -> tail request (slist) */ 130 struct sg_request *nextrp; /* NULL -> tail request (slist) */
131 struct sg_fd *parentfp; /* NULL -> not in use */ 131 struct sg_fd *parentfp; /* NULL -> not in use */
132 Sg_scatter_hold data; /* hold buffer, perhaps scatter list */ 132 Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
133 sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */ 133 sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
134 unsigned char sense_b[SCSI_SENSE_BUFFERSIZE]; 134 unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
135 char res_used; /* 1 -> using reserve buffer, 0 -> not ... */ 135 char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
136 char orphan; /* 1 -> drop on sight, 0 -> normal */ 136 char orphan; /* 1 -> drop on sight, 0 -> normal */
137 char sg_io_owned; /* 1 -> packet belongs to SG_IO */ 137 char sg_io_owned; /* 1 -> packet belongs to SG_IO */
138 volatile char done; /* 0->before bh, 1->before read, 2->read */ 138 volatile char done; /* 0->before bh, 1->before read, 2->read */
139 } Sg_request; 139 } Sg_request;
140 140
141 typedef struct sg_fd { /* holds the state of a file descriptor */ 141 typedef struct sg_fd { /* holds the state of a file descriptor */
142 struct sg_fd *nextfp; /* NULL when last opened fd on this device */ 142 struct sg_fd *nextfp; /* NULL when last opened fd on this device */
143 struct sg_device *parentdp; /* owning device */ 143 struct sg_device *parentdp; /* owning device */
144 wait_queue_head_t read_wait; /* queue read until command done */ 144 wait_queue_head_t read_wait; /* queue read until command done */
145 rwlock_t rq_list_lock; /* protect access to list in req_arr */ 145 rwlock_t rq_list_lock; /* protect access to list in req_arr */
146 int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ 146 int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
147 int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ 147 int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
148 Sg_scatter_hold reserve; /* buffer held for this file descriptor */ 148 Sg_scatter_hold reserve; /* buffer held for this file descriptor */
149 unsigned save_scat_len; /* original length of trunc. scat. element */ 149 unsigned save_scat_len; /* original length of trunc. scat. element */
150 Sg_request *headrp; /* head of request slist, NULL->empty */ 150 Sg_request *headrp; /* head of request slist, NULL->empty */
151 struct fasync_struct *async_qp; /* used by asynchronous notification */ 151 struct fasync_struct *async_qp; /* used by asynchronous notification */
152 Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ 152 Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
153 char low_dma; /* as in parent but possibly overridden to 1 */ 153 char low_dma; /* as in parent but possibly overridden to 1 */
154 char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ 154 char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
155 volatile char closed; /* 1 -> fd closed but request(s) outstanding */ 155 volatile char closed; /* 1 -> fd closed but request(s) outstanding */
156 char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ 156 char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
157 char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */ 157 char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
158 char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ 158 char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
159 char mmap_called; /* 0 -> mmap() never called on this fd */ 159 char mmap_called; /* 0 -> mmap() never called on this fd */
160 } Sg_fd; 160 } Sg_fd;
161 161
162 typedef struct sg_device { /* holds the state of each scsi generic device */ 162 typedef struct sg_device { /* holds the state of each scsi generic device */
163 struct scsi_device *device; 163 struct scsi_device *device;
164 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ 164 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
165 int sg_tablesize; /* adapter's max scatter-gather table size */ 165 int sg_tablesize; /* adapter's max scatter-gather table size */
166 u32 index; /* device index number */ 166 u32 index; /* device index number */
167 Sg_fd *headfp; /* first open fd belonging to this device */ 167 Sg_fd *headfp; /* first open fd belonging to this device */
168 volatile char detached; /* 0->attached, 1->detached pending removal */ 168 volatile char detached; /* 0->attached, 1->detached pending removal */
169 volatile char exclude; /* opened for exclusive access */ 169 volatile char exclude; /* opened for exclusive access */
170 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ 170 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
171 struct gendisk *disk; 171 struct gendisk *disk;
172 struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ 172 struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
173 } Sg_device; 173 } Sg_device;
174 174
175 static int sg_fasync(int fd, struct file *filp, int mode); 175 static int sg_fasync(int fd, struct file *filp, int mode);
176 /* tasklet or soft irq callback */ 176 /* tasklet or soft irq callback */
177 static void sg_cmd_done(void *data, char *sense, int result, int resid); 177 static void sg_cmd_done(void *data, char *sense, int result, int resid);
178 static int sg_start_req(Sg_request * srp); 178 static int sg_start_req(Sg_request * srp);
179 static void sg_finish_rem_req(Sg_request * srp); 179 static void sg_finish_rem_req(Sg_request * srp);
180 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); 180 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
181 static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, 181 static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
182 int tablesize); 182 int tablesize);
183 static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, 183 static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
184 Sg_request * srp); 184 Sg_request * srp);
185 static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count, 185 static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
186 int blocking, int read_only, Sg_request ** o_srp); 186 const char __user *buf, size_t count, int blocking,
187 int read_only, Sg_request **o_srp);
187 static int sg_common_write(Sg_fd * sfp, Sg_request * srp, 188 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
188 unsigned char *cmnd, int timeout, int blocking); 189 unsigned char *cmnd, int timeout, int blocking);
189 static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 190 static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
190 int wr_xf, int *countp, unsigned char __user **up); 191 int wr_xf, int *countp, unsigned char __user **up);
191 static int sg_write_xfer(Sg_request * srp); 192 static int sg_write_xfer(Sg_request * srp);
192 static int sg_read_xfer(Sg_request * srp); 193 static int sg_read_xfer(Sg_request * srp);
193 static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); 194 static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
194 static void sg_remove_scat(Sg_scatter_hold * schp); 195 static void sg_remove_scat(Sg_scatter_hold * schp);
195 static void sg_build_reserve(Sg_fd * sfp, int req_size); 196 static void sg_build_reserve(Sg_fd * sfp, int req_size);
196 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); 197 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
197 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); 198 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
198 static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp); 199 static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
199 static void sg_page_free(struct page *page, int size); 200 static void sg_page_free(struct page *page, int size);
200 static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); 201 static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
201 static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 202 static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
202 static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 203 static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
203 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); 204 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
204 static Sg_request *sg_add_request(Sg_fd * sfp); 205 static Sg_request *sg_add_request(Sg_fd * sfp);
205 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); 206 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
206 static int sg_res_in_use(Sg_fd * sfp); 207 static int sg_res_in_use(Sg_fd * sfp);
207 static int sg_allow_access(unsigned char opcode, char dev_type);
208 static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len); 208 static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
209 static Sg_device *sg_get_dev(int dev); 209 static Sg_device *sg_get_dev(int dev);
210 #ifdef CONFIG_SCSI_PROC_FS 210 #ifdef CONFIG_SCSI_PROC_FS
211 static int sg_last_dev(void); 211 static int sg_last_dev(void);
212 #endif 212 #endif
213 213
214 #define SZ_SG_HEADER sizeof(struct sg_header) 214 #define SZ_SG_HEADER sizeof(struct sg_header)
215 #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) 215 #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
216 #define SZ_SG_IOVEC sizeof(sg_iovec_t) 216 #define SZ_SG_IOVEC sizeof(sg_iovec_t)
217 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) 217 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
218 218
219 static int 219 static int
220 sg_open(struct inode *inode, struct file *filp) 220 sg_open(struct inode *inode, struct file *filp)
221 { 221 {
222 int dev = iminor(inode); 222 int dev = iminor(inode);
223 int flags = filp->f_flags; 223 int flags = filp->f_flags;
224 struct request_queue *q; 224 struct request_queue *q;
225 Sg_device *sdp; 225 Sg_device *sdp;
226 Sg_fd *sfp; 226 Sg_fd *sfp;
227 int res; 227 int res;
228 int retval; 228 int retval;
229 229
230 nonseekable_open(inode, filp); 230 nonseekable_open(inode, filp);
231 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags)); 231 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
232 sdp = sg_get_dev(dev); 232 sdp = sg_get_dev(dev);
233 if ((!sdp) || (!sdp->device)) 233 if ((!sdp) || (!sdp->device))
234 return -ENXIO; 234 return -ENXIO;
235 if (sdp->detached) 235 if (sdp->detached)
236 return -ENODEV; 236 return -ENODEV;
237 237
238 /* This driver's module count bumped by fops_get in <linux/fs.h> */ 238 /* This driver's module count bumped by fops_get in <linux/fs.h> */
239 /* Prevent the device driver from vanishing while we sleep */ 239 /* Prevent the device driver from vanishing while we sleep */
240 retval = scsi_device_get(sdp->device); 240 retval = scsi_device_get(sdp->device);
241 if (retval) 241 if (retval)
242 return retval; 242 return retval;
243 243
244 if (!((flags & O_NONBLOCK) || 244 if (!((flags & O_NONBLOCK) ||
245 scsi_block_when_processing_errors(sdp->device))) { 245 scsi_block_when_processing_errors(sdp->device))) {
246 retval = -ENXIO; 246 retval = -ENXIO;
247 /* we are in error recovery for this device */ 247 /* we are in error recovery for this device */
248 goto error_out; 248 goto error_out;
249 } 249 }
250 250
251 if (flags & O_EXCL) { 251 if (flags & O_EXCL) {
252 if (O_RDONLY == (flags & O_ACCMODE)) { 252 if (O_RDONLY == (flags & O_ACCMODE)) {
253 retval = -EPERM; /* Can't lock it with read only access */ 253 retval = -EPERM; /* Can't lock it with read only access */
254 goto error_out; 254 goto error_out;
255 } 255 }
256 if (sdp->headfp && (flags & O_NONBLOCK)) { 256 if (sdp->headfp && (flags & O_NONBLOCK)) {
257 retval = -EBUSY; 257 retval = -EBUSY;
258 goto error_out; 258 goto error_out;
259 } 259 }
260 res = 0; 260 res = 0;
261 __wait_event_interruptible(sdp->o_excl_wait, 261 __wait_event_interruptible(sdp->o_excl_wait,
262 ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res); 262 ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
263 if (res) { 263 if (res) {
264 retval = res; /* -ERESTARTSYS because signal hit process */ 264 retval = res; /* -ERESTARTSYS because signal hit process */
265 goto error_out; 265 goto error_out;
266 } 266 }
267 } else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */ 267 } else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
268 if (flags & O_NONBLOCK) { 268 if (flags & O_NONBLOCK) {
269 retval = -EBUSY; 269 retval = -EBUSY;
270 goto error_out; 270 goto error_out;
271 } 271 }
272 res = 0; 272 res = 0;
273 __wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude), 273 __wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
274 res); 274 res);
275 if (res) { 275 if (res) {
276 retval = res; /* -ERESTARTSYS because signal hit process */ 276 retval = res; /* -ERESTARTSYS because signal hit process */
277 goto error_out; 277 goto error_out;
278 } 278 }
279 } 279 }
280 if (sdp->detached) { 280 if (sdp->detached) {
281 retval = -ENODEV; 281 retval = -ENODEV;
282 goto error_out; 282 goto error_out;
283 } 283 }
284 if (!sdp->headfp) { /* no existing opens on this device */ 284 if (!sdp->headfp) { /* no existing opens on this device */
285 sdp->sgdebug = 0; 285 sdp->sgdebug = 0;
286 q = sdp->device->request_queue; 286 q = sdp->device->request_queue;
287 sdp->sg_tablesize = min(q->max_hw_segments, 287 sdp->sg_tablesize = min(q->max_hw_segments,
288 q->max_phys_segments); 288 q->max_phys_segments);
289 } 289 }
290 if ((sfp = sg_add_sfp(sdp, dev))) 290 if ((sfp = sg_add_sfp(sdp, dev)))
291 filp->private_data = sfp; 291 filp->private_data = sfp;
292 else { 292 else {
293 if (flags & O_EXCL) 293 if (flags & O_EXCL)
294 sdp->exclude = 0; /* undo if error */ 294 sdp->exclude = 0; /* undo if error */
295 retval = -ENOMEM; 295 retval = -ENOMEM;
296 goto error_out; 296 goto error_out;
297 } 297 }
298 return 0; 298 return 0;
299 299
300 error_out: 300 error_out:
301 scsi_device_put(sdp->device); 301 scsi_device_put(sdp->device);
302 return retval; 302 return retval;
303 } 303 }
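
A hedged sketch of the O_EXCL semantics implemented above: exclusive opens require write access, and a second exclusive non-blocking open fails with EBUSY while the first is held. /dev/sg0 is an example node.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd1 = open("/dev/sg0", O_RDWR | O_EXCL | O_NONBLOCK);
	int fd2;

	if (fd1 < 0)
		return 1;
	fd2 = open("/dev/sg0", O_RDWR | O_EXCL | O_NONBLOCK);
	if (fd2 < 0 && errno == EBUSY)
		fprintf(stderr, "second exclusive open refused, as expected\n");
	/* without O_NONBLOCK, the second open would sleep on o_excl_wait */
	close(fd1);
	return 0;
}
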
304 304
305 /* Following function was formerly called 'sg_close' */ 305 /* Following function was formerly called 'sg_close' */
306 static int 306 static int
307 sg_release(struct inode *inode, struct file *filp) 307 sg_release(struct inode *inode, struct file *filp)
308 { 308 {
309 Sg_device *sdp; 309 Sg_device *sdp;
310 Sg_fd *sfp; 310 Sg_fd *sfp;
311 311
312 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 312 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
313 return -ENXIO; 313 return -ENXIO;
314 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 314 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
315 sg_fasync(-1, filp, 0); /* remove filp from async notification list */ 315 sg_fasync(-1, filp, 0); /* remove filp from async notification list */
316 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */ 316 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
317 if (!sdp->detached) { 317 if (!sdp->detached) {
318 scsi_device_put(sdp->device); 318 scsi_device_put(sdp->device);
319 } 319 }
320 sdp->exclude = 0; 320 sdp->exclude = 0;
321 wake_up_interruptible(&sdp->o_excl_wait); 321 wake_up_interruptible(&sdp->o_excl_wait);
322 } 322 }
323 return 0; 323 return 0;
324 } 324 }
325 325
326 static ssize_t 326 static ssize_t
327 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) 327 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
328 { 328 {
329 Sg_device *sdp; 329 Sg_device *sdp;
330 Sg_fd *sfp; 330 Sg_fd *sfp;
331 Sg_request *srp; 331 Sg_request *srp;
332 int req_pack_id = -1; 332 int req_pack_id = -1;
333 sg_io_hdr_t *hp; 333 sg_io_hdr_t *hp;
334 struct sg_header *old_hdr = NULL; 334 struct sg_header *old_hdr = NULL;
335 int retval = 0; 335 int retval = 0;
336 336
337 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 337 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
338 return -ENXIO; 338 return -ENXIO;
339 SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n", 339 SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
340 sdp->disk->disk_name, (int) count)); 340 sdp->disk->disk_name, (int) count));
341 341
342 if (!access_ok(VERIFY_WRITE, buf, count)) 342 if (!access_ok(VERIFY_WRITE, buf, count))
343 return -EFAULT; 343 return -EFAULT;
344 if (sfp->force_packid && (count >= SZ_SG_HEADER)) { 344 if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
345 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); 345 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
346 if (!old_hdr) 346 if (!old_hdr)
347 return -ENOMEM; 347 return -ENOMEM;
348 if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) { 348 if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
349 retval = -EFAULT; 349 retval = -EFAULT;
350 goto free_old_hdr; 350 goto free_old_hdr;
351 } 351 }
352 if (old_hdr->reply_len < 0) { 352 if (old_hdr->reply_len < 0) {
353 if (count >= SZ_SG_IO_HDR) { 353 if (count >= SZ_SG_IO_HDR) {
354 sg_io_hdr_t *new_hdr; 354 sg_io_hdr_t *new_hdr;
355 new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL); 355 new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
356 if (!new_hdr) { 356 if (!new_hdr) {
357 retval = -ENOMEM; 357 retval = -ENOMEM;
358 goto free_old_hdr; 358 goto free_old_hdr;
359 } 359 }
360 retval =__copy_from_user 360 retval =__copy_from_user
361 (new_hdr, buf, SZ_SG_IO_HDR); 361 (new_hdr, buf, SZ_SG_IO_HDR);
362 req_pack_id = new_hdr->pack_id; 362 req_pack_id = new_hdr->pack_id;
363 kfree(new_hdr); 363 kfree(new_hdr);
364 if (retval) { 364 if (retval) {
365 retval = -EFAULT; 365 retval = -EFAULT;
366 goto free_old_hdr; 366 goto free_old_hdr;
367 } 367 }
368 } 368 }
369 } else 369 } else
370 req_pack_id = old_hdr->pack_id; 370 req_pack_id = old_hdr->pack_id;
371 } 371 }
372 srp = sg_get_rq_mark(sfp, req_pack_id); 372 srp = sg_get_rq_mark(sfp, req_pack_id);
373 if (!srp) { /* now wait on packet to arrive */ 373 if (!srp) { /* now wait on packet to arrive */
374 if (sdp->detached) { 374 if (sdp->detached) {
375 retval = -ENODEV; 375 retval = -ENODEV;
376 goto free_old_hdr; 376 goto free_old_hdr;
377 } 377 }
378 if (filp->f_flags & O_NONBLOCK) { 378 if (filp->f_flags & O_NONBLOCK) {
379 retval = -EAGAIN; 379 retval = -EAGAIN;
380 goto free_old_hdr; 380 goto free_old_hdr;
381 } 381 }
382 while (1) { 382 while (1) {
383 retval = 0; /* following macro beats race condition */ 383 retval = 0; /* following macro beats race condition */
384 __wait_event_interruptible(sfp->read_wait, 384 __wait_event_interruptible(sfp->read_wait,
385 (sdp->detached || 385 (sdp->detached ||
386 (srp = sg_get_rq_mark(sfp, req_pack_id))), 386 (srp = sg_get_rq_mark(sfp, req_pack_id))),
387 retval); 387 retval);
388 if (sdp->detached) { 388 if (sdp->detached) {
389 retval = -ENODEV; 389 retval = -ENODEV;
390 goto free_old_hdr; 390 goto free_old_hdr;
391 } 391 }
392 if (0 == retval) 392 if (0 == retval)
393 break; 393 break;
394 394
395 /* -ERESTARTSYS as signal hit process */ 395 /* -ERESTARTSYS as signal hit process */
396 goto free_old_hdr; 396 goto free_old_hdr;
397 } 397 }
398 } 398 }
399 if (srp->header.interface_id != '\0') { 399 if (srp->header.interface_id != '\0') {
400 retval = sg_new_read(sfp, buf, count, srp); 400 retval = sg_new_read(sfp, buf, count, srp);
401 goto free_old_hdr; 401 goto free_old_hdr;
402 } 402 }
403 403
404 hp = &srp->header; 404 hp = &srp->header;
405 if (old_hdr == NULL) { 405 if (old_hdr == NULL) {
406 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); 406 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
407 if (! old_hdr) { 407 if (! old_hdr) {
408 retval = -ENOMEM; 408 retval = -ENOMEM;
409 goto free_old_hdr; 409 goto free_old_hdr;
410 } 410 }
411 } 411 }
412 memset(old_hdr, 0, SZ_SG_HEADER); 412 memset(old_hdr, 0, SZ_SG_HEADER);
413 old_hdr->reply_len = (int) hp->timeout; 413 old_hdr->reply_len = (int) hp->timeout;
414 old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ 414 old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
415 old_hdr->pack_id = hp->pack_id; 415 old_hdr->pack_id = hp->pack_id;
416 old_hdr->twelve_byte = 416 old_hdr->twelve_byte =
417 ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0; 417 ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
418 old_hdr->target_status = hp->masked_status; 418 old_hdr->target_status = hp->masked_status;
419 old_hdr->host_status = hp->host_status; 419 old_hdr->host_status = hp->host_status;
420 old_hdr->driver_status = hp->driver_status; 420 old_hdr->driver_status = hp->driver_status;
421 if ((CHECK_CONDITION & hp->masked_status) || 421 if ((CHECK_CONDITION & hp->masked_status) ||
422 (DRIVER_SENSE & hp->driver_status)) 422 (DRIVER_SENSE & hp->driver_status))
423 memcpy(old_hdr->sense_buffer, srp->sense_b, 423 memcpy(old_hdr->sense_buffer, srp->sense_b,
424 sizeof (old_hdr->sense_buffer)); 424 sizeof (old_hdr->sense_buffer));
425 switch (hp->host_status) { 425 switch (hp->host_status) {
426 /* This setup of 'result' is for backward compatibility and is best 426 /* This setup of 'result' is for backward compatibility and is best
427 ignored by the user who should use target, host + driver status */ 427 ignored by the user who should use target, host + driver status */
428 case DID_OK: 428 case DID_OK:
429 case DID_PASSTHROUGH: 429 case DID_PASSTHROUGH:
430 case DID_SOFT_ERROR: 430 case DID_SOFT_ERROR:
431 old_hdr->result = 0; 431 old_hdr->result = 0;
432 break; 432 break;
433 case DID_NO_CONNECT: 433 case DID_NO_CONNECT:
434 case DID_BUS_BUSY: 434 case DID_BUS_BUSY:
435 case DID_TIME_OUT: 435 case DID_TIME_OUT:
436 old_hdr->result = EBUSY; 436 old_hdr->result = EBUSY;
437 break; 437 break;
438 case DID_BAD_TARGET: 438 case DID_BAD_TARGET:
439 case DID_ABORT: 439 case DID_ABORT:
440 case DID_PARITY: 440 case DID_PARITY:
441 case DID_RESET: 441 case DID_RESET:
442 case DID_BAD_INTR: 442 case DID_BAD_INTR:
443 old_hdr->result = EIO; 443 old_hdr->result = EIO;
444 break; 444 break;
445 case DID_ERROR: 445 case DID_ERROR:
446 old_hdr->result = (srp->sense_b[0] == 0 && 446 old_hdr->result = (srp->sense_b[0] == 0 &&
447 hp->masked_status == GOOD) ? 0 : EIO; 447 hp->masked_status == GOOD) ? 0 : EIO;
448 break; 448 break;
449 default: 449 default:
450 old_hdr->result = EIO; 450 old_hdr->result = EIO;
451 break; 451 break;
452 } 452 }
453 453
454 /* Now copy the result back to the user buffer. */ 454 /* Now copy the result back to the user buffer. */
455 if (count >= SZ_SG_HEADER) { 455 if (count >= SZ_SG_HEADER) {
456 if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) { 456 if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
457 retval = -EFAULT; 457 retval = -EFAULT;
458 goto free_old_hdr; 458 goto free_old_hdr;
459 } 459 }
460 buf += SZ_SG_HEADER; 460 buf += SZ_SG_HEADER;
461 if (count > old_hdr->reply_len) 461 if (count > old_hdr->reply_len)
462 count = old_hdr->reply_len; 462 count = old_hdr->reply_len;
463 if (count > SZ_SG_HEADER) { 463 if (count > SZ_SG_HEADER) {
464 if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) { 464 if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
465 retval = -EFAULT; 465 retval = -EFAULT;
466 goto free_old_hdr; 466 goto free_old_hdr;
467 } 467 }
468 } 468 }
469 } else 469 } else
470 count = (old_hdr->result == 0) ? 0 : -EIO; 470 count = (old_hdr->result == 0) ? 0 : -EIO;
471 sg_finish_rem_req(srp); 471 sg_finish_rem_req(srp);
472 retval = count; 472 retval = count;
473 free_old_hdr: 473 free_old_hdr:
474 kfree(old_hdr); 474 kfree(old_hdr);
475 return retval; 475 return retval;
476 } 476 }
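
Note on the hunk above: for the legacy (v2) read path, sg_read() folds the mid-layer's DID_* host status into old_hdr->result as errno-style values (0, EBUSY, or EIO) purely for backward compatibility. A minimal userspace sketch of that contract follows; it assumes a command was already written on this fd, and /dev/sg0 is illustrative.

	/* Hypothetical userspace sketch of the legacy (v2) read path above:
	 * after a command has been written on the fd, read() returns a
	 * struct sg_header whose ->result carries the errno-style mapping
	 * of the DID_* host status. Not part of the patch. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <scsi/sg.h>

	int main(void)
	{
		unsigned char buf[sizeof(struct sg_header) + 512];
		struct sg_header hdr;
		int fd = open("/dev/sg0", O_RDWR | O_NONBLOCK);

		if (fd < 0)
			return 1;
		if (read(fd, buf, sizeof(buf)) < 0) {
			/* EAGAIN here is the O_NONBLOCK branch above:
			 * no reply queued yet */
			perror("read");
			close(fd);
			return 1;
		}
		memcpy(&hdr, buf, sizeof(hdr));
		printf("pack_id=%d result=%d\n", hdr.pack_id, hdr.result);
		close(fd);
		return 0;
	}
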
477 477
478 static ssize_t 478 static ssize_t
479 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) 479 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
480 { 480 {
481 sg_io_hdr_t *hp = &srp->header; 481 sg_io_hdr_t *hp = &srp->header;
482 int err = 0; 482 int err = 0;
483 int len; 483 int len;
484 484
485 if (count < SZ_SG_IO_HDR) { 485 if (count < SZ_SG_IO_HDR) {
486 err = -EINVAL; 486 err = -EINVAL;
487 goto err_out; 487 goto err_out;
488 } 488 }
489 hp->sb_len_wr = 0; 489 hp->sb_len_wr = 0;
490 if ((hp->mx_sb_len > 0) && hp->sbp) { 490 if ((hp->mx_sb_len > 0) && hp->sbp) {
491 if ((CHECK_CONDITION & hp->masked_status) || 491 if ((CHECK_CONDITION & hp->masked_status) ||
492 (DRIVER_SENSE & hp->driver_status)) { 492 (DRIVER_SENSE & hp->driver_status)) {
493 int sb_len = SCSI_SENSE_BUFFERSIZE; 493 int sb_len = SCSI_SENSE_BUFFERSIZE;
494 sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; 494 sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
495 len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ 495 len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
496 len = (len > sb_len) ? sb_len : len; 496 len = (len > sb_len) ? sb_len : len;
497 if (copy_to_user(hp->sbp, srp->sense_b, len)) { 497 if (copy_to_user(hp->sbp, srp->sense_b, len)) {
498 err = -EFAULT; 498 err = -EFAULT;
499 goto err_out; 499 goto err_out;
500 } 500 }
501 hp->sb_len_wr = len; 501 hp->sb_len_wr = len;
502 } 502 }
503 } 503 }
504 if (hp->masked_status || hp->host_status || hp->driver_status) 504 if (hp->masked_status || hp->host_status || hp->driver_status)
505 hp->info |= SG_INFO_CHECK; 505 hp->info |= SG_INFO_CHECK;
506 if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) { 506 if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
507 err = -EFAULT; 507 err = -EFAULT;
508 goto err_out; 508 goto err_out;
509 } 509 }
510 err = sg_read_xfer(srp); 510 err = sg_read_xfer(srp);
511 err_out: 511 err_out:
512 sg_finish_rem_req(srp); 512 sg_finish_rem_req(srp);
513 return (0 == err) ? count : err; 513 return (0 == err) ? count : err;
514 } 514 }
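
sg_new_read() clamps the sense copy to the smallest of the caller's mx_sb_len, the driver's sense buffer size, and the 8-byte fixed sense header plus the additional-sense-length byte (sense_b[7]). The same clamp, restated as a standalone helper below; the 96-byte cap mirrors SCSI_SENSE_BUFFERSIZE of this era and is an assumption, not quoted from the patch.

	/* Sketch of the sense-length clamp in sg_new_read(); not kernel code. */
	#include <stddef.h>

	#define SENSE_BUF_MAX 96	/* assumed SCSI_SENSE_BUFFERSIZE */

	static size_t sense_copy_len(size_t mx_sb_len, const unsigned char *sense_b)
	{
		size_t sb_len = (mx_sb_len > SENSE_BUF_MAX) ? SENSE_BUF_MAX : mx_sb_len;
		size_t len = 8 + sense_b[7];	/* fixed part + additional length */

		return (len > sb_len) ? sb_len : len;
	}
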
515 515
516 static ssize_t 516 static ssize_t
517 sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) 517 sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
518 { 518 {
519 int mxsize, cmd_size, k; 519 int mxsize, cmd_size, k;
520 int input_size, blocking; 520 int input_size, blocking;
521 unsigned char opcode; 521 unsigned char opcode;
522 Sg_device *sdp; 522 Sg_device *sdp;
523 Sg_fd *sfp; 523 Sg_fd *sfp;
524 Sg_request *srp; 524 Sg_request *srp;
525 struct sg_header old_hdr; 525 struct sg_header old_hdr;
526 sg_io_hdr_t *hp; 526 sg_io_hdr_t *hp;
527 unsigned char cmnd[MAX_COMMAND_SIZE]; 527 unsigned char cmnd[MAX_COMMAND_SIZE];
528 528
529 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 529 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
530 return -ENXIO; 530 return -ENXIO;
531 SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n", 531 SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
532 sdp->disk->disk_name, (int) count)); 532 sdp->disk->disk_name, (int) count));
533 if (sdp->detached) 533 if (sdp->detached)
534 return -ENODEV; 534 return -ENODEV;
535 if (!((filp->f_flags & O_NONBLOCK) || 535 if (!((filp->f_flags & O_NONBLOCK) ||
536 scsi_block_when_processing_errors(sdp->device))) 536 scsi_block_when_processing_errors(sdp->device)))
537 return -ENXIO; 537 return -ENXIO;
538 538
539 if (!access_ok(VERIFY_READ, buf, count)) 539 if (!access_ok(VERIFY_READ, buf, count))
540 return -EFAULT; /* protects following copy_from_user()s + get_user()s */ 540 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
541 if (count < SZ_SG_HEADER) 541 if (count < SZ_SG_HEADER)
542 return -EIO; 542 return -EIO;
543 if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) 543 if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
544 return -EFAULT; 544 return -EFAULT;
545 blocking = !(filp->f_flags & O_NONBLOCK); 545 blocking = !(filp->f_flags & O_NONBLOCK);
546 if (old_hdr.reply_len < 0) 546 if (old_hdr.reply_len < 0)
547 return sg_new_write(sfp, buf, count, blocking, 0, NULL); 547 return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL);
548 if (count < (SZ_SG_HEADER + 6)) 548 if (count < (SZ_SG_HEADER + 6))
549 return -EIO; /* The minimum scsi command length is 6 bytes. */ 549 return -EIO; /* The minimum scsi command length is 6 bytes. */
550 550
551 if (!(srp = sg_add_request(sfp))) { 551 if (!(srp = sg_add_request(sfp))) {
552 SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n")); 552 SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
553 return -EDOM; 553 return -EDOM;
554 } 554 }
555 buf += SZ_SG_HEADER; 555 buf += SZ_SG_HEADER;
556 __get_user(opcode, buf); 556 __get_user(opcode, buf);
557 if (sfp->next_cmd_len > 0) { 557 if (sfp->next_cmd_len > 0) {
558 if (sfp->next_cmd_len > MAX_COMMAND_SIZE) { 558 if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
559 SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n")); 559 SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
560 sfp->next_cmd_len = 0; 560 sfp->next_cmd_len = 0;
561 sg_remove_request(sfp, srp); 561 sg_remove_request(sfp, srp);
562 return -EIO; 562 return -EIO;
563 } 563 }
564 cmd_size = sfp->next_cmd_len; 564 cmd_size = sfp->next_cmd_len;
565 sfp->next_cmd_len = 0; /* reset so only this write() is affected */ 565 sfp->next_cmd_len = 0; /* reset so only this write() is affected */
566 } else { 566 } else {
567 cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */ 567 cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
568 if ((opcode >= 0xc0) && old_hdr.twelve_byte) 568 if ((opcode >= 0xc0) && old_hdr.twelve_byte)
569 cmd_size = 12; 569 cmd_size = 12;
570 } 570 }
571 SCSI_LOG_TIMEOUT(4, printk( 571 SCSI_LOG_TIMEOUT(4, printk(
572 "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); 572 "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
573 /* Determine buffer size. */ 573 /* Determine buffer size. */
574 input_size = count - cmd_size; 574 input_size = count - cmd_size;
575 mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len; 575 mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
576 mxsize -= SZ_SG_HEADER; 576 mxsize -= SZ_SG_HEADER;
577 input_size -= SZ_SG_HEADER; 577 input_size -= SZ_SG_HEADER;
578 if (input_size < 0) { 578 if (input_size < 0) {
579 sg_remove_request(sfp, srp); 579 sg_remove_request(sfp, srp);
580 return -EIO; /* User did not pass enough bytes for this command. */ 580 return -EIO; /* User did not pass enough bytes for this command. */
581 } 581 }
582 hp = &srp->header; 582 hp = &srp->header;
583 hp->interface_id = '\0'; /* indicator of old interface tunnelled */ 583 hp->interface_id = '\0'; /* indicator of old interface tunnelled */
584 hp->cmd_len = (unsigned char) cmd_size; 584 hp->cmd_len = (unsigned char) cmd_size;
585 hp->iovec_count = 0; 585 hp->iovec_count = 0;
586 hp->mx_sb_len = 0; 586 hp->mx_sb_len = 0;
587 if (input_size > 0) 587 if (input_size > 0)
588 hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ? 588 hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
589 SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV; 589 SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
590 else 590 else
591 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; 591 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
592 hp->dxfer_len = mxsize; 592 hp->dxfer_len = mxsize;
593 hp->dxferp = (char __user *)buf + cmd_size; 593 hp->dxferp = (char __user *)buf + cmd_size;
594 hp->sbp = NULL; 594 hp->sbp = NULL;
595 hp->timeout = old_hdr.reply_len; /* structure abuse ... */ 595 hp->timeout = old_hdr.reply_len; /* structure abuse ... */
596 hp->flags = input_size; /* structure abuse ... */ 596 hp->flags = input_size; /* structure abuse ... */
597 hp->pack_id = old_hdr.pack_id; 597 hp->pack_id = old_hdr.pack_id;
598 hp->usr_ptr = NULL; 598 hp->usr_ptr = NULL;
599 if (__copy_from_user(cmnd, buf, cmd_size)) 599 if (__copy_from_user(cmnd, buf, cmd_size))
600 return -EFAULT; 600 return -EFAULT;
601 /* 601 /*
602 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, 602 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
603 * but it is possible that the app intended SG_DXFER_TO_DEV, because there 603 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
604 * is a non-zero input_size, so emit a warning. 604 * is a non-zero input_size, so emit a warning.
605 */ 605 */
606 if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { 606 if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
607 static char cmd[TASK_COMM_LEN]; 607 static char cmd[TASK_COMM_LEN];
608 if (strcmp(current->comm, cmd) && printk_ratelimit()) { 608 if (strcmp(current->comm, cmd) && printk_ratelimit()) {
609 printk(KERN_WARNING 609 printk(KERN_WARNING
610 "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--" 610 "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
611 "guessing data in;\n" KERN_WARNING " " 611 "guessing data in;\n" KERN_WARNING " "
612 "program %s not setting count and/or reply_len properly\n", 612 "program %s not setting count and/or reply_len properly\n",
613 old_hdr.reply_len - (int)SZ_SG_HEADER, 613 old_hdr.reply_len - (int)SZ_SG_HEADER,
614 input_size, (unsigned int) cmnd[0], 614 input_size, (unsigned int) cmnd[0],
615 current->comm); 615 current->comm);
616 strcpy(cmd, current->comm); 616 strcpy(cmd, current->comm);
617 } 617 }
618 } 618 }
619 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); 619 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
620 return (k < 0) ? k : count; 620 return (k < 0) ? k : count;
621 } 621 }
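
sg_write() above parses the v2 framing: a struct sg_header immediately followed by the CDB, whose length comes from COMMAND_SIZE(opcode) unless SG_NEXT_CMD_LEN overrode it. A hedged userspace sketch, issuing TEST UNIT READY (a 6-byte, group-0 CDB) with a header-only reply so the direction above resolves to SG_DXFER_NONE; /dev/sg0 is illustrative.

	/* Hypothetical v2-interface write matching the parsing above. */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <scsi/sg.h>

	int main(void)
	{
		unsigned char buf[sizeof(struct sg_header) + 6];
		struct sg_header hdr;
		int fd = open("/dev/sg0", O_RDWR);

		if (fd < 0)
			return 1;
		memset(buf, 0, sizeof(buf));
		memset(&hdr, 0, sizeof(hdr));
		hdr.reply_len = sizeof(struct sg_header);	/* header-only reply */
		hdr.pack_id = 1;
		memcpy(buf, &hdr, sizeof(hdr));
		/* the 6 trailing zero bytes are the TEST UNIT READY CDB */
		if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}
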
622 622
623 static ssize_t 623 static ssize_t
624 sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count, 624 sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
625 int blocking, int read_only, Sg_request ** o_srp) 625 size_t count, int blocking, int read_only,
626 Sg_request **o_srp)
626 { 627 {
627 int k; 628 int k;
628 Sg_request *srp; 629 Sg_request *srp;
629 sg_io_hdr_t *hp; 630 sg_io_hdr_t *hp;
630 unsigned char cmnd[MAX_COMMAND_SIZE]; 631 unsigned char cmnd[MAX_COMMAND_SIZE];
631 int timeout; 632 int timeout;
632 unsigned long ul_timeout; 633 unsigned long ul_timeout;
633 634
634 if (count < SZ_SG_IO_HDR) 635 if (count < SZ_SG_IO_HDR)
635 return -EINVAL; 636 return -EINVAL;
636 if (!access_ok(VERIFY_READ, buf, count)) 637 if (!access_ok(VERIFY_READ, buf, count))
637 return -EFAULT; /* protects following copy_from_user()s + get_user()s */ 638 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
638 639
639 sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ 640 sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
640 if (!(srp = sg_add_request(sfp))) { 641 if (!(srp = sg_add_request(sfp))) {
641 SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n")); 642 SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
642 return -EDOM; 643 return -EDOM;
643 } 644 }
644 hp = &srp->header; 645 hp = &srp->header;
645 if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) { 646 if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
646 sg_remove_request(sfp, srp); 647 sg_remove_request(sfp, srp);
647 return -EFAULT; 648 return -EFAULT;
648 } 649 }
649 if (hp->interface_id != 'S') { 650 if (hp->interface_id != 'S') {
650 sg_remove_request(sfp, srp); 651 sg_remove_request(sfp, srp);
651 return -ENOSYS; 652 return -ENOSYS;
652 } 653 }
653 if (hp->flags & SG_FLAG_MMAP_IO) { 654 if (hp->flags & SG_FLAG_MMAP_IO) {
654 if (hp->dxfer_len > sfp->reserve.bufflen) { 655 if (hp->dxfer_len > sfp->reserve.bufflen) {
655 sg_remove_request(sfp, srp); 656 sg_remove_request(sfp, srp);
656 return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */ 657 return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
657 } 658 }
658 if (hp->flags & SG_FLAG_DIRECT_IO) { 659 if (hp->flags & SG_FLAG_DIRECT_IO) {
659 sg_remove_request(sfp, srp); 660 sg_remove_request(sfp, srp);
660 return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ 661 return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
661 } 662 }
662 if (sg_res_in_use(sfp)) { 663 if (sg_res_in_use(sfp)) {
663 sg_remove_request(sfp, srp); 664 sg_remove_request(sfp, srp);
664 return -EBUSY; /* reserve buffer already being used */ 665 return -EBUSY; /* reserve buffer already being used */
665 } 666 }
666 } 667 }
667 ul_timeout = msecs_to_jiffies(srp->header.timeout); 668 ul_timeout = msecs_to_jiffies(srp->header.timeout);
668 timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX; 669 timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
669 if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) { 670 if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
670 sg_remove_request(sfp, srp); 671 sg_remove_request(sfp, srp);
671 return -EMSGSIZE; 672 return -EMSGSIZE;
672 } 673 }
673 if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) { 674 if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
674 sg_remove_request(sfp, srp); 675 sg_remove_request(sfp, srp);
675 return -EFAULT; /* protects following copy_from_user()s + get_user()s */ 676 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
676 } 677 }
677 if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { 678 if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
678 sg_remove_request(sfp, srp); 679 sg_remove_request(sfp, srp);
679 return -EFAULT; 680 return -EFAULT;
680 } 681 }
681 if (read_only && 682 if (read_only && (!blk_verify_command(file, cmnd))) {
682 (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
683 sg_remove_request(sfp, srp); 683 sg_remove_request(sfp, srp);
684 return -EPERM; 684 return -EPERM;
685 } 685 }
686 k = sg_common_write(sfp, srp, cmnd, timeout, blocking); 686 k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
687 if (k < 0) 687 if (k < 0)
688 return k; 688 return k;
689 if (o_srp) 689 if (o_srp)
690 *o_srp = srp; 690 *o_srp = srp;
691 return count; 691 return count;
692 } 692 }
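
This hunk is the sg.c side of the commit: on read-only file descriptors, the static sg_allow_access() opcode whitelist gives way to blk_verify_command(file, cmnd), which consults the new per-gendisk filter (hence the extra struct file * threaded through sg_new_write()). A minimal sketch of the kind of test such a filter performs is below; the structure and names are illustrative, not lifted from cmd-filter.c.

	/* Illustrative opcode-bitmap test, one map per open mode. */
	#include <limits.h>

	#define OPCODE_BITS	256
	#define WORD_BITS	(sizeof(unsigned long) * CHAR_BIT)

	struct opcode_filter {
		unsigned long read_ok[OPCODE_BITS / WORD_BITS];
		unsigned long write_ok[OPCODE_BITS / WORD_BITS];
	};

	static int opcode_allowed(const struct opcode_filter *f,
				  unsigned char op, int file_writable)
	{
		const unsigned long *map = file_writable ? f->write_ok : f->read_ok;

		return (map[op / WORD_BITS] >> (op % WORD_BITS)) & 1UL;
	}
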
693 693
694 static int 694 static int
695 sg_common_write(Sg_fd * sfp, Sg_request * srp, 695 sg_common_write(Sg_fd * sfp, Sg_request * srp,
696 unsigned char *cmnd, int timeout, int blocking) 696 unsigned char *cmnd, int timeout, int blocking)
697 { 697 {
698 int k, data_dir; 698 int k, data_dir;
699 Sg_device *sdp = sfp->parentdp; 699 Sg_device *sdp = sfp->parentdp;
700 sg_io_hdr_t *hp = &srp->header; 700 sg_io_hdr_t *hp = &srp->header;
701 701
702 srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ 702 srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
703 hp->status = 0; 703 hp->status = 0;
704 hp->masked_status = 0; 704 hp->masked_status = 0;
705 hp->msg_status = 0; 705 hp->msg_status = 0;
706 hp->info = 0; 706 hp->info = 0;
707 hp->host_status = 0; 707 hp->host_status = 0;
708 hp->driver_status = 0; 708 hp->driver_status = 0;
709 hp->resid = 0; 709 hp->resid = 0;
710 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 710 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
711 (int) cmnd[0], (int) hp->cmd_len)); 711 (int) cmnd[0], (int) hp->cmd_len));
712 712
713 if ((k = sg_start_req(srp))) { 713 if ((k = sg_start_req(srp))) {
714 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k)); 714 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
715 sg_finish_rem_req(srp); 715 sg_finish_rem_req(srp);
716 return k; /* probably out of space --> ENOMEM */ 716 return k; /* probably out of space --> ENOMEM */
717 } 717 }
718 if ((k = sg_write_xfer(srp))) { 718 if ((k = sg_write_xfer(srp))) {
719 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n")); 719 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
720 sg_finish_rem_req(srp); 720 sg_finish_rem_req(srp);
721 return k; 721 return k;
722 } 722 }
723 if (sdp->detached) { 723 if (sdp->detached) {
724 sg_finish_rem_req(srp); 724 sg_finish_rem_req(srp);
725 return -ENODEV; 725 return -ENODEV;
726 } 726 }
727 727
728 switch (hp->dxfer_direction) { 728 switch (hp->dxfer_direction) {
729 case SG_DXFER_TO_FROM_DEV: 729 case SG_DXFER_TO_FROM_DEV:
730 case SG_DXFER_FROM_DEV: 730 case SG_DXFER_FROM_DEV:
731 data_dir = DMA_FROM_DEVICE; 731 data_dir = DMA_FROM_DEVICE;
732 break; 732 break;
733 case SG_DXFER_TO_DEV: 733 case SG_DXFER_TO_DEV:
734 data_dir = DMA_TO_DEVICE; 734 data_dir = DMA_TO_DEVICE;
735 break; 735 break;
736 case SG_DXFER_UNKNOWN: 736 case SG_DXFER_UNKNOWN:
737 data_dir = DMA_BIDIRECTIONAL; 737 data_dir = DMA_BIDIRECTIONAL;
738 break; 738 break;
739 default: 739 default:
740 data_dir = DMA_NONE; 740 data_dir = DMA_NONE;
741 break; 741 break;
742 } 742 }
743 hp->duration = jiffies_to_msecs(jiffies); 743 hp->duration = jiffies_to_msecs(jiffies);
744 /* Now send everything off to the mid-level. The next time we hear about this 744 /* Now send everything off to the mid-level. The next time we hear about this
745 packet is when sg_cmd_done() is called (i.e. a callback). */ 745 packet is when sg_cmd_done() is called (i.e. a callback). */
746 if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer, 746 if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
747 hp->dxfer_len, srp->data.k_use_sg, timeout, 747 hp->dxfer_len, srp->data.k_use_sg, timeout,
748 SG_DEFAULT_RETRIES, srp, sg_cmd_done, 748 SG_DEFAULT_RETRIES, srp, sg_cmd_done,
749 GFP_ATOMIC)) { 749 GFP_ATOMIC)) {
750 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n")); 750 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
751 /* 751 /*
752 * most likely out of mem, but could also be a bad map 752 * most likely out of mem, but could also be a bad map
753 */ 753 */
754 sg_finish_rem_req(srp); 754 sg_finish_rem_req(srp);
755 return -ENOMEM; 755 return -ENOMEM;
756 } else 756 } else
757 return 0; 757 return 0;
758 } 758 }
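
sg_common_write() maps the v3 SG_DXFER_* direction onto the DMA API's direction before handing the request to scsi_execute_async(). A hedged end-to-end userspace example of the v3 path it serves (SG_IO issuing a 36-byte INQUIRY, an opcode the default filter permits on read-only openers); /dev/sg0 is illustrative.

	/* Hypothetical v3 SG_IO example; not part of the patch. */
	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <scsi/sg.h>

	int main(void)
	{
		unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 };	/* INQUIRY */
		unsigned char data[36], sense[32];
		sg_io_hdr_t io;
		int fd = open("/dev/sg0", O_RDONLY);	/* read-only: filter applies */

		if (fd < 0)
			return 1;
		memset(&io, 0, sizeof(io));
		io.interface_id = 'S';
		io.cmd_len = sizeof(cdb);
		io.cmdp = cdb;
		io.dxfer_direction = SG_DXFER_FROM_DEV;	/* -> DMA_FROM_DEVICE */
		io.dxfer_len = sizeof(data);
		io.dxferp = data;
		io.mx_sb_len = sizeof(sense);
		io.sbp = sense;
		io.timeout = 5000;	/* milliseconds */
		if (ioctl(fd, SG_IO, &io) < 0) {
			close(fd);	/* EPERM would mean the opcode is filtered */
			return 1;
		}
		close(fd);
		return 0;
	}
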
759 759
760 static int 760 static int
761 sg_srp_done(Sg_request *srp, Sg_fd *sfp) 761 sg_srp_done(Sg_request *srp, Sg_fd *sfp)
762 { 762 {
763 unsigned long iflags; 763 unsigned long iflags;
764 int done; 764 int done;
765 765
766 read_lock_irqsave(&sfp->rq_list_lock, iflags); 766 read_lock_irqsave(&sfp->rq_list_lock, iflags);
767 done = srp->done; 767 done = srp->done;
768 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 768 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
769 return done; 769 return done;
770 } 770 }
771 771
772 static int 772 static int
773 sg_ioctl(struct inode *inode, struct file *filp, 773 sg_ioctl(struct inode *inode, struct file *filp,
774 unsigned int cmd_in, unsigned long arg) 774 unsigned int cmd_in, unsigned long arg)
775 { 775 {
776 void __user *p = (void __user *)arg; 776 void __user *p = (void __user *)arg;
777 int __user *ip = p; 777 int __user *ip = p;
778 int result, val, read_only; 778 int result, val, read_only;
779 Sg_device *sdp; 779 Sg_device *sdp;
780 Sg_fd *sfp; 780 Sg_fd *sfp;
781 Sg_request *srp; 781 Sg_request *srp;
782 unsigned long iflags; 782 unsigned long iflags;
783 783
784 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 784 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
785 return -ENXIO; 785 return -ENXIO;
786 SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n", 786 SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
787 sdp->disk->disk_name, (int) cmd_in)); 787 sdp->disk->disk_name, (int) cmd_in));
788 read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); 788 read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
789 789
790 switch (cmd_in) { 790 switch (cmd_in) {
791 case SG_IO: 791 case SG_IO:
792 { 792 {
793 int blocking = 1; /* ignore O_NONBLOCK flag */ 793 int blocking = 1; /* ignore O_NONBLOCK flag */
794 794
795 if (sdp->detached) 795 if (sdp->detached)
796 return -ENODEV; 796 return -ENODEV;
797 if (!scsi_block_when_processing_errors(sdp->device)) 797 if (!scsi_block_when_processing_errors(sdp->device))
798 return -ENXIO; 798 return -ENXIO;
799 if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR)) 799 if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
800 return -EFAULT; 800 return -EFAULT;
801 result = 801 result =
802 sg_new_write(sfp, p, SZ_SG_IO_HDR, 802 sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
803 blocking, read_only, &srp); 803 blocking, read_only, &srp);
804 if (result < 0) 804 if (result < 0)
805 return result; 805 return result;
806 srp->sg_io_owned = 1; 806 srp->sg_io_owned = 1;
807 while (1) { 807 while (1) {
808 result = 0; /* following macro to beat race condition */ 808 result = 0; /* following macro to beat race condition */
809 __wait_event_interruptible(sfp->read_wait, 809 __wait_event_interruptible(sfp->read_wait,
810 (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)), 810 (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
811 result); 811 result);
812 if (sdp->detached) 812 if (sdp->detached)
813 return -ENODEV; 813 return -ENODEV;
814 if (sfp->closed) 814 if (sfp->closed)
815 return 0; /* request packet dropped already */ 815 return 0; /* request packet dropped already */
816 if (0 == result) 816 if (0 == result)
817 break; 817 break;
818 srp->orphan = 1; 818 srp->orphan = 1;
819 return result; /* -ERESTARTSYS because signal hit process */ 819 return result; /* -ERESTARTSYS because signal hit process */
820 } 820 }
821 write_lock_irqsave(&sfp->rq_list_lock, iflags); 821 write_lock_irqsave(&sfp->rq_list_lock, iflags);
822 srp->done = 2; 822 srp->done = 2;
823 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 823 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
824 result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp); 824 result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
825 return (result < 0) ? result : 0; 825 return (result < 0) ? result : 0;
826 } 826 }
827 case SG_SET_TIMEOUT: 827 case SG_SET_TIMEOUT:
828 result = get_user(val, ip); 828 result = get_user(val, ip);
829 if (result) 829 if (result)
830 return result; 830 return result;
831 if (val < 0) 831 if (val < 0)
832 return -EIO; 832 return -EIO;
833 if (val >= MULDIV (INT_MAX, USER_HZ, HZ)) 833 if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
834 val = MULDIV (INT_MAX, USER_HZ, HZ); 834 val = MULDIV (INT_MAX, USER_HZ, HZ);
835 sfp->timeout_user = val; 835 sfp->timeout_user = val;
836 sfp->timeout = MULDIV (val, HZ, USER_HZ); 836 sfp->timeout = MULDIV (val, HZ, USER_HZ);
837 837
838 return 0; 838 return 0;
839 case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */ 839 case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
840 /* strange ..., for backward compatibility */ 840 /* strange ..., for backward compatibility */
841 return sfp->timeout_user; 841 return sfp->timeout_user;
842 case SG_SET_FORCE_LOW_DMA: 842 case SG_SET_FORCE_LOW_DMA:
843 result = get_user(val, ip); 843 result = get_user(val, ip);
844 if (result) 844 if (result)
845 return result; 845 return result;
846 if (val) { 846 if (val) {
847 sfp->low_dma = 1; 847 sfp->low_dma = 1;
848 if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { 848 if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
849 val = (int) sfp->reserve.bufflen; 849 val = (int) sfp->reserve.bufflen;
850 sg_remove_scat(&sfp->reserve); 850 sg_remove_scat(&sfp->reserve);
851 sg_build_reserve(sfp, val); 851 sg_build_reserve(sfp, val);
852 } 852 }
853 } else { 853 } else {
854 if (sdp->detached) 854 if (sdp->detached)
855 return -ENODEV; 855 return -ENODEV;
856 sfp->low_dma = sdp->device->host->unchecked_isa_dma; 856 sfp->low_dma = sdp->device->host->unchecked_isa_dma;
857 } 857 }
858 return 0; 858 return 0;
859 case SG_GET_LOW_DMA: 859 case SG_GET_LOW_DMA:
860 return put_user((int) sfp->low_dma, ip); 860 return put_user((int) sfp->low_dma, ip);
861 case SG_GET_SCSI_ID: 861 case SG_GET_SCSI_ID:
862 if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t))) 862 if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
863 return -EFAULT; 863 return -EFAULT;
864 else { 864 else {
865 sg_scsi_id_t __user *sg_idp = p; 865 sg_scsi_id_t __user *sg_idp = p;
866 866
867 if (sdp->detached) 867 if (sdp->detached)
868 return -ENODEV; 868 return -ENODEV;
869 __put_user((int) sdp->device->host->host_no, 869 __put_user((int) sdp->device->host->host_no,
870 &sg_idp->host_no); 870 &sg_idp->host_no);
871 __put_user((int) sdp->device->channel, 871 __put_user((int) sdp->device->channel,
872 &sg_idp->channel); 872 &sg_idp->channel);
873 __put_user((int) sdp->device->id, &sg_idp->scsi_id); 873 __put_user((int) sdp->device->id, &sg_idp->scsi_id);
874 __put_user((int) sdp->device->lun, &sg_idp->lun); 874 __put_user((int) sdp->device->lun, &sg_idp->lun);
875 __put_user((int) sdp->device->type, &sg_idp->scsi_type); 875 __put_user((int) sdp->device->type, &sg_idp->scsi_type);
876 __put_user((short) sdp->device->host->cmd_per_lun, 876 __put_user((short) sdp->device->host->cmd_per_lun,
877 &sg_idp->h_cmd_per_lun); 877 &sg_idp->h_cmd_per_lun);
878 __put_user((short) sdp->device->queue_depth, 878 __put_user((short) sdp->device->queue_depth,
879 &sg_idp->d_queue_depth); 879 &sg_idp->d_queue_depth);
880 __put_user(0, &sg_idp->unused[0]); 880 __put_user(0, &sg_idp->unused[0]);
881 __put_user(0, &sg_idp->unused[1]); 881 __put_user(0, &sg_idp->unused[1]);
882 return 0; 882 return 0;
883 } 883 }
884 case SG_SET_FORCE_PACK_ID: 884 case SG_SET_FORCE_PACK_ID:
885 result = get_user(val, ip); 885 result = get_user(val, ip);
886 if (result) 886 if (result)
887 return result; 887 return result;
888 sfp->force_packid = val ? 1 : 0; 888 sfp->force_packid = val ? 1 : 0;
889 return 0; 889 return 0;
890 case SG_GET_PACK_ID: 890 case SG_GET_PACK_ID:
891 if (!access_ok(VERIFY_WRITE, ip, sizeof (int))) 891 if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
892 return -EFAULT; 892 return -EFAULT;
893 read_lock_irqsave(&sfp->rq_list_lock, iflags); 893 read_lock_irqsave(&sfp->rq_list_lock, iflags);
894 for (srp = sfp->headrp; srp; srp = srp->nextrp) { 894 for (srp = sfp->headrp; srp; srp = srp->nextrp) {
895 if ((1 == srp->done) && (!srp->sg_io_owned)) { 895 if ((1 == srp->done) && (!srp->sg_io_owned)) {
896 read_unlock_irqrestore(&sfp->rq_list_lock, 896 read_unlock_irqrestore(&sfp->rq_list_lock,
897 iflags); 897 iflags);
898 __put_user(srp->header.pack_id, ip); 898 __put_user(srp->header.pack_id, ip);
899 return 0; 899 return 0;
900 } 900 }
901 } 901 }
902 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 902 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
903 __put_user(-1, ip); 903 __put_user(-1, ip);
904 return 0; 904 return 0;
905 case SG_GET_NUM_WAITING: 905 case SG_GET_NUM_WAITING:
906 read_lock_irqsave(&sfp->rq_list_lock, iflags); 906 read_lock_irqsave(&sfp->rq_list_lock, iflags);
907 for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) { 907 for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
908 if ((1 == srp->done) && (!srp->sg_io_owned)) 908 if ((1 == srp->done) && (!srp->sg_io_owned))
909 ++val; 909 ++val;
910 } 910 }
911 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 911 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
912 return put_user(val, ip); 912 return put_user(val, ip);
913 case SG_GET_SG_TABLESIZE: 913 case SG_GET_SG_TABLESIZE:
914 return put_user(sdp->sg_tablesize, ip); 914 return put_user(sdp->sg_tablesize, ip);
915 case SG_SET_RESERVED_SIZE: 915 case SG_SET_RESERVED_SIZE:
916 result = get_user(val, ip); 916 result = get_user(val, ip);
917 if (result) 917 if (result)
918 return result; 918 return result;
919 if (val < 0) 919 if (val < 0)
920 return -EINVAL; 920 return -EINVAL;
921 val = min_t(int, val, 921 val = min_t(int, val,
922 sdp->device->request_queue->max_sectors * 512); 922 sdp->device->request_queue->max_sectors * 512);
923 if (val != sfp->reserve.bufflen) { 923 if (val != sfp->reserve.bufflen) {
924 if (sg_res_in_use(sfp) || sfp->mmap_called) 924 if (sg_res_in_use(sfp) || sfp->mmap_called)
925 return -EBUSY; 925 return -EBUSY;
926 sg_remove_scat(&sfp->reserve); 926 sg_remove_scat(&sfp->reserve);
927 sg_build_reserve(sfp, val); 927 sg_build_reserve(sfp, val);
928 } 928 }
929 return 0; 929 return 0;
930 case SG_GET_RESERVED_SIZE: 930 case SG_GET_RESERVED_SIZE:
931 val = min_t(int, sfp->reserve.bufflen, 931 val = min_t(int, sfp->reserve.bufflen,
932 sdp->device->request_queue->max_sectors * 512); 932 sdp->device->request_queue->max_sectors * 512);
933 return put_user(val, ip); 933 return put_user(val, ip);
934 case SG_SET_COMMAND_Q: 934 case SG_SET_COMMAND_Q:
935 result = get_user(val, ip); 935 result = get_user(val, ip);
936 if (result) 936 if (result)
937 return result; 937 return result;
938 sfp->cmd_q = val ? 1 : 0; 938 sfp->cmd_q = val ? 1 : 0;
939 return 0; 939 return 0;
940 case SG_GET_COMMAND_Q: 940 case SG_GET_COMMAND_Q:
941 return put_user((int) sfp->cmd_q, ip); 941 return put_user((int) sfp->cmd_q, ip);
942 case SG_SET_KEEP_ORPHAN: 942 case SG_SET_KEEP_ORPHAN:
943 result = get_user(val, ip); 943 result = get_user(val, ip);
944 if (result) 944 if (result)
945 return result; 945 return result;
946 sfp->keep_orphan = val; 946 sfp->keep_orphan = val;
947 return 0; 947 return 0;
948 case SG_GET_KEEP_ORPHAN: 948 case SG_GET_KEEP_ORPHAN:
949 return put_user((int) sfp->keep_orphan, ip); 949 return put_user((int) sfp->keep_orphan, ip);
950 case SG_NEXT_CMD_LEN: 950 case SG_NEXT_CMD_LEN:
951 result = get_user(val, ip); 951 result = get_user(val, ip);
952 if (result) 952 if (result)
953 return result; 953 return result;
954 sfp->next_cmd_len = (val > 0) ? val : 0; 954 sfp->next_cmd_len = (val > 0) ? val : 0;
955 return 0; 955 return 0;
956 case SG_GET_VERSION_NUM: 956 case SG_GET_VERSION_NUM:
957 return put_user(sg_version_num, ip); 957 return put_user(sg_version_num, ip);
958 case SG_GET_ACCESS_COUNT: 958 case SG_GET_ACCESS_COUNT:
959 /* faked - we don't have a real access count anymore */ 959 /* faked - we don't have a real access count anymore */
960 val = (sdp->device ? 1 : 0); 960 val = (sdp->device ? 1 : 0);
961 return put_user(val, ip); 961 return put_user(val, ip);
962 case SG_GET_REQUEST_TABLE: 962 case SG_GET_REQUEST_TABLE:
963 if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE)) 963 if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
964 return -EFAULT; 964 return -EFAULT;
965 else { 965 else {
966 sg_req_info_t *rinfo; 966 sg_req_info_t *rinfo;
967 unsigned int ms; 967 unsigned int ms;
968 968
969 rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, 969 rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
970 GFP_KERNEL); 970 GFP_KERNEL);
971 if (!rinfo) 971 if (!rinfo)
972 return -ENOMEM; 972 return -ENOMEM;
973 read_lock_irqsave(&sfp->rq_list_lock, iflags); 973 read_lock_irqsave(&sfp->rq_list_lock, iflags);
974 for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE; 974 for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
975 ++val, srp = srp ? srp->nextrp : srp) { 975 ++val, srp = srp ? srp->nextrp : srp) {
976 memset(&rinfo[val], 0, SZ_SG_REQ_INFO); 976 memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
977 if (srp) { 977 if (srp) {
978 rinfo[val].req_state = srp->done + 1; 978 rinfo[val].req_state = srp->done + 1;
979 rinfo[val].problem = 979 rinfo[val].problem =
980 srp->header.masked_status & 980 srp->header.masked_status &
981 srp->header.host_status & 981 srp->header.host_status &
982 srp->header.driver_status; 982 srp->header.driver_status;
983 if (srp->done) 983 if (srp->done)
984 rinfo[val].duration = 984 rinfo[val].duration =
985 srp->header.duration; 985 srp->header.duration;
986 else { 986 else {
987 ms = jiffies_to_msecs(jiffies); 987 ms = jiffies_to_msecs(jiffies);
988 rinfo[val].duration = 988 rinfo[val].duration =
989 (ms > srp->header.duration) ? 989 (ms > srp->header.duration) ?
990 (ms - srp->header.duration) : 0; 990 (ms - srp->header.duration) : 0;
991 } 991 }
992 rinfo[val].orphan = srp->orphan; 992 rinfo[val].orphan = srp->orphan;
993 rinfo[val].sg_io_owned = 993 rinfo[val].sg_io_owned =
994 srp->sg_io_owned; 994 srp->sg_io_owned;
995 rinfo[val].pack_id = 995 rinfo[val].pack_id =
996 srp->header.pack_id; 996 srp->header.pack_id;
997 rinfo[val].usr_ptr = 997 rinfo[val].usr_ptr =
998 srp->header.usr_ptr; 998 srp->header.usr_ptr;
999 } 999 }
1000 } 1000 }
1001 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 1001 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1002 result = __copy_to_user(p, rinfo, 1002 result = __copy_to_user(p, rinfo,
1003 SZ_SG_REQ_INFO * SG_MAX_QUEUE); 1003 SZ_SG_REQ_INFO * SG_MAX_QUEUE);
1004 result = result ? -EFAULT : 0; 1004 result = result ? -EFAULT : 0;
1005 kfree(rinfo); 1005 kfree(rinfo);
1006 return result; 1006 return result;
1007 } 1007 }
1008 case SG_EMULATED_HOST: 1008 case SG_EMULATED_HOST:
1009 if (sdp->detached) 1009 if (sdp->detached)
1010 return -ENODEV; 1010 return -ENODEV;
1011 return put_user(sdp->device->host->hostt->emulated, ip); 1011 return put_user(sdp->device->host->hostt->emulated, ip);
1012 case SG_SCSI_RESET: 1012 case SG_SCSI_RESET:
1013 if (sdp->detached) 1013 if (sdp->detached)
1014 return -ENODEV; 1014 return -ENODEV;
1015 if (filp->f_flags & O_NONBLOCK) { 1015 if (filp->f_flags & O_NONBLOCK) {
1016 if (scsi_host_in_recovery(sdp->device->host)) 1016 if (scsi_host_in_recovery(sdp->device->host))
1017 return -EBUSY; 1017 return -EBUSY;
1018 } else if (!scsi_block_when_processing_errors(sdp->device)) 1018 } else if (!scsi_block_when_processing_errors(sdp->device))
1019 return -EBUSY; 1019 return -EBUSY;
1020 result = get_user(val, ip); 1020 result = get_user(val, ip);
1021 if (result) 1021 if (result)
1022 return result; 1022 return result;
1023 if (SG_SCSI_RESET_NOTHING == val) 1023 if (SG_SCSI_RESET_NOTHING == val)
1024 return 0; 1024 return 0;
1025 switch (val) { 1025 switch (val) {
1026 case SG_SCSI_RESET_DEVICE: 1026 case SG_SCSI_RESET_DEVICE:
1027 val = SCSI_TRY_RESET_DEVICE; 1027 val = SCSI_TRY_RESET_DEVICE;
1028 break; 1028 break;
1029 case SG_SCSI_RESET_BUS: 1029 case SG_SCSI_RESET_BUS:
1030 val = SCSI_TRY_RESET_BUS; 1030 val = SCSI_TRY_RESET_BUS;
1031 break; 1031 break;
1032 case SG_SCSI_RESET_HOST: 1032 case SG_SCSI_RESET_HOST:
1033 val = SCSI_TRY_RESET_HOST; 1033 val = SCSI_TRY_RESET_HOST;
1034 break; 1034 break;
1035 default: 1035 default:
1036 return -EINVAL; 1036 return -EINVAL;
1037 } 1037 }
1038 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 1038 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1039 return -EACCES; 1039 return -EACCES;
1040 return (scsi_reset_provider(sdp->device, val) == 1040 return (scsi_reset_provider(sdp->device, val) ==
1041 SUCCESS) ? 0 : -EIO; 1041 SUCCESS) ? 0 : -EIO;
1042 case SCSI_IOCTL_SEND_COMMAND: 1042 case SCSI_IOCTL_SEND_COMMAND:
1043 if (sdp->detached) 1043 if (sdp->detached)
1044 return -ENODEV; 1044 return -ENODEV;
1045 if (read_only) { 1045 if (read_only) {
1046 unsigned char opcode = WRITE_6; 1046 unsigned char opcode = WRITE_6;
1047 Scsi_Ioctl_Command __user *siocp = p; 1047 Scsi_Ioctl_Command __user *siocp = p;
1048 1048
1049 if (copy_from_user(&opcode, siocp->data, 1)) 1049 if (copy_from_user(&opcode, siocp->data, 1))
1050 return -EFAULT; 1050 return -EFAULT;
1051 if (!sg_allow_access(opcode, sdp->device->type)) 1051 if (!blk_verify_command(filp, &opcode))
1052 return -EPERM; 1052 return -EPERM;
1053 } 1053 }
1054 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p); 1054 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
1055 case SG_SET_DEBUG: 1055 case SG_SET_DEBUG:
1056 result = get_user(val, ip); 1056 result = get_user(val, ip);
1057 if (result) 1057 if (result)
1058 return result; 1058 return result;
1059 sdp->sgdebug = (char) val; 1059 sdp->sgdebug = (char) val;
1060 return 0; 1060 return 0;
1061 case SCSI_IOCTL_GET_IDLUN: 1061 case SCSI_IOCTL_GET_IDLUN:
1062 case SCSI_IOCTL_GET_BUS_NUMBER: 1062 case SCSI_IOCTL_GET_BUS_NUMBER:
1063 case SCSI_IOCTL_PROBE_HOST: 1063 case SCSI_IOCTL_PROBE_HOST:
1064 case SG_GET_TRANSFORM: 1064 case SG_GET_TRANSFORM:
1065 if (sdp->detached) 1065 if (sdp->detached)
1066 return -ENODEV; 1066 return -ENODEV;
1067 return scsi_ioctl(sdp->device, cmd_in, p); 1067 return scsi_ioctl(sdp->device, cmd_in, p);
1068 case BLKSECTGET: 1068 case BLKSECTGET:
1069 return put_user(sdp->device->request_queue->max_sectors * 512, 1069 return put_user(sdp->device->request_queue->max_sectors * 512,
1070 ip); 1070 ip);
1071 case BLKTRACESETUP: 1071 case BLKTRACESETUP:
1072 return blk_trace_setup(sdp->device->request_queue, 1072 return blk_trace_setup(sdp->device->request_queue,
1073 sdp->disk->disk_name, 1073 sdp->disk->disk_name,
1074 sdp->device->sdev_gendev.devt, 1074 sdp->device->sdev_gendev.devt,
1075 (char *)arg); 1075 (char *)arg);
1076 case BLKTRACESTART: 1076 case BLKTRACESTART:
1077 return blk_trace_startstop(sdp->device->request_queue, 1); 1077 return blk_trace_startstop(sdp->device->request_queue, 1);
1078 case BLKTRACESTOP: 1078 case BLKTRACESTOP:
1079 return blk_trace_startstop(sdp->device->request_queue, 0); 1079 return blk_trace_startstop(sdp->device->request_queue, 0);
1080 case BLKTRACETEARDOWN: 1080 case BLKTRACETEARDOWN:
1081 return blk_trace_remove(sdp->device->request_queue); 1081 return blk_trace_remove(sdp->device->request_queue);
1082 default: 1082 default:
1083 if (read_only) 1083 if (read_only)
1084 return -EPERM; /* don't know so take safe approach */ 1084 return -EPERM; /* don't know so take safe approach */
1085 return scsi_ioctl(sdp->device, cmd_in, p); 1085 return scsi_ioctl(sdp->device, cmd_in, p);
1086 } 1086 }
1087 } 1087 }
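
Both SG_IO and SCSI_IOCTL_SEND_COMMAND now funnel read-only openers through blk_verify_command(), so the effective whitelist is the per-disk table this commit exports through sysfs. A hypothetical administrator-side sketch follows; the cmd_filter/read_table attribute name and its hex-opcode write format are assumptions about the new interface, not verified here.

	/* Hypothetical sysfs usage: show, then extend, the read-side table. */
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/block/sda/cmd_filter/read_table";
		char line[1024];
		FILE *f = fopen(path, "r");

		if (!f)
			return 1;
		if (fgets(line, sizeof(line), f))
			printf("allowed: %s", line);	/* current opcode list */
		fclose(f);

		f = fopen(path, "w");
		if (!f)
			return 1;
		fprintf(f, "0x35\n");	/* e.g. permit SYNCHRONIZE CACHE (10) */
		return fclose(f) ? 1 : 0;
	}
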
1088 1088
1089 #ifdef CONFIG_COMPAT 1089 #ifdef CONFIG_COMPAT
1090 static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) 1090 static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1091 { 1091 {
1092 Sg_device *sdp; 1092 Sg_device *sdp;
1093 Sg_fd *sfp; 1093 Sg_fd *sfp;
1094 struct scsi_device *sdev; 1094 struct scsi_device *sdev;
1095 1095
1096 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 1096 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1097 return -ENXIO; 1097 return -ENXIO;
1098 1098
1099 sdev = sdp->device; 1099 sdev = sdp->device;
1100 if (sdev->host->hostt->compat_ioctl) { 1100 if (sdev->host->hostt->compat_ioctl) {
1101 int ret; 1101 int ret;
1102 1102
1103 ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg); 1103 ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
1104 1104
1105 return ret; 1105 return ret;
1106 } 1106 }
1107 1107
1108 return -ENOIOCTLCMD; 1108 return -ENOIOCTLCMD;
1109 } 1109 }
1110 #endif 1110 #endif
1111 1111
1112 static unsigned int 1112 static unsigned int
1113 sg_poll(struct file *filp, poll_table * wait) 1113 sg_poll(struct file *filp, poll_table * wait)
1114 { 1114 {
1115 unsigned int res = 0; 1115 unsigned int res = 0;
1116 Sg_device *sdp; 1116 Sg_device *sdp;
1117 Sg_fd *sfp; 1117 Sg_fd *sfp;
1118 Sg_request *srp; 1118 Sg_request *srp;
1119 int count = 0; 1119 int count = 0;
1120 unsigned long iflags; 1120 unsigned long iflags;
1121 1121
1122 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)) 1122 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
1123 || sfp->closed) 1123 || sfp->closed)
1124 return POLLERR; 1124 return POLLERR;
1125 poll_wait(filp, &sfp->read_wait, wait); 1125 poll_wait(filp, &sfp->read_wait, wait);
1126 read_lock_irqsave(&sfp->rq_list_lock, iflags); 1126 read_lock_irqsave(&sfp->rq_list_lock, iflags);
1127 for (srp = sfp->headrp; srp; srp = srp->nextrp) { 1127 for (srp = sfp->headrp; srp; srp = srp->nextrp) {
1128 /* if any read waiting, flag it */ 1128 /* if any read waiting, flag it */
1129 if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned)) 1129 if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
1130 res = POLLIN | POLLRDNORM; 1130 res = POLLIN | POLLRDNORM;
1131 ++count; 1131 ++count;
1132 } 1132 }
1133 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 1133 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1134 1134
1135 if (sdp->detached) 1135 if (sdp->detached)
1136 res |= POLLHUP; 1136 res |= POLLHUP;
1137 else if (!sfp->cmd_q) { 1137 else if (!sfp->cmd_q) {
1138 if (0 == count) 1138 if (0 == count)
1139 res |= POLLOUT | POLLWRNORM; 1139 res |= POLLOUT | POLLWRNORM;
1140 } else if (count < SG_MAX_QUEUE) 1140 } else if (count < SG_MAX_QUEUE)
1141 res |= POLLOUT | POLLWRNORM; 1141 res |= POLLOUT | POLLWRNORM;
1142 SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n", 1142 SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
1143 sdp->disk->disk_name, (int) res)); 1143 sdp->disk->disk_name, (int) res));
1144 return res; 1144 return res;
1145 } 1145 }
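
sg_poll() raises POLLIN | POLLRDNORM once a non-SG_IO request completes, POLLOUT | POLLWRNORM while queue slots remain, and POLLHUP after detach. A caller-side helper matching those semantics (a sketch, not from the patch):

	#include <poll.h>

	/* Wait until a reply is ready so the following read() will not block. */
	static int wait_for_reply(int fd, int timeout_ms)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		if (poll(&pfd, 1, timeout_ms) <= 0)
			return -1;		/* error or timeout */
		if (pfd.revents & POLLHUP)
			return -1;		/* device detached */
		return (pfd.revents & POLLIN) ? 0 : -1;
	}
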
1146 1146
1147 static int 1147 static int
1148 sg_fasync(int fd, struct file *filp, int mode) 1148 sg_fasync(int fd, struct file *filp, int mode)
1149 { 1149 {
1150 int retval; 1150 int retval;
1151 Sg_device *sdp; 1151 Sg_device *sdp;
1152 Sg_fd *sfp; 1152 Sg_fd *sfp;
1153 1153
1154 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 1154 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1155 return -ENXIO; 1155 return -ENXIO;
1156 SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n", 1156 SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
1157 sdp->disk->disk_name, mode)); 1157 sdp->disk->disk_name, mode));
1158 1158
1159 retval = fasync_helper(fd, filp, mode, &sfp->async_qp); 1159 retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
1160 return (retval < 0) ? retval : 0; 1160 return (retval < 0) ? retval : 0;
1161 } 1161 }
1162 1162
1163 static int 1163 static int
1164 sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1164 sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1165 { 1165 {
1166 Sg_fd *sfp; 1166 Sg_fd *sfp;
1167 unsigned long offset, len, sa; 1167 unsigned long offset, len, sa;
1168 Sg_scatter_hold *rsv_schp; 1168 Sg_scatter_hold *rsv_schp;
1169 struct scatterlist *sg; 1169 struct scatterlist *sg;
1170 int k; 1170 int k;
1171 1171
1172 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) 1172 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1173 return VM_FAULT_SIGBUS; 1173 return VM_FAULT_SIGBUS;
1174 rsv_schp = &sfp->reserve; 1174 rsv_schp = &sfp->reserve;
1175 offset = vmf->pgoff << PAGE_SHIFT; 1175 offset = vmf->pgoff << PAGE_SHIFT;
1176 if (offset >= rsv_schp->bufflen) 1176 if (offset >= rsv_schp->bufflen)
1177 return VM_FAULT_SIGBUS; 1177 return VM_FAULT_SIGBUS;
1178 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n", 1178 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
1179 offset, rsv_schp->k_use_sg)); 1179 offset, rsv_schp->k_use_sg));
1180 sg = rsv_schp->buffer; 1180 sg = rsv_schp->buffer;
1181 sa = vma->vm_start; 1181 sa = vma->vm_start;
1182 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1182 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1183 ++k, sg = sg_next(sg)) { 1183 ++k, sg = sg_next(sg)) {
1184 len = vma->vm_end - sa; 1184 len = vma->vm_end - sa;
1185 len = (len < sg->length) ? len : sg->length; 1185 len = (len < sg->length) ? len : sg->length;
1186 if (offset < len) { 1186 if (offset < len) {
1187 struct page *page; 1187 struct page *page;
1188 page = virt_to_page(page_address(sg_page(sg)) + offset); 1188 page = virt_to_page(page_address(sg_page(sg)) + offset);
1189 get_page(page); /* increment page count */ 1189 get_page(page); /* increment page count */
1190 vmf->page = page; 1190 vmf->page = page;
1191 return 0; /* success */ 1191 return 0; /* success */
1192 } 1192 }
1193 sa += len; 1193 sa += len;
1194 offset -= len; 1194 offset -= len;
1195 } 1195 }
1196 1196
1197 return VM_FAULT_SIGBUS; 1197 return VM_FAULT_SIGBUS;
1198 } 1198 }
1199 1199
1200 static struct vm_operations_struct sg_mmap_vm_ops = { 1200 static struct vm_operations_struct sg_mmap_vm_ops = {
1201 .fault = sg_vma_fault, 1201 .fault = sg_vma_fault,
1202 }; 1202 };
1203 1203
1204 static int 1204 static int
1205 sg_mmap(struct file *filp, struct vm_area_struct *vma) 1205 sg_mmap(struct file *filp, struct vm_area_struct *vma)
1206 { 1206 {
1207 Sg_fd *sfp; 1207 Sg_fd *sfp;
1208 unsigned long req_sz, len, sa; 1208 unsigned long req_sz, len, sa;
1209 Sg_scatter_hold *rsv_schp; 1209 Sg_scatter_hold *rsv_schp;
1210 int k; 1210 int k;
1211 struct scatterlist *sg; 1211 struct scatterlist *sg;
1212 1212
1213 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) 1213 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1214 return -ENXIO; 1214 return -ENXIO;
1215 req_sz = vma->vm_end - vma->vm_start; 1215 req_sz = vma->vm_end - vma->vm_start;
1216 SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n", 1216 SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
1217 (void *) vma->vm_start, (int) req_sz)); 1217 (void *) vma->vm_start, (int) req_sz));
1218 if (vma->vm_pgoff) 1218 if (vma->vm_pgoff)
1219 return -EINVAL; /* want no offset */ 1219 return -EINVAL; /* want no offset */
1220 rsv_schp = &sfp->reserve; 1220 rsv_schp = &sfp->reserve;
1221 if (req_sz > rsv_schp->bufflen) 1221 if (req_sz > rsv_schp->bufflen)
1222 return -ENOMEM; /* cannot map more than reserved buffer */ 1222 return -ENOMEM; /* cannot map more than reserved buffer */
1223 1223
1224 sa = vma->vm_start; 1224 sa = vma->vm_start;
1225 sg = rsv_schp->buffer; 1225 sg = rsv_schp->buffer;
1226 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1226 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1227 ++k, sg = sg_next(sg)) { 1227 ++k, sg = sg_next(sg)) {
1228 len = vma->vm_end - sa; 1228 len = vma->vm_end - sa;
1229 len = (len < sg->length) ? len : sg->length; 1229 len = (len < sg->length) ? len : sg->length;
1230 sa += len; 1230 sa += len;
1231 } 1231 }
1232 1232
1233 sfp->mmap_called = 1; 1233 sfp->mmap_called = 1;
1234 vma->vm_flags |= VM_RESERVED; 1234 vma->vm_flags |= VM_RESERVED;
1235 vma->vm_private_data = sfp; 1235 vma->vm_private_data = sfp;
1236 vma->vm_ops = &sg_mmap_vm_ops; 1236 vma->vm_ops = &sg_mmap_vm_ops;
1237 return 0; 1237 return 0;
1238 } 1238 }
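
sg_mmap() only validates and tags the VMA; pages materialize lazily in sg_vma_fault() out of the per-fd reserve buffer. Hypothetical userspace usage, sized from SG_GET_RESERVED_SIZE, after which an SG_IO with SG_FLAG_MMAP_IO transfers through the mapping; /dev/sg0 is illustrative.

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <scsi/sg.h>

	int main(void)
	{
		int fd = open("/dev/sg0", O_RDWR);
		int sz = 0;
		void *p;

		if (fd < 0)
			return 1;
		if (ioctl(fd, SG_GET_RESERVED_SIZE, &sz) < 0 || sz <= 0) {
			close(fd);
			return 1;
		}
		p = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) {
			close(fd);
			return 1;
		}
		/* a later SG_IO with hp->flags |= SG_FLAG_MMAP_IO lands here */
		munmap(p, sz);
		close(fd);
		return 0;
	}
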
1239 1239
1240 /* This function is a "bottom half" handler that is called by the 1240 /* This function is a "bottom half" handler that is called by the
1241 * mid level when a command is completed (or has failed). */ 1241 * mid level when a command is completed (or has failed). */
1242 static void 1242 static void
1243 sg_cmd_done(void *data, char *sense, int result, int resid) 1243 sg_cmd_done(void *data, char *sense, int result, int resid)
1244 { 1244 {
1245 Sg_request *srp = data; 1245 Sg_request *srp = data;
1246 Sg_device *sdp = NULL; 1246 Sg_device *sdp = NULL;
1247 Sg_fd *sfp; 1247 Sg_fd *sfp;
1248 unsigned long iflags; 1248 unsigned long iflags;
1249 unsigned int ms; 1249 unsigned int ms;
1250 1250
1251 if (NULL == srp) { 1251 if (NULL == srp) {
1252 printk(KERN_ERR "sg_cmd_done: NULL request\n"); 1252 printk(KERN_ERR "sg_cmd_done: NULL request\n");
1253 return; 1253 return;
1254 } 1254 }
1255 sfp = srp->parentfp; 1255 sfp = srp->parentfp;
1256 if (sfp) 1256 if (sfp)
1257 sdp = sfp->parentdp; 1257 sdp = sfp->parentdp;
1258 if ((NULL == sdp) || sdp->detached) { 1258 if ((NULL == sdp) || sdp->detached) {
1259 printk(KERN_INFO "sg_cmd_done: device detached\n"); 1259 printk(KERN_INFO "sg_cmd_done: device detached\n");
1260 return; 1260 return;
1261 } 1261 }
1262 1262
1263 1263
1264 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1264 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1265 sdp->disk->disk_name, srp->header.pack_id, result)); 1265 sdp->disk->disk_name, srp->header.pack_id, result));
1266 srp->header.resid = resid; 1266 srp->header.resid = resid;
1267 ms = jiffies_to_msecs(jiffies); 1267 ms = jiffies_to_msecs(jiffies);
1268 srp->header.duration = (ms > srp->header.duration) ? 1268 srp->header.duration = (ms > srp->header.duration) ?
1269 (ms - srp->header.duration) : 0; 1269 (ms - srp->header.duration) : 0;
1270 if (0 != result) { 1270 if (0 != result) {
1271 struct scsi_sense_hdr sshdr; 1271 struct scsi_sense_hdr sshdr;
1272 1272
1273 memcpy(srp->sense_b, sense, sizeof (srp->sense_b)); 1273 memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
1274 srp->header.status = 0xff & result; 1274 srp->header.status = 0xff & result;
1275 srp->header.masked_status = status_byte(result); 1275 srp->header.masked_status = status_byte(result);
1276 srp->header.msg_status = msg_byte(result); 1276 srp->header.msg_status = msg_byte(result);
1277 srp->header.host_status = host_byte(result); 1277 srp->header.host_status = host_byte(result);
1278 srp->header.driver_status = driver_byte(result); 1278 srp->header.driver_status = driver_byte(result);
1279 if ((sdp->sgdebug > 0) && 1279 if ((sdp->sgdebug > 0) &&
1280 ((CHECK_CONDITION == srp->header.masked_status) || 1280 ((CHECK_CONDITION == srp->header.masked_status) ||
1281 (COMMAND_TERMINATED == srp->header.masked_status))) 1281 (COMMAND_TERMINATED == srp->header.masked_status)))
1282 __scsi_print_sense("sg_cmd_done", sense, 1282 __scsi_print_sense("sg_cmd_done", sense,
1283 SCSI_SENSE_BUFFERSIZE); 1283 SCSI_SENSE_BUFFERSIZE);
1284 1284
1285 /* Following if statement is a patch supplied by Eric Youngdale */ 1285 /* Following if statement is a patch supplied by Eric Youngdale */
1286 if (driver_byte(result) != 0 1286 if (driver_byte(result) != 0
1287 && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr) 1287 && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
1288 && !scsi_sense_is_deferred(&sshdr) 1288 && !scsi_sense_is_deferred(&sshdr)
1289 && sshdr.sense_key == UNIT_ATTENTION 1289 && sshdr.sense_key == UNIT_ATTENTION
1290 && sdp->device->removable) { 1290 && sdp->device->removable) {
1291 /* Detected possible disc change. Set the bit - this */ 1291 /* Detected possible disc change. Set the bit - this */
1292 /* may be used if there are filesystems using this device */ 1292 /* may be used if there are filesystems using this device */
1293 sdp->device->changed = 1; 1293 sdp->device->changed = 1;
1294 } 1294 }
1295 } 1295 }
1296 /* Rely on write phase to clean out srp status values, so no "else" */ 1296 /* Rely on write phase to clean out srp status values, so no "else" */
1297 1297
1298 if (sfp->closed) { /* whoops this fd already released, cleanup */ 1298 if (sfp->closed) { /* whoops this fd already released, cleanup */
1299 SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n")); 1299 SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
1300 sg_finish_rem_req(srp); 1300 sg_finish_rem_req(srp);
1301 srp = NULL; 1301 srp = NULL;
1302 if (NULL == sfp->headrp) { 1302 if (NULL == sfp->headrp) {
1303 SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, final cleanup\n")); 1303 SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, final cleanup\n"));
1304 if (0 == sg_remove_sfp(sdp, sfp)) { /* device still present */ 1304 if (0 == sg_remove_sfp(sdp, sfp)) { /* device still present */
1305 scsi_device_put(sdp->device); 1305 scsi_device_put(sdp->device);
1306 } 1306 }
1307 sfp = NULL; 1307 sfp = NULL;
1308 } 1308 }
1309 } else if (srp && srp->orphan) { 1309 } else if (srp && srp->orphan) {
1310 if (sfp->keep_orphan) 1310 if (sfp->keep_orphan)
1311 srp->sg_io_owned = 0; 1311 srp->sg_io_owned = 0;
1312 else { 1312 else {
1313 sg_finish_rem_req(srp); 1313 sg_finish_rem_req(srp);
1314 srp = NULL; 1314 srp = NULL;
1315 } 1315 }
1316 } 1316 }
1317 if (sfp && srp) { 1317 if (sfp && srp) {
1318 /* Now wake up any sg_read() that is waiting for this packet. */ 1318 /* Now wake up any sg_read() that is waiting for this packet. */
1319 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); 1319 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
1320 write_lock_irqsave(&sfp->rq_list_lock, iflags); 1320 write_lock_irqsave(&sfp->rq_list_lock, iflags);
1321 srp->done = 1; 1321 srp->done = 1;
1322 wake_up_interruptible(&sfp->read_wait); 1322 wake_up_interruptible(&sfp->read_wait);
1323 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 1323 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1324 } 1324 }
1325 } 1325 }
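
The orphan branch above is what SG_SET_FORCE_PACK_ID's sibling, SG_SET_KEEP_ORPHAN, controls: with it set, a request whose SG_IO wait was interrupted by a signal stays reapable by a later read() instead of being freed on completion. A one-call opt-in sketch:

	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	/* Keep orphaned (signal-interrupted SG_IO) requests around. */
	static int keep_orphans(int fd)
	{
		int val = 1;

		return ioctl(fd, SG_SET_KEEP_ORPHAN, &val);
	}
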
1326 1326
1327 static struct file_operations sg_fops = { 1327 static struct file_operations sg_fops = {
1328 .owner = THIS_MODULE, 1328 .owner = THIS_MODULE,
1329 .read = sg_read, 1329 .read = sg_read,
1330 .write = sg_write, 1330 .write = sg_write,
1331 .poll = sg_poll, 1331 .poll = sg_poll,
1332 .ioctl = sg_ioctl, 1332 .ioctl = sg_ioctl,
1333 #ifdef CONFIG_COMPAT 1333 #ifdef CONFIG_COMPAT
1334 .compat_ioctl = sg_compat_ioctl, 1334 .compat_ioctl = sg_compat_ioctl,
1335 #endif 1335 #endif
1336 .open = sg_open, 1336 .open = sg_open,
1337 .mmap = sg_mmap, 1337 .mmap = sg_mmap,
1338 .release = sg_release, 1338 .release = sg_release,
1339 .fasync = sg_fasync, 1339 .fasync = sg_fasync,
1340 }; 1340 };
1341 1341
1342 static struct class *sg_sysfs_class; 1342 static struct class *sg_sysfs_class;
1343 1343
1344 static int sg_sysfs_valid = 0; 1344 static int sg_sysfs_valid = 0;
1345 1345
1346 static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) 1346 static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1347 { 1347 {
1348 struct request_queue *q = scsidp->request_queue; 1348 struct request_queue *q = scsidp->request_queue;
1349 Sg_device *sdp; 1349 Sg_device *sdp;
1350 unsigned long iflags; 1350 unsigned long iflags;
1351 int error; 1351 int error;
1352 u32 k; 1352 u32 k;
1353 1353
1354 sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL); 1354 sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
1355 if (!sdp) { 1355 if (!sdp) {
1356 printk(KERN_WARNING "kmalloc Sg_device failure\n"); 1356 printk(KERN_WARNING "kmalloc Sg_device failure\n");
1357 return ERR_PTR(-ENOMEM); 1357 return ERR_PTR(-ENOMEM);
1358 } 1358 }
1359 error = -ENOMEM; 1359 error = -ENOMEM;
1360 if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) { 1360 if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
1361 printk(KERN_WARNING "idr expansion Sg_device failure\n"); 1361 printk(KERN_WARNING "idr expansion Sg_device failure\n");
1362 goto out; 1362 goto out;
1363 } 1363 }
1364 1364
1365 write_lock_irqsave(&sg_index_lock, iflags); 1365 write_lock_irqsave(&sg_index_lock, iflags);
1366 error = idr_get_new(&sg_index_idr, sdp, &k); 1366 error = idr_get_new(&sg_index_idr, sdp, &k);
1367 write_unlock_irqrestore(&sg_index_lock, iflags); 1367 write_unlock_irqrestore(&sg_index_lock, iflags);
1368 1368
1369 if (error) { 1369 if (error) {
1370 printk(KERN_WARNING "idr allocation Sg_device failure: %d\n", 1370 printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
1371 error); 1371 error);
1372 goto out; 1372 goto out;
1373 } 1373 }
1374 1374
1375 if (unlikely(k >= SG_MAX_DEVS)) 1375 if (unlikely(k >= SG_MAX_DEVS))
1376 goto overflow; 1376 goto overflow;
1377 1377
1378 SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k)); 1378 SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
1379 sprintf(disk->disk_name, "sg%d", k); 1379 sprintf(disk->disk_name, "sg%d", k);
1380 disk->first_minor = k; 1380 disk->first_minor = k;
1381 sdp->disk = disk; 1381 sdp->disk = disk;
1382 sdp->device = scsidp; 1382 sdp->device = scsidp;
1383 init_waitqueue_head(&sdp->o_excl_wait); 1383 init_waitqueue_head(&sdp->o_excl_wait);
1384 sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments); 1384 sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
1385 sdp->index = k; 1385 sdp->index = k;
1386 1386
1387 error = 0; 1387 error = 0;
1388 out: 1388 out:
1389 if (error) { 1389 if (error) {
1390 kfree(sdp); 1390 kfree(sdp);
1391 return ERR_PTR(error); 1391 return ERR_PTR(error);
1392 } 1392 }
1393 return sdp; 1393 return sdp;
1394 1394
1395 overflow: 1395 overflow:
1396 sdev_printk(KERN_WARNING, scsidp, 1396 sdev_printk(KERN_WARNING, scsidp,
1397 "Unable to attach sg device type=%d, minor " 1397 "Unable to attach sg device type=%d, minor "
1398 "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1); 1398 "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
1399 error = -ENODEV; 1399 error = -ENODEV;
1400 goto out; 1400 goto out;
1401 } 1401 }
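Editor's note: sg_alloc() follows the classic two-step idr pattern of this kernel generation: preload with idr_pre_get() outside the lock (GFP_KERNEL may sleep), then take the write lock and let idr_get_new() hand out the id atomically. A minimal sketch of the same pattern with hypothetical names, assuming the pre-idr_alloc() API used above:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);			/* hypothetical id space */
static DEFINE_RWLOCK(my_lock);

static int my_alloc_id(void *obj, int *id)
{
	unsigned long flags;
	int err;

	if (!idr_pre_get(&my_idr, GFP_KERNEL))	/* preload; may sleep */
		return -ENOMEM;
	write_lock_irqsave(&my_lock, flags);
	err = idr_get_new(&my_idr, obj, id);	/* atomic part, under the lock */
	write_unlock_irqrestore(&my_lock, flags);
	return err;	/* on a later failure, undo with idr_remove(&my_idr, *id) */
}

Note how sg_add()'s cdev_add_err path does exactly that undo with idr_remove() under the same sg_index_lock.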
1402 1402
1403 static int 1403 static int
1404 sg_add(struct device *cl_dev, struct class_interface *cl_intf) 1404 sg_add(struct device *cl_dev, struct class_interface *cl_intf)
1405 { 1405 {
1406 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); 1406 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1407 struct gendisk *disk; 1407 struct gendisk *disk;
1408 Sg_device *sdp = NULL; 1408 Sg_device *sdp = NULL;
1409 struct cdev * cdev = NULL; 1409 struct cdev * cdev = NULL;
1410 int error; 1410 int error;
1411 unsigned long iflags; 1411 unsigned long iflags;
1412 1412
1413 disk = alloc_disk(1); 1413 disk = alloc_disk(1);
1414 if (!disk) { 1414 if (!disk) {
1415 printk(KERN_WARNING "alloc_disk failed\n"); 1415 printk(KERN_WARNING "alloc_disk failed\n");
1416 return -ENOMEM; 1416 return -ENOMEM;
1417 } 1417 }
1418 disk->major = SCSI_GENERIC_MAJOR; 1418 disk->major = SCSI_GENERIC_MAJOR;
1419 1419
1420 error = -ENOMEM; 1420 error = -ENOMEM;
1421 cdev = cdev_alloc(); 1421 cdev = cdev_alloc();
1422 if (!cdev) { 1422 if (!cdev) {
1423 printk(KERN_WARNING "cdev_alloc failed\n"); 1423 printk(KERN_WARNING "cdev_alloc failed\n");
1424 goto out; 1424 goto out;
1425 } 1425 }
1426 cdev->owner = THIS_MODULE; 1426 cdev->owner = THIS_MODULE;
1427 cdev->ops = &sg_fops; 1427 cdev->ops = &sg_fops;
1428 1428
1429 sdp = sg_alloc(disk, scsidp); 1429 sdp = sg_alloc(disk, scsidp);
1430 if (IS_ERR(sdp)) { 1430 if (IS_ERR(sdp)) {
1431 printk(KERN_WARNING "sg_alloc failed\n"); 1431 printk(KERN_WARNING "sg_alloc failed\n");
1432 error = PTR_ERR(sdp); 1432 error = PTR_ERR(sdp);
1433 goto out; 1433 goto out;
1434 } 1434 }
1435 1435
1436 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1); 1436 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
1437 if (error) 1437 if (error)
1438 goto cdev_add_err; 1438 goto cdev_add_err;
1439 1439
1440 sdp->cdev = cdev; 1440 sdp->cdev = cdev;
1441 if (sg_sysfs_valid) { 1441 if (sg_sysfs_valid) {
1442 struct device *sg_class_member; 1442 struct device *sg_class_member;
1443 1443
1444 sg_class_member = device_create_drvdata(sg_sysfs_class, 1444 sg_class_member = device_create_drvdata(sg_sysfs_class,
1445 cl_dev->parent, 1445 cl_dev->parent,
1446 MKDEV(SCSI_GENERIC_MAJOR, 1446 MKDEV(SCSI_GENERIC_MAJOR,
1447 sdp->index), 1447 sdp->index),
1448 sdp, 1448 sdp,
1449 "%s", disk->disk_name); 1449 "%s", disk->disk_name);
1450 if (IS_ERR(sg_class_member)) { 1450 if (IS_ERR(sg_class_member)) {
1451 printk(KERN_ERR "sg_add: " 1451 printk(KERN_ERR "sg_add: "
1452 "device_create failed\n"); 1452 "device_create failed\n");
1453 error = PTR_ERR(sg_class_member); 1453 error = PTR_ERR(sg_class_member);
1454 goto cdev_add_err; 1454 goto cdev_add_err;
1455 } 1455 }
1456 error = sysfs_create_link(&scsidp->sdev_gendev.kobj, 1456 error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
1457 &sg_class_member->kobj, "generic"); 1457 &sg_class_member->kobj, "generic");
1458 if (error) 1458 if (error)
1459 printk(KERN_ERR "sg_add: unable to make symlink " 1459 printk(KERN_ERR "sg_add: unable to make symlink "
1460 "'generic' back to sg%d\n", sdp->index); 1460 "'generic' back to sg%d\n", sdp->index);
1461 } else 1461 } else
1462 printk(KERN_WARNING "sg_add: sg_sys Invalid\n"); 1462 printk(KERN_WARNING "sg_add: sg_sys Invalid\n");
1463 1463
1464 sdev_printk(KERN_NOTICE, scsidp, 1464 sdev_printk(KERN_NOTICE, scsidp,
1465 "Attached scsi generic sg%d type %d\n", sdp->index, 1465 "Attached scsi generic sg%d type %d\n", sdp->index,
1466 scsidp->type); 1466 scsidp->type);
1467 1467
1468 dev_set_drvdata(cl_dev, sdp); 1468 dev_set_drvdata(cl_dev, sdp);
1469 1469
1470 return 0; 1470 return 0;
1471 1471
1472 cdev_add_err: 1472 cdev_add_err:
1473 write_lock_irqsave(&sg_index_lock, iflags); 1473 write_lock_irqsave(&sg_index_lock, iflags);
1474 idr_remove(&sg_index_idr, sdp->index); 1474 idr_remove(&sg_index_idr, sdp->index);
1475 write_unlock_irqrestore(&sg_index_lock, iflags); 1475 write_unlock_irqrestore(&sg_index_lock, iflags);
1476 kfree(sdp); 1476 kfree(sdp);
1477 1477
1478 out: 1478 out:
1479 put_disk(disk); 1479 put_disk(disk);
1480 if (cdev) 1480 if (cdev)
1481 cdev_del(cdev); 1481 cdev_del(cdev);
1482 return error; 1482 return error;
1483 } 1483 }
1484 1484
1485 static void 1485 static void
1486 sg_remove(struct device *cl_dev, struct class_interface *cl_intf) 1486 sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
1487 { 1487 {
1488 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); 1488 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1489 Sg_device *sdp = dev_get_drvdata(cl_dev); 1489 Sg_device *sdp = dev_get_drvdata(cl_dev);
1490 unsigned long iflags; 1490 unsigned long iflags;
1491 Sg_fd *sfp; 1491 Sg_fd *sfp;
1492 Sg_fd *tsfp; 1492 Sg_fd *tsfp;
1493 Sg_request *srp; 1493 Sg_request *srp;
1494 Sg_request *tsrp; 1494 Sg_request *tsrp;
1495 int delay; 1495 int delay;
1496 1496
1497 if (!sdp) 1497 if (!sdp)
1498 return; 1498 return;
1499 1499
1500 delay = 0; 1500 delay = 0;
1501 write_lock_irqsave(&sg_index_lock, iflags); 1501 write_lock_irqsave(&sg_index_lock, iflags);
1502 if (sdp->headfp) { 1502 if (sdp->headfp) {
1503 sdp->detached = 1; 1503 sdp->detached = 1;
1504 for (sfp = sdp->headfp; sfp; sfp = tsfp) { 1504 for (sfp = sdp->headfp; sfp; sfp = tsfp) {
1505 tsfp = sfp->nextfp; 1505 tsfp = sfp->nextfp;
1506 for (srp = sfp->headrp; srp; srp = tsrp) { 1506 for (srp = sfp->headrp; srp; srp = tsrp) {
1507 tsrp = srp->nextrp; 1507 tsrp = srp->nextrp;
1508 if (sfp->closed || (0 == sg_srp_done(srp, sfp))) 1508 if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
1509 sg_finish_rem_req(srp); 1509 sg_finish_rem_req(srp);
1510 } 1510 }
1511 if (sfp->closed) { 1511 if (sfp->closed) {
1512 scsi_device_put(sdp->device); 1512 scsi_device_put(sdp->device);
1513 __sg_remove_sfp(sdp, sfp); 1513 __sg_remove_sfp(sdp, sfp);
1514 } else { 1514 } else {
1515 delay = 1; 1515 delay = 1;
1516 wake_up_interruptible(&sfp->read_wait); 1516 wake_up_interruptible(&sfp->read_wait);
1517 kill_fasync(&sfp->async_qp, SIGPOLL, 1517 kill_fasync(&sfp->async_qp, SIGPOLL,
1518 POLL_HUP); 1518 POLL_HUP);
1519 } 1519 }
1520 } 1520 }
1521 SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d, dirty\n", sdp->index)); 1521 SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d, dirty\n", sdp->index));
1522 if (NULL == sdp->headfp) { 1522 if (NULL == sdp->headfp) {
1523 idr_remove(&sg_index_idr, sdp->index); 1523 idr_remove(&sg_index_idr, sdp->index);
1524 } 1524 }
1525 } else { /* nothing active, simple case */ 1525 } else { /* nothing active, simple case */
1526 SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d\n", sdp->index)); 1526 SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d\n", sdp->index));
1527 idr_remove(&sg_index_idr, sdp->index); 1527 idr_remove(&sg_index_idr, sdp->index);
1528 } 1528 }
1529 write_unlock_irqrestore(&sg_index_lock, iflags); 1529 write_unlock_irqrestore(&sg_index_lock, iflags);
1530 1530
1531 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); 1531 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
1532 device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); 1532 device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
1533 cdev_del(sdp->cdev); 1533 cdev_del(sdp->cdev);
1534 sdp->cdev = NULL; 1534 sdp->cdev = NULL;
1535 put_disk(sdp->disk); 1535 put_disk(sdp->disk);
1536 sdp->disk = NULL; 1536 sdp->disk = NULL;
1537 if (NULL == sdp->headfp) 1537 if (NULL == sdp->headfp)
1538 kfree(sdp); 1538 kfree(sdp);
1539 1539
1540 if (delay) 1540 if (delay)
1541 msleep(10); /* dirty detach so delay device destruction */ 1541 msleep(10); /* dirty detach so delay device destruction */
1542 } 1542 }
1543 1543
1544 module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); 1544 module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
1545 module_param_named(def_reserved_size, def_reserved_size, int, 1545 module_param_named(def_reserved_size, def_reserved_size, int,
1546 S_IRUGO | S_IWUSR); 1546 S_IRUGO | S_IWUSR);
1547 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR); 1547 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
1548 1548
1549 MODULE_AUTHOR("Douglas Gilbert"); 1549 MODULE_AUTHOR("Douglas Gilbert");
1550 MODULE_DESCRIPTION("SCSI generic (sg) driver"); 1550 MODULE_DESCRIPTION("SCSI generic (sg) driver");
1551 MODULE_LICENSE("GPL"); 1551 MODULE_LICENSE("GPL");
1552 MODULE_VERSION(SG_VERSION_STR); 1552 MODULE_VERSION(SG_VERSION_STR);
1553 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR); 1553 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
1554 1554
1555 MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element " 1555 MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
1556 "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))"); 1556 "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
1557 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd"); 1557 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
1558 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))"); 1558 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
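Editor's note: because the parameters above are declared S_IRUGO | S_IWUSR, they are writable at runtime through /sys/module/sg/parameters/ as well as settable at module load time. A small user-space sketch, assuming the sg module is loaded and the caller has write permission, that enables direct I/O:

#include <stdio.h>

int main(void)
{
	/* assumed sysfs path; corresponds to module_param allow_dio above */
	FILE *f = fopen("/sys/module/sg/parameters/allow_dio", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* let SG_FLAG_DIRECT_IO requests map user pages */
	return fclose(f) ? 1 : 0;
}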
1559 1559
1560 static int __init 1560 static int __init
1561 init_sg(void) 1561 init_sg(void)
1562 { 1562 {
1563 int rc; 1563 int rc;
1564 1564
1565 if (scatter_elem_sz < PAGE_SIZE) { 1565 if (scatter_elem_sz < PAGE_SIZE) {
1566 scatter_elem_sz = PAGE_SIZE; 1566 scatter_elem_sz = PAGE_SIZE;
1567 scatter_elem_sz_prev = scatter_elem_sz; 1567 scatter_elem_sz_prev = scatter_elem_sz;
1568 } 1568 }
1569 if (def_reserved_size >= 0) 1569 if (def_reserved_size >= 0)
1570 sg_big_buff = def_reserved_size; 1570 sg_big_buff = def_reserved_size;
1571 else 1571 else
1572 def_reserved_size = sg_big_buff; 1572 def_reserved_size = sg_big_buff;
1573 1573
1574 rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), 1574 rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1575 SG_MAX_DEVS, "sg"); 1575 SG_MAX_DEVS, "sg");
1576 if (rc) 1576 if (rc)
1577 return rc; 1577 return rc;
1578 sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic"); 1578 sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
1579 if ( IS_ERR(sg_sysfs_class) ) { 1579 if ( IS_ERR(sg_sysfs_class) ) {
1580 rc = PTR_ERR(sg_sysfs_class); 1580 rc = PTR_ERR(sg_sysfs_class);
1581 goto err_out; 1581 goto err_out;
1582 } 1582 }
1583 sg_sysfs_valid = 1; 1583 sg_sysfs_valid = 1;
1584 rc = scsi_register_interface(&sg_interface); 1584 rc = scsi_register_interface(&sg_interface);
1585 if (0 == rc) { 1585 if (0 == rc) {
1586 #ifdef CONFIG_SCSI_PROC_FS 1586 #ifdef CONFIG_SCSI_PROC_FS
1587 sg_proc_init(); 1587 sg_proc_init();
1588 #endif /* CONFIG_SCSI_PROC_FS */ 1588 #endif /* CONFIG_SCSI_PROC_FS */
1589 return 0; 1589 return 0;
1590 } 1590 }
1591 class_destroy(sg_sysfs_class); 1591 class_destroy(sg_sysfs_class);
1592 err_out: 1592 err_out:
1593 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); 1593 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
1594 return rc; 1594 return rc;
1595 } 1595 }
1596 1596
1597 static void __exit 1597 static void __exit
1598 exit_sg(void) 1598 exit_sg(void)
1599 { 1599 {
1600 #ifdef CONFIG_SCSI_PROC_FS 1600 #ifdef CONFIG_SCSI_PROC_FS
1601 sg_proc_cleanup(); 1601 sg_proc_cleanup();
1602 #endif /* CONFIG_SCSI_PROC_FS */ 1602 #endif /* CONFIG_SCSI_PROC_FS */
1603 scsi_unregister_interface(&sg_interface); 1603 scsi_unregister_interface(&sg_interface);
1604 class_destroy(sg_sysfs_class); 1604 class_destroy(sg_sysfs_class);
1605 sg_sysfs_valid = 0; 1605 sg_sysfs_valid = 0;
1606 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), 1606 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1607 SG_MAX_DEVS); 1607 SG_MAX_DEVS);
1608 idr_destroy(&sg_index_idr); 1608 idr_destroy(&sg_index_idr);
1609 } 1609 }
1610 1610
1611 static int 1611 static int
1612 sg_start_req(Sg_request * srp) 1612 sg_start_req(Sg_request * srp)
1613 { 1613 {
1614 int res; 1614 int res;
1615 Sg_fd *sfp = srp->parentfp; 1615 Sg_fd *sfp = srp->parentfp;
1616 sg_io_hdr_t *hp = &srp->header; 1616 sg_io_hdr_t *hp = &srp->header;
1617 int dxfer_len = (int) hp->dxfer_len; 1617 int dxfer_len = (int) hp->dxfer_len;
1618 int dxfer_dir = hp->dxfer_direction; 1618 int dxfer_dir = hp->dxfer_direction;
1619 Sg_scatter_hold *req_schp = &srp->data; 1619 Sg_scatter_hold *req_schp = &srp->data;
1620 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1620 Sg_scatter_hold *rsv_schp = &sfp->reserve;
1621 1621
1622 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len)); 1622 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1623 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) 1623 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1624 return 0; 1624 return 0;
1625 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) && 1625 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
1626 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) && 1626 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
1627 (!sfp->parentdp->device->host->unchecked_isa_dma)) { 1627 (!sfp->parentdp->device->host->unchecked_isa_dma)) {
1628 res = sg_build_direct(srp, sfp, dxfer_len); 1628 res = sg_build_direct(srp, sfp, dxfer_len);
1629 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */ 1629 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */
1630 return res; 1630 return res;
1631 } 1631 }
1632 if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen)) 1632 if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
1633 sg_link_reserve(sfp, srp, dxfer_len); 1633 sg_link_reserve(sfp, srp, dxfer_len);
1634 else { 1634 else {
1635 res = sg_build_indirect(req_schp, sfp, dxfer_len); 1635 res = sg_build_indirect(req_schp, sfp, dxfer_len);
1636 if (res) { 1636 if (res) {
1637 sg_remove_scat(req_schp); 1637 sg_remove_scat(req_schp);
1638 return res; 1638 return res;
1639 } 1639 }
1640 } 1640 }
1641 return 0; 1641 return 0;
1642 } 1642 }
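Editor's note: sg_start_req() only attempts the direct-I/O path when allow_dio is set, the caller passed SG_FLAG_DIRECT_IO, the direction is known, no iovec is used, and the host does not need ISA bounce buffers; sg_build_direct() additionally rejects buffers that violate the queue's DMA alignment and falls back to the indirect (kernel-buffer) path. A user-space sketch with a hypothetical helper, assuming a valid sg device path and allow_dio=1, that gives the driver its best chance by page-aligning the data buffer:

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

/* hypothetical helper: returns 1 if the transfer really used direct I/O */
static int read_capacity_direct(const char *dev)
{
	unsigned char cdb[10] = { 0x25 };	/* READ CAPACITY(10) */
	unsigned char sense[32];
	struct sg_io_hdr io;
	void *buf;
	int fd, direct;

	fd = open(dev, O_RDWR);
	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, 4096)) {	/* page-aligned buffer */
		close(fd);
		return -1;
	}
	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.cmd_len = sizeof(cdb);
	io.cmdp = cdb;
	io.dxfer_direction = SG_DXFER_FROM_DEV;
	io.dxfer_len = 8;			/* READ CAPACITY(10) returns 8 bytes */
	io.dxferp = buf;
	io.mx_sb_len = sizeof(sense);
	io.sbp = sense;
	io.flags = SG_FLAG_DIRECT_IO;		/* request the sg_build_direct() path */
	io.timeout = 5000;
	direct = ioctl(fd, SG_IO, &io) == 0 &&
		 (io.info & SG_INFO_DIRECT_IO_MASK) != 0;
	free(buf);
	close(fd);
	return direct;
}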
1643 1643
1644 static void 1644 static void
1645 sg_finish_rem_req(Sg_request * srp) 1645 sg_finish_rem_req(Sg_request * srp)
1646 { 1646 {
1647 Sg_fd *sfp = srp->parentfp; 1647 Sg_fd *sfp = srp->parentfp;
1648 Sg_scatter_hold *req_schp = &srp->data; 1648 Sg_scatter_hold *req_schp = &srp->data;
1649 1649
1650 SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used)); 1650 SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
1651 if (srp->res_used) 1651 if (srp->res_used)
1652 sg_unlink_reserve(sfp, srp); 1652 sg_unlink_reserve(sfp, srp);
1653 else 1653 else
1654 sg_remove_scat(req_schp); 1654 sg_remove_scat(req_schp);
1655 sg_remove_request(sfp, srp); 1655 sg_remove_request(sfp, srp);
1656 } 1656 }
1657 1657
1658 static int 1658 static int
1659 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) 1659 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1660 { 1660 {
1661 int sg_bufflen = tablesize * sizeof(struct scatterlist); 1661 int sg_bufflen = tablesize * sizeof(struct scatterlist);
1662 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; 1662 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1663 1663
1664 /* 1664 /*
1665 * TODO: test without low_dma, we should not need it since 1665 * TODO: test without low_dma, we should not need it since
1666 * the block layer will bounce the buffer for us 1666 * the block layer will bounce the buffer for us
1667 * 1667 *
1668 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list. 1668 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
1669 */ 1669 */
1670 if (sfp->low_dma) 1670 if (sfp->low_dma)
1671 gfp_flags |= GFP_DMA; 1671 gfp_flags |= GFP_DMA;
1672 schp->buffer = kzalloc(sg_bufflen, gfp_flags); 1672 schp->buffer = kzalloc(sg_bufflen, gfp_flags);
1673 if (!schp->buffer) 1673 if (!schp->buffer)
1674 return -ENOMEM; 1674 return -ENOMEM;
1675 sg_init_table(schp->buffer, tablesize); 1675 sg_init_table(schp->buffer, tablesize);
1676 schp->sglist_len = sg_bufflen; 1676 schp->sglist_len = sg_bufflen;
1677 return tablesize; /* number of scat_gath elements allocated */ 1677 return tablesize; /* number of scat_gath elements allocated */
1678 } 1678 }
1679 1679
1680 #ifdef SG_ALLOW_DIO_CODE 1680 #ifdef SG_ALLOW_DIO_CODE
1681 /* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */ 1681 /* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
1682 /* TODO: hopefully we can use the generic block layer code */ 1682 /* TODO: hopefully we can use the generic block layer code */
1683 1683
1684 /* Pin down user pages and put them into a scatter gather list. Returns <= 0 if 1684 /* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
1685 - mapping of all pages not successful 1685 - mapping of all pages not successful
1686 (i.e., either completely successful or fails) 1686 (i.e., either completely successful or fails)
1687 */ 1687 */
1688 static int 1688 static int
1689 st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, 1689 st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1690 unsigned long uaddr, size_t count, int rw) 1690 unsigned long uaddr, size_t count, int rw)
1691 { 1691 {
1692 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT; 1692 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1693 unsigned long start = uaddr >> PAGE_SHIFT; 1693 unsigned long start = uaddr >> PAGE_SHIFT;
1694 const int nr_pages = end - start; 1694 const int nr_pages = end - start;
1695 int res, i, j; 1695 int res, i, j;
1696 struct page **pages; 1696 struct page **pages;
1697 1697
1698 /* User attempted Overflow! */ 1698 /* User attempted Overflow! */
1699 if ((uaddr + count) < uaddr) 1699 if ((uaddr + count) < uaddr)
1700 return -EINVAL; 1700 return -EINVAL;
1701 1701
1702 /* Too big */ 1702 /* Too big */
1703 if (nr_pages > max_pages) 1703 if (nr_pages > max_pages)
1704 return -ENOMEM; 1704 return -ENOMEM;
1705 1705
1706 /* Hmm? */ 1706 /* Hmm? */
1707 if (count == 0) 1707 if (count == 0)
1708 return 0; 1708 return 0;
1709 1709
1710 if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL) 1710 if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
1711 return -ENOMEM; 1711 return -ENOMEM;
1712 1712
1713 /* Try to fault in all of the necessary pages */ 1713 /* Try to fault in all of the necessary pages */
1714 down_read(&current->mm->mmap_sem); 1714 down_read(&current->mm->mmap_sem);
1715 /* rw==READ means read from drive, write into memory area */ 1715 /* rw==READ means read from drive, write into memory area */
1716 res = get_user_pages( 1716 res = get_user_pages(
1717 current, 1717 current,
1718 current->mm, 1718 current->mm,
1719 uaddr, 1719 uaddr,
1720 nr_pages, 1720 nr_pages,
1721 rw == READ, 1721 rw == READ,
1722 0, /* don't force */ 1722 0, /* don't force */
1723 pages, 1723 pages,
1724 NULL); 1724 NULL);
1725 up_read(&current->mm->mmap_sem); 1725 up_read(&current->mm->mmap_sem);
1726 1726
1727 /* Errors and no page mapped should return here */ 1727 /* Errors and no page mapped should return here */
1728 if (res < nr_pages) 1728 if (res < nr_pages)
1729 goto out_unmap; 1729 goto out_unmap;
1730 1730
1731 for (i=0; i < nr_pages; i++) { 1731 for (i=0; i < nr_pages; i++) {
1732 /* FIXME: flush superfluous for rw==READ, 1732 /* FIXME: flush superfluous for rw==READ,
1733 * probably wrong function for rw==WRITE 1733 * probably wrong function for rw==WRITE
1734 */ 1734 */
1735 flush_dcache_page(pages[i]); 1735 flush_dcache_page(pages[i]);
1736 /* ?? Is locking needed? I don't think so */ 1736 /* ?? Is locking needed? I don't think so */
1737 /* if (TestSetPageLocked(pages[i])) 1737 /* if (TestSetPageLocked(pages[i]))
1738 goto out_unlock; */ 1738 goto out_unlock; */
1739 } 1739 }
1740 1740
1741 sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK); 1741 sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
1742 if (nr_pages > 1) { 1742 if (nr_pages > 1) {
1743 sgl[0].length = PAGE_SIZE - sgl[0].offset; 1743 sgl[0].length = PAGE_SIZE - sgl[0].offset;
1744 count -= sgl[0].length; 1744 count -= sgl[0].length;
1745 for (i=1; i < nr_pages ; i++) 1745 for (i=1; i < nr_pages ; i++)
1746 sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0); 1746 sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
1747 } 1747 }
1748 else { 1748 else {
1749 sgl[0].length = count; 1749 sgl[0].length = count;
1750 } 1750 }
1751 1751
1752 kfree(pages); 1752 kfree(pages);
1753 return nr_pages; 1753 return nr_pages;
1754 1754
1755 out_unmap: 1755 out_unmap:
1756 if (res > 0) { 1756 if (res > 0) {
1757 for (j=0; j < res; j++) 1757 for (j=0; j < res; j++)
1758 page_cache_release(pages[j]); 1758 page_cache_release(pages[j]);
1759 res = 0; 1759 res = 0;
1760 } 1760 }
1761 kfree(pages); 1761 kfree(pages);
1762 return res; 1762 return res;
1763 } 1763 }
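Editor's note: the page-count arithmetic at the top of st_map_user_pages() is worth spelling out: the buffer may start and end mid-page, so the pinned range is computed from the first and one-past-last page indices. A small user-space demonstration of the same arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long uaddr = 0x1fff;	/* last byte of one page... */
	unsigned long count = 2;	/* ...spilling into the next */
	unsigned long end   = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;

	/* two bytes straddling a boundary still pin two pages */
	printf("nr_pages = %lu\n", end - start);	/* prints 2 */
	return 0;
}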
1764 1764
1765 1765
1766 /* And unmap them... */ 1766 /* And unmap them... */
1767 static int 1767 static int
1768 st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages, 1768 st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1769 int dirtied) 1769 int dirtied)
1770 { 1770 {
1771 int i; 1771 int i;
1772 1772
1773 for (i=0; i < nr_pages; i++) { 1773 for (i=0; i < nr_pages; i++) {
1774 struct page *page = sg_page(&sgl[i]); 1774 struct page *page = sg_page(&sgl[i]);
1775 1775
1776 if (dirtied) 1776 if (dirtied)
1777 SetPageDirty(page); 1777 SetPageDirty(page);
1778 /* unlock_page(page); */ 1778 /* unlock_page(page); */
1779 /* FIXME: cache flush missing for rw==READ 1779 /* FIXME: cache flush missing for rw==READ
1780 * FIXME: call the correct reference counting function 1780 * FIXME: call the correct reference counting function
1781 */ 1781 */
1782 page_cache_release(page); 1782 page_cache_release(page);
1783 } 1783 }
1784 1784
1785 return 0; 1785 return 0;
1786 } 1786 }
1787 1787
1788 /* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */ 1788 /* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
1789 #endif 1789 #endif
1790 1790
1791 1791
1792 /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */ 1792 /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1793 static int 1793 static int
1794 sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len) 1794 sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1795 { 1795 {
1796 #ifdef SG_ALLOW_DIO_CODE 1796 #ifdef SG_ALLOW_DIO_CODE
1797 sg_io_hdr_t *hp = &srp->header; 1797 sg_io_hdr_t *hp = &srp->header;
1798 Sg_scatter_hold *schp = &srp->data; 1798 Sg_scatter_hold *schp = &srp->data;
1799 int sg_tablesize = sfp->parentdp->sg_tablesize; 1799 int sg_tablesize = sfp->parentdp->sg_tablesize;
1800 int mx_sc_elems, res; 1800 int mx_sc_elems, res;
1801 struct scsi_device *sdev = sfp->parentdp->device; 1801 struct scsi_device *sdev = sfp->parentdp->device;
1802 1802
1803 if (((unsigned long)hp->dxferp & 1803 if (((unsigned long)hp->dxferp &
1804 queue_dma_alignment(sdev->request_queue)) != 0) 1804 queue_dma_alignment(sdev->request_queue)) != 0)
1805 return 1; 1805 return 1;
1806 1806
1807 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); 1807 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1808 if (mx_sc_elems <= 0) { 1808 if (mx_sc_elems <= 0) {
1809 return 1; 1809 return 1;
1810 } 1810 }
1811 res = st_map_user_pages(schp->buffer, mx_sc_elems, 1811 res = st_map_user_pages(schp->buffer, mx_sc_elems,
1812 (unsigned long)hp->dxferp, dxfer_len, 1812 (unsigned long)hp->dxferp, dxfer_len,
1813 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0); 1813 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
1814 if (res <= 0) { 1814 if (res <= 0) {
1815 sg_remove_scat(schp); 1815 sg_remove_scat(schp);
1816 return 1; 1816 return 1;
1817 } 1817 }
1818 schp->k_use_sg = res; 1818 schp->k_use_sg = res;
1819 schp->dio_in_use = 1; 1819 schp->dio_in_use = 1;
1820 hp->info |= SG_INFO_DIRECT_IO; 1820 hp->info |= SG_INFO_DIRECT_IO;
1821 return 0; 1821 return 0;
1822 #else 1822 #else
1823 return 1; 1823 return 1;
1824 #endif 1824 #endif
1825 } 1825 }
1826 1826
1827 static int 1827 static int
1828 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) 1828 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1829 { 1829 {
1830 struct scatterlist *sg; 1830 struct scatterlist *sg;
1831 int ret_sz = 0, k, rem_sz, num, mx_sc_elems; 1831 int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
1832 int sg_tablesize = sfp->parentdp->sg_tablesize; 1832 int sg_tablesize = sfp->parentdp->sg_tablesize;
1833 int blk_size = buff_size; 1833 int blk_size = buff_size;
1834 struct page *p = NULL; 1834 struct page *p = NULL;
1835 1835
1836 if (blk_size < 0) 1836 if (blk_size < 0)
1837 return -EFAULT; 1837 return -EFAULT;
1838 if (0 == blk_size) 1838 if (0 == blk_size)
1839 ++blk_size; /* don't know why */ 1839 ++blk_size; /* don't know why */
1840 /* round request up to next highest SG_SECTOR_SZ byte boundary */ 1840 /* round request up to next highest SG_SECTOR_SZ byte boundary */
1841 blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK); 1841 blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
1842 SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n", 1842 SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
1843 buff_size, blk_size)); 1843 buff_size, blk_size));
1844 1844
1845 /* N.B. ret_sz carried into this block ... */ 1845 /* N.B. ret_sz carried into this block ... */
1846 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); 1846 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1847 if (mx_sc_elems < 0) 1847 if (mx_sc_elems < 0)
1848 return mx_sc_elems; /* most likely -ENOMEM */ 1848 return mx_sc_elems; /* most likely -ENOMEM */
1849 1849
1850 num = scatter_elem_sz; 1850 num = scatter_elem_sz;
1851 if (unlikely(num != scatter_elem_sz_prev)) { 1851 if (unlikely(num != scatter_elem_sz_prev)) {
1852 if (num < PAGE_SIZE) { 1852 if (num < PAGE_SIZE) {
1853 scatter_elem_sz = PAGE_SIZE; 1853 scatter_elem_sz = PAGE_SIZE;
1854 scatter_elem_sz_prev = PAGE_SIZE; 1854 scatter_elem_sz_prev = PAGE_SIZE;
1855 } else 1855 } else
1856 scatter_elem_sz_prev = num; 1856 scatter_elem_sz_prev = num;
1857 } 1857 }
1858 for (k = 0, sg = schp->buffer, rem_sz = blk_size; 1858 for (k = 0, sg = schp->buffer, rem_sz = blk_size;
1859 (rem_sz > 0) && (k < mx_sc_elems); 1859 (rem_sz > 0) && (k < mx_sc_elems);
1860 ++k, rem_sz -= ret_sz, sg = sg_next(sg)) { 1860 ++k, rem_sz -= ret_sz, sg = sg_next(sg)) {
1861 1861
1862 num = (rem_sz > scatter_elem_sz_prev) ? 1862 num = (rem_sz > scatter_elem_sz_prev) ?
1863 scatter_elem_sz_prev : rem_sz; 1863 scatter_elem_sz_prev : rem_sz;
1864 p = sg_page_malloc(num, sfp->low_dma, &ret_sz); 1864 p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
1865 if (!p) 1865 if (!p)
1866 return -ENOMEM; 1866 return -ENOMEM;
1867 1867
1868 if (num == scatter_elem_sz_prev) { 1868 if (num == scatter_elem_sz_prev) {
1869 if (unlikely(ret_sz > scatter_elem_sz_prev)) { 1869 if (unlikely(ret_sz > scatter_elem_sz_prev)) {
1870 scatter_elem_sz = ret_sz; 1870 scatter_elem_sz = ret_sz;
1871 scatter_elem_sz_prev = ret_sz; 1871 scatter_elem_sz_prev = ret_sz;
1872 } 1872 }
1873 } 1873 }
1874 sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0); 1874 sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
1875 1875
1876 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " 1876 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
1877 "ret_sz=%d\n", k, num, ret_sz)); 1877 "ret_sz=%d\n", k, num, ret_sz));
1878 } /* end of for loop */ 1878 } /* end of for loop */
1879 1879
1880 schp->k_use_sg = k; 1880 schp->k_use_sg = k;
1881 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, " 1881 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
1882 "rem_sz=%d\n", k, rem_sz)); 1882 "rem_sz=%d\n", k, rem_sz));
1883 1883
1884 schp->bufflen = blk_size; 1884 schp->bufflen = blk_size;
1885 if (rem_sz > 0) /* must have failed */ 1885 if (rem_sz > 0) /* must have failed */
1886 return -ENOMEM; 1886 return -ENOMEM;
1887 1887
1888 return 0; 1888 return 0;
1889 } 1889 }
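Editor's note: sg_build_indirect() rounds the request up to the next SG_SECTOR_SZ boundary with the usual add-then-mask idiom. SG_SECTOR_MSK is SG_SECTOR_SZ - 1 (512-byte sectors in sg.c), so the expression only works because the sector size is a power of two. A user-space check of the idiom:

#include <assert.h>

#define SG_SECTOR_SZ  512			/* as defined in sg.c */
#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)

static int round_up_sector(int blk_size)
{
	return (blk_size + SG_SECTOR_MSK) & ~SG_SECTOR_MSK;
}

int main(void)
{
	assert(round_up_sector(1)   == 512);
	assert(round_up_sector(512) == 512);	/* already aligned: unchanged */
	assert(round_up_sector(513) == 1024);
	return 0;
}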
1890 1890
1891 static int 1891 static int
1892 sg_write_xfer(Sg_request * srp) 1892 sg_write_xfer(Sg_request * srp)
1893 { 1893 {
1894 sg_io_hdr_t *hp = &srp->header; 1894 sg_io_hdr_t *hp = &srp->header;
1895 Sg_scatter_hold *schp = &srp->data; 1895 Sg_scatter_hold *schp = &srp->data;
1896 struct scatterlist *sg = schp->buffer; 1896 struct scatterlist *sg = schp->buffer;
1897 int num_xfer = 0; 1897 int num_xfer = 0;
1898 int j, k, onum, usglen, ksglen, res; 1898 int j, k, onum, usglen, ksglen, res;
1899 int iovec_count = (int) hp->iovec_count; 1899 int iovec_count = (int) hp->iovec_count;
1900 int dxfer_dir = hp->dxfer_direction; 1900 int dxfer_dir = hp->dxfer_direction;
1901 unsigned char *p; 1901 unsigned char *p;
1902 unsigned char __user *up; 1902 unsigned char __user *up;
1903 int new_interface = ('\0' == hp->interface_id) ? 0 : 1; 1903 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
1904 1904
1905 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) || 1905 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
1906 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) { 1906 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
1907 num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags); 1907 num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
1908 if (schp->bufflen < num_xfer) 1908 if (schp->bufflen < num_xfer)
1909 num_xfer = schp->bufflen; 1909 num_xfer = schp->bufflen;
1910 } 1910 }
1911 if ((num_xfer <= 0) || (schp->dio_in_use) || 1911 if ((num_xfer <= 0) || (schp->dio_in_use) ||
1912 (new_interface 1912 (new_interface
1913 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags))) 1913 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
1914 return 0; 1914 return 0;
1915 1915
1916 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n", 1916 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1917 num_xfer, iovec_count, schp->k_use_sg)); 1917 num_xfer, iovec_count, schp->k_use_sg));
1918 if (iovec_count) { 1918 if (iovec_count) {
1919 onum = iovec_count; 1919 onum = iovec_count;
1920 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum)) 1920 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
1921 return -EFAULT; 1921 return -EFAULT;
1922 } else 1922 } else
1923 onum = 1; 1923 onum = 1;
1924 1924
1925 ksglen = sg->length; 1925 ksglen = sg->length;
1926 p = page_address(sg_page(sg)); 1926 p = page_address(sg_page(sg));
1927 for (j = 0, k = 0; j < onum; ++j) { 1927 for (j = 0, k = 0; j < onum; ++j) {
1928 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up); 1928 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1929 if (res) 1929 if (res)
1930 return res; 1930 return res;
1931 1931
1932 for (; p; sg = sg_next(sg), ksglen = sg->length, 1932 for (; p; sg = sg_next(sg), ksglen = sg->length,
1933 p = page_address(sg_page(sg))) { 1933 p = page_address(sg_page(sg))) {
1934 if (usglen <= 0) 1934 if (usglen <= 0)
1935 break; 1935 break;
1936 if (ksglen > usglen) { 1936 if (ksglen > usglen) {
1937 if (usglen >= num_xfer) { 1937 if (usglen >= num_xfer) {
1938 if (__copy_from_user(p, up, num_xfer)) 1938 if (__copy_from_user(p, up, num_xfer))
1939 return -EFAULT; 1939 return -EFAULT;
1940 return 0; 1940 return 0;
1941 } 1941 }
1942 if (__copy_from_user(p, up, usglen)) 1942 if (__copy_from_user(p, up, usglen))
1943 return -EFAULT; 1943 return -EFAULT;
1944 p += usglen; 1944 p += usglen;
1945 ksglen -= usglen; 1945 ksglen -= usglen;
1946 break; 1946 break;
1947 } else { 1947 } else {
1948 if (ksglen >= num_xfer) { 1948 if (ksglen >= num_xfer) {
1949 if (__copy_from_user(p, up, num_xfer)) 1949 if (__copy_from_user(p, up, num_xfer))
1950 return -EFAULT; 1950 return -EFAULT;
1951 return 0; 1951 return 0;
1952 } 1952 }
1953 if (__copy_from_user(p, up, ksglen)) 1953 if (__copy_from_user(p, up, ksglen))
1954 return -EFAULT; 1954 return -EFAULT;
1955 up += ksglen; 1955 up += ksglen;
1956 usglen -= ksglen; 1956 usglen -= ksglen;
1957 } 1957 }
1958 ++k; 1958 ++k;
1959 if (k >= schp->k_use_sg) 1959 if (k >= schp->k_use_sg)
1960 return 0; 1960 return 0;
1961 } 1961 }
1962 } 1962 }
1963 1963
1964 return 0; 1964 return 0;
1965 } 1965 }
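Editor's note: sg_write_xfer() walks two lists at once: the user side is either a single buffer or an array of sg_iovec_t (when hp->iovec_count is nonzero), and the kernel side is the scatter list, so the inner loop copies min-sized chunks and advances whichever side ran out. For reference, a sketch of the user-facing shape this code consumes, a two-segment gather write, assuming an sg_io_hdr already prepared for SG_DXFER_TO_DEV; the helper name is hypothetical:

#include <scsi/sg.h>

/* describe two user segments; with iovec_count set, dxferp is the vector */
static void attach_iovec(struct sg_io_hdr *io,
			 void *seg0, unsigned len0,
			 void *seg1, unsigned len1)
{
	static sg_iovec_t vec[2];	/* must outlive the request */

	vec[0].iov_base = seg0;
	vec[0].iov_len  = len0;
	vec[1].iov_base = seg1;
	vec[1].iov_len  = len1;
	io->iovec_count = 2;
	io->dxferp = vec;
	io->dxfer_len = len0 + len1;	/* total transfer length */
}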
1966 1966
1967 static int 1967 static int
1968 sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 1968 sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
1969 int wr_xf, int *countp, unsigned char __user **up) 1969 int wr_xf, int *countp, unsigned char __user **up)
1970 { 1970 {
1971 int num_xfer = (int) hp->dxfer_len; 1971 int num_xfer = (int) hp->dxfer_len;
1972 unsigned char __user *p = hp->dxferp; 1972 unsigned char __user *p = hp->dxferp;
1973 int count; 1973 int count;
1974 1974
1975 if (0 == sg_num) { 1975 if (0 == sg_num) {
1976 if (wr_xf && ('\0' == hp->interface_id)) 1976 if (wr_xf && ('\0' == hp->interface_id))
1977 count = (int) hp->flags; /* holds "old" input_size */ 1977 count = (int) hp->flags; /* holds "old" input_size */
1978 else 1978 else
1979 count = num_xfer; 1979 count = num_xfer;
1980 } else { 1980 } else {
1981 sg_iovec_t iovec; 1981 sg_iovec_t iovec;
1982 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC)) 1982 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
1983 return -EFAULT; 1983 return -EFAULT;
1984 p = iovec.iov_base; 1984 p = iovec.iov_base;
1985 count = (int) iovec.iov_len; 1985 count = (int) iovec.iov_len;
1986 } 1986 }
1987 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count)) 1987 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
1988 return -EFAULT; 1988 return -EFAULT;
1989 if (up) 1989 if (up)
1990 *up = p; 1990 *up = p;
1991 if (countp) 1991 if (countp)
1992 *countp = count; 1992 *countp = count;
1993 return 0; 1993 return 0;
1994 } 1994 }
1995 1995
1996 static void 1996 static void
1997 sg_remove_scat(Sg_scatter_hold * schp) 1997 sg_remove_scat(Sg_scatter_hold * schp)
1998 { 1998 {
1999 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); 1999 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
2000 if (schp->buffer && (schp->sglist_len > 0)) { 2000 if (schp->buffer && (schp->sglist_len > 0)) {
2001 struct scatterlist *sg = schp->buffer; 2001 struct scatterlist *sg = schp->buffer;
2002 2002
2003 if (schp->dio_in_use) { 2003 if (schp->dio_in_use) {
2004 #ifdef SG_ALLOW_DIO_CODE 2004 #ifdef SG_ALLOW_DIO_CODE
2005 st_unmap_user_pages(sg, schp->k_use_sg, TRUE); 2005 st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
2006 #endif 2006 #endif
2007 } else { 2007 } else {
2008 int k; 2008 int k;
2009 2009
2010 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); 2010 for (k = 0; (k < schp->k_use_sg) && sg_page(sg);
2011 ++k, sg = sg_next(sg)) { 2011 ++k, sg = sg_next(sg)) {
2012 SCSI_LOG_TIMEOUT(5, printk( 2012 SCSI_LOG_TIMEOUT(5, printk(
2013 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", 2013 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
2014 k, sg_page(sg), sg->length)); 2014 k, sg_page(sg), sg->length));
2015 sg_page_free(sg_page(sg), sg->length); 2015 sg_page_free(sg_page(sg), sg->length);
2016 } 2016 }
2017 } 2017 }
2018 kfree(schp->buffer); 2018 kfree(schp->buffer);
2019 } 2019 }
2020 memset(schp, 0, sizeof (*schp)); 2020 memset(schp, 0, sizeof (*schp));
2021 } 2021 }
2022 2022
2023 static int 2023 static int
2024 sg_read_xfer(Sg_request * srp) 2024 sg_read_xfer(Sg_request * srp)
2025 { 2025 {
2026 sg_io_hdr_t *hp = &srp->header; 2026 sg_io_hdr_t *hp = &srp->header;
2027 Sg_scatter_hold *schp = &srp->data; 2027 Sg_scatter_hold *schp = &srp->data;
2028 struct scatterlist *sg = schp->buffer; 2028 struct scatterlist *sg = schp->buffer;
2029 int num_xfer = 0; 2029 int num_xfer = 0;
2030 int j, k, onum, usglen, ksglen, res; 2030 int j, k, onum, usglen, ksglen, res;
2031 int iovec_count = (int) hp->iovec_count; 2031 int iovec_count = (int) hp->iovec_count;
2032 int dxfer_dir = hp->dxfer_direction; 2032 int dxfer_dir = hp->dxfer_direction;
2033 unsigned char *p; 2033 unsigned char *p;
2034 unsigned char __user *up; 2034 unsigned char __user *up;
2035 int new_interface = ('\0' == hp->interface_id) ? 0 : 1; 2035 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2036 2036
2037 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir) 2037 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2038 || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) { 2038 || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2039 num_xfer = hp->dxfer_len; 2039 num_xfer = hp->dxfer_len;
2040 if (schp->bufflen < num_xfer) 2040 if (schp->bufflen < num_xfer)
2041 num_xfer = schp->bufflen; 2041 num_xfer = schp->bufflen;
2042 } 2042 }
2043 if ((num_xfer <= 0) || (schp->dio_in_use) || 2043 if ((num_xfer <= 0) || (schp->dio_in_use) ||
2044 (new_interface 2044 (new_interface
2045 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags))) 2045 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2046 return 0; 2046 return 0;
2047 2047
2048 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n", 2048 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2049 num_xfer, iovec_count, schp->k_use_sg)); 2049 num_xfer, iovec_count, schp->k_use_sg));
2050 if (iovec_count) { 2050 if (iovec_count) {
2051 onum = iovec_count; 2051 onum = iovec_count;
2052 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum)) 2052 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2053 return -EFAULT; 2053 return -EFAULT;
2054 } else 2054 } else
2055 onum = 1; 2055 onum = 1;
2056 2056
2057 p = page_address(sg_page(sg)); 2057 p = page_address(sg_page(sg));
2058 ksglen = sg->length; 2058 ksglen = sg->length;
2059 for (j = 0, k = 0; j < onum; ++j) { 2059 for (j = 0, k = 0; j < onum; ++j) {
2060 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up); 2060 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2061 if (res) 2061 if (res)
2062 return res; 2062 return res;
2063 2063
2064 for (; p; sg = sg_next(sg), ksglen = sg->length, 2064 for (; p; sg = sg_next(sg), ksglen = sg->length,
2065 p = page_address(sg_page(sg))) { 2065 p = page_address(sg_page(sg))) {
2066 if (usglen <= 0) 2066 if (usglen <= 0)
2067 break; 2067 break;
2068 if (ksglen > usglen) { 2068 if (ksglen > usglen) {
2069 if (usglen >= num_xfer) { 2069 if (usglen >= num_xfer) {
2070 if (__copy_to_user(up, p, num_xfer)) 2070 if (__copy_to_user(up, p, num_xfer))
2071 return -EFAULT; 2071 return -EFAULT;
2072 return 0; 2072 return 0;
2073 } 2073 }
2074 if (__copy_to_user(up, p, usglen)) 2074 if (__copy_to_user(up, p, usglen))
2075 return -EFAULT; 2075 return -EFAULT;
2076 p += usglen; 2076 p += usglen;
2077 ksglen -= usglen; 2077 ksglen -= usglen;
2078 break; 2078 break;
2079 } else { 2079 } else {
2080 if (ksglen >= num_xfer) { 2080 if (ksglen >= num_xfer) {
2081 if (__copy_to_user(up, p, num_xfer)) 2081 if (__copy_to_user(up, p, num_xfer))
2082 return -EFAULT; 2082 return -EFAULT;
2083 return 0; 2083 return 0;
2084 } 2084 }
2085 if (__copy_to_user(up, p, ksglen)) 2085 if (__copy_to_user(up, p, ksglen))
2086 return -EFAULT; 2086 return -EFAULT;
2087 up += ksglen; 2087 up += ksglen;
2088 usglen -= ksglen; 2088 usglen -= ksglen;
2089 } 2089 }
2090 ++k; 2090 ++k;
2091 if (k >= schp->k_use_sg) 2091 if (k >= schp->k_use_sg)
2092 return 0; 2092 return 0;
2093 } 2093 }
2094 } 2094 }
2095 2095
2096 return 0; 2096 return 0;
2097 } 2097 }
2098 2098
2099 static int 2099 static int
2100 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) 2100 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2101 { 2101 {
2102 Sg_scatter_hold *schp = &srp->data; 2102 Sg_scatter_hold *schp = &srp->data;
2103 struct scatterlist *sg = schp->buffer; 2103 struct scatterlist *sg = schp->buffer;
2104 int k, num; 2104 int k, num;
2105 2105
2106 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", 2106 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
2107 num_read_xfer)); 2107 num_read_xfer));
2108 if ((!outp) || (num_read_xfer <= 0)) 2108 if ((!outp) || (num_read_xfer <= 0))
2109 return 0; 2109 return 0;
2110 2110
2111 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) { 2111 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) {
2112 num = sg->length; 2112 num = sg->length;
2113 if (num > num_read_xfer) { 2113 if (num > num_read_xfer) {
2114 if (__copy_to_user(outp, page_address(sg_page(sg)), 2114 if (__copy_to_user(outp, page_address(sg_page(sg)),
2115 num_read_xfer)) 2115 num_read_xfer))
2116 return -EFAULT; 2116 return -EFAULT;
2117 break; 2117 break;
2118 } else { 2118 } else {
2119 if (__copy_to_user(outp, page_address(sg_page(sg)), 2119 if (__copy_to_user(outp, page_address(sg_page(sg)),
2120 num)) 2120 num))
2121 return -EFAULT; 2121 return -EFAULT;
2122 num_read_xfer -= num; 2122 num_read_xfer -= num;
2123 if (num_read_xfer <= 0) 2123 if (num_read_xfer <= 0)
2124 break; 2124 break;
2125 outp += num; 2125 outp += num;
2126 } 2126 }
2127 } 2127 }
2128 2128
2129 return 0; 2129 return 0;
2130 } 2130 }
2131 2131
2132 static void 2132 static void
2133 sg_build_reserve(Sg_fd * sfp, int req_size) 2133 sg_build_reserve(Sg_fd * sfp, int req_size)
2134 { 2134 {
2135 Sg_scatter_hold *schp = &sfp->reserve; 2135 Sg_scatter_hold *schp = &sfp->reserve;
2136 2136
2137 SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size)); 2137 SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
2138 do { 2138 do {
2139 if (req_size < PAGE_SIZE) 2139 if (req_size < PAGE_SIZE)
2140 req_size = PAGE_SIZE; 2140 req_size = PAGE_SIZE;
2141 if (0 == sg_build_indirect(schp, sfp, req_size)) 2141 if (0 == sg_build_indirect(schp, sfp, req_size))
2142 return; 2142 return;
2143 else 2143 else
2144 sg_remove_scat(schp); 2144 sg_remove_scat(schp);
2145 req_size >>= 1; /* divide by 2 */ 2145 req_size >>= 1; /* divide by 2 */
2146 } while (req_size > (PAGE_SIZE / 2)); 2146 } while (req_size > (PAGE_SIZE / 2));
2147 } 2147 }
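Editor's note: the reserve buffer built here is per file descriptor and sized from def_reserved_size (sg_big_buff); user space can inspect or resize it with the SG_GET_RESERVED_SIZE / SG_SET_RESERVED_SIZE ioctls before issuing large transfers, which is what lets sg_link_reserve() below satisfy a request without a fresh allocation. A sketch, assuming an already-open sg file descriptor:

#include <sys/ioctl.h>
#include <scsi/sg.h>

/* sketch: grow this fd's reserve buffer; returns the size actually granted */
static int ensure_reserve(int fd, int want_bytes)
{
	int cur = 0;

	if (ioctl(fd, SG_GET_RESERVED_SIZE, &cur) < 0)
		return -1;
	if (cur < want_bytes &&
	    ioctl(fd, SG_SET_RESERVED_SIZE, &want_bytes) < 0)
		return -1;
	/* the kernel may clamp the request; re-read the granted size */
	if (ioctl(fd, SG_GET_RESERVED_SIZE, &cur) < 0)
		return -1;
	return cur;
}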
2148 2148
2149 static void 2149 static void
2150 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) 2150 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2151 { 2151 {
2152 Sg_scatter_hold *req_schp = &srp->data; 2152 Sg_scatter_hold *req_schp = &srp->data;
2153 Sg_scatter_hold *rsv_schp = &sfp->reserve; 2153 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2154 struct scatterlist *sg = rsv_schp->buffer; 2154 struct scatterlist *sg = rsv_schp->buffer;
2155 int k, num, rem; 2155 int k, num, rem;
2156 2156
2157 srp->res_used = 1; 2157 srp->res_used = 1;
2158 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); 2158 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2159 rem = size; 2159 rem = size;
2160 2160
2161 for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) { 2161 for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) {
2162 num = sg->length; 2162 num = sg->length;
2163 if (rem <= num) { 2163 if (rem <= num) {
2164 sfp->save_scat_len = num; 2164 sfp->save_scat_len = num;
2165 sg->length = rem; 2165 sg->length = rem;
2166 req_schp->k_use_sg = k + 1; 2166 req_schp->k_use_sg = k + 1;
2167 req_schp->sglist_len = rsv_schp->sglist_len; 2167 req_schp->sglist_len = rsv_schp->sglist_len;
2168 req_schp->buffer = rsv_schp->buffer; 2168 req_schp->buffer = rsv_schp->buffer;
2169 2169
2170 req_schp->bufflen = size; 2170 req_schp->bufflen = size;
2171 req_schp->b_malloc_len = rsv_schp->b_malloc_len; 2171 req_schp->b_malloc_len = rsv_schp->b_malloc_len;
2172 break; 2172 break;
2173 } else 2173 } else
2174 rem -= num; 2174 rem -= num;
2175 } 2175 }
2176 2176
2177 if (k >= rsv_schp->k_use_sg) 2177 if (k >= rsv_schp->k_use_sg)
2178 SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n")); 2178 SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
2179 } 2179 }
2180 2180
2181 static void 2181 static void
2182 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) 2182 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2183 { 2183 {
2184 Sg_scatter_hold *req_schp = &srp->data; 2184 Sg_scatter_hold *req_schp = &srp->data;
2185 Sg_scatter_hold *rsv_schp = &sfp->reserve; 2185 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2186 2186
2187 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", 2187 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2188 (int) req_schp->k_use_sg)); 2188 (int) req_schp->k_use_sg));
2189 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) { 2189 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2190 struct scatterlist *sg = rsv_schp->buffer; 2190 struct scatterlist *sg = rsv_schp->buffer;
2191 2191
2192 if (sfp->save_scat_len > 0) 2192 if (sfp->save_scat_len > 0)
2193 (sg + (req_schp->k_use_sg - 1))->length = 2193 (sg + (req_schp->k_use_sg - 1))->length =
2194 (unsigned) sfp->save_scat_len; 2194 (unsigned) sfp->save_scat_len;
2195 else 2195 else
2196 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n")); 2196 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2197 } 2197 }
2198 req_schp->k_use_sg = 0; 2198 req_schp->k_use_sg = 0;
2199 req_schp->bufflen = 0; 2199 req_schp->bufflen = 0;
2200 req_schp->buffer = NULL; 2200 req_schp->buffer = NULL;
2201 req_schp->sglist_len = 0; 2201 req_schp->sglist_len = 0;
2202 sfp->save_scat_len = 0; 2202 sfp->save_scat_len = 0;
2203 srp->res_used = 0; 2203 srp->res_used = 0;
2204 } 2204 }
2205 2205
2206 static Sg_request * 2206 static Sg_request *
2207 sg_get_rq_mark(Sg_fd * sfp, int pack_id) 2207 sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2208 { 2208 {
2209 Sg_request *resp; 2209 Sg_request *resp;
2210 unsigned long iflags; 2210 unsigned long iflags;
2211 2211
2212 write_lock_irqsave(&sfp->rq_list_lock, iflags); 2212 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2213 for (resp = sfp->headrp; resp; resp = resp->nextrp) { 2213 for (resp = sfp->headrp; resp; resp = resp->nextrp) {
2214 /* look for requests that are ready + not SG_IO owned */ 2214 /* look for requests that are ready + not SG_IO owned */
2215 if ((1 == resp->done) && (!resp->sg_io_owned) && 2215 if ((1 == resp->done) && (!resp->sg_io_owned) &&
2216 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { 2216 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2217 resp->done = 2; /* guard against other readers */ 2217 resp->done = 2; /* guard against other readers */
2218 break; 2218 break;
2219 } 2219 }
2220 } 2220 }
2221 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2221 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2222 return resp; 2222 return resp;
2223 } 2223 }
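Editor's note: sg_get_rq_mark() is the lookup behind the asynchronous interface: each write() queues a request, and a later read() takes either the oldest finished one (pack_id -1) or, with SG_SET_FORCE_PACK_ID enabled, the specific pack_id the caller asks for. A sketch of that pairing, assuming an open sg fd and an sg_io_hdr already filled in for the v3 interface:

#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

/* queue a prepared sg_io_hdr asynchronously, then collect it by pack_id */
static int do_async(int fd, struct sg_io_hdr *io, int pack_id)
{
	int one = 1;

	io->pack_id = pack_id;
	if (write(fd, io, sizeof(*io)) != sizeof(*io))	/* queue it */
		return -1;
	/* make read() honour the pack_id in the header we pass in */
	if (ioctl(fd, SG_SET_FORCE_PACK_ID, &one) < 0)
		return -1;
	io->pack_id = pack_id;		/* which completion we want back */
	if (read(fd, io, sizeof(*io)) != sizeof(*io))	/* may block */
		return -1;
	return io->status;
}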
2224 2224
2225 #ifdef CONFIG_SCSI_PROC_FS 2225 #ifdef CONFIG_SCSI_PROC_FS
2226 static Sg_request * 2226 static Sg_request *
2227 sg_get_nth_request(Sg_fd * sfp, int nth) 2227 sg_get_nth_request(Sg_fd * sfp, int nth)
2228 { 2228 {
2229 Sg_request *resp; 2229 Sg_request *resp;
2230 unsigned long iflags; 2230 unsigned long iflags;
2231 int k; 2231 int k;
2232 2232
2233 read_lock_irqsave(&sfp->rq_list_lock, iflags); 2233 read_lock_irqsave(&sfp->rq_list_lock, iflags);
2234 for (k = 0, resp = sfp->headrp; resp && (k < nth); 2234 for (k = 0, resp = sfp->headrp; resp && (k < nth);
2235 ++k, resp = resp->nextrp) ; 2235 ++k, resp = resp->nextrp) ;
2236 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2236 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2237 return resp; 2237 return resp;
2238 } 2238 }
2239 #endif 2239 #endif
2240 2240
2241 /* always adds to end of list */ 2241 /* always adds to end of list */
2242 static Sg_request * 2242 static Sg_request *
2243 sg_add_request(Sg_fd * sfp) 2243 sg_add_request(Sg_fd * sfp)
2244 { 2244 {
2245 int k; 2245 int k;
2246 unsigned long iflags; 2246 unsigned long iflags;
2247 Sg_request *resp; 2247 Sg_request *resp;
2248 Sg_request *rp = sfp->req_arr; 2248 Sg_request *rp = sfp->req_arr;
2249 2249
2250 write_lock_irqsave(&sfp->rq_list_lock, iflags); 2250 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2251 resp = sfp->headrp; 2251 resp = sfp->headrp;
2252 if (!resp) { 2252 if (!resp) {
2253 memset(rp, 0, sizeof (Sg_request)); 2253 memset(rp, 0, sizeof (Sg_request));
2254 rp->parentfp = sfp; 2254 rp->parentfp = sfp;
2255 resp = rp; 2255 resp = rp;
2256 sfp->headrp = resp; 2256 sfp->headrp = resp;
2257 } else { 2257 } else {
2258 if (0 == sfp->cmd_q) 2258 if (0 == sfp->cmd_q)
2259 resp = NULL; /* command queuing disallowed */ 2259 resp = NULL; /* command queuing disallowed */
2260 else { 2260 else {
2261 for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { 2261 for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2262 if (!rp->parentfp) 2262 if (!rp->parentfp)
2263 break; 2263 break;
2264 } 2264 }
2265 if (k < SG_MAX_QUEUE) { 2265 if (k < SG_MAX_QUEUE) {
2266 memset(rp, 0, sizeof (Sg_request)); 2266 memset(rp, 0, sizeof (Sg_request));
2267 rp->parentfp = sfp; 2267 rp->parentfp = sfp;
2268 while (resp->nextrp) 2268 while (resp->nextrp)
2269 resp = resp->nextrp; 2269 resp = resp->nextrp;
2270 resp->nextrp = rp; 2270 resp->nextrp = rp;
2271 resp = rp; 2271 resp = rp;
2272 } else 2272 } else
2273 resp = NULL; 2273 resp = NULL;
2274 } 2274 }
2275 } 2275 }
2276 if (resp) { 2276 if (resp) {
2277 resp->nextrp = NULL; 2277 resp->nextrp = NULL;
2278 resp->header.duration = jiffies_to_msecs(jiffies); 2278 resp->header.duration = jiffies_to_msecs(jiffies);
2279 } 2279 }
2280 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2280 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2281 return resp; 2281 return resp;
2282 } 2282 }
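Editor's note: sg_add_request() embeds up to SG_MAX_QUEUE requests in the fd itself and refuses a second outstanding request unless command queuing (sfp->cmd_q) is enabled; user space can toggle that with the SG_SET_COMMAND_Q ioctl before queuing several write()s at once. A sketch:

#include <sys/ioctl.h>
#include <scsi/sg.h>

/* allow several write()-queued commands to be outstanding on this fd */
static int enable_cmd_queuing(int fd)
{
	int on = 1;

	return ioctl(fd, SG_SET_COMMAND_Q, &on);
}

Without this, the 0 == sfp->cmd_q branch above makes a second write() fail rather than queue.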
2283 2283
2284 /* Return of 1 for found; 0 for not found */ 2284 /* Return of 1 for found; 0 for not found */
2285 static int 2285 static int
2286 sg_remove_request(Sg_fd * sfp, Sg_request * srp) 2286 sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2287 { 2287 {
2288 Sg_request *prev_rp; 2288 Sg_request *prev_rp;
2289 Sg_request *rp; 2289 Sg_request *rp;
2290 unsigned long iflags; 2290 unsigned long iflags;
2291 int res = 0; 2291 int res = 0;
2292 2292
2293 if ((!sfp) || (!srp) || (!sfp->headrp)) 2293 if ((!sfp) || (!srp) || (!sfp->headrp))
2294 return res; 2294 return res;
2295 write_lock_irqsave(&sfp->rq_list_lock, iflags); 2295 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2296 prev_rp = sfp->headrp; 2296 prev_rp = sfp->headrp;
2297 if (srp == prev_rp) { 2297 if (srp == prev_rp) {
2298 sfp->headrp = prev_rp->nextrp; 2298 sfp->headrp = prev_rp->nextrp;
2299 prev_rp->parentfp = NULL; 2299 prev_rp->parentfp = NULL;
2300 res = 1; 2300 res = 1;
2301 } else { 2301 } else {
2302 while ((rp = prev_rp->nextrp)) { 2302 while ((rp = prev_rp->nextrp)) {
2303 if (srp == rp) { 2303 if (srp == rp) {
2304 prev_rp->nextrp = rp->nextrp; 2304 prev_rp->nextrp = rp->nextrp;
2305 rp->parentfp = NULL; 2305 rp->parentfp = NULL;
2306 res = 1; 2306 res = 1;
2307 break; 2307 break;
2308 } 2308 }
2309 prev_rp = rp; 2309 prev_rp = rp;
2310 } 2310 }
2311 } 2311 }
2312 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2312 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2313 return res; 2313 return res;
2314 } 2314 }
2315 2315
2316 #ifdef CONFIG_SCSI_PROC_FS 2316 #ifdef CONFIG_SCSI_PROC_FS
2317 static Sg_fd * 2317 static Sg_fd *
2318 sg_get_nth_sfp(Sg_device * sdp, int nth) 2318 sg_get_nth_sfp(Sg_device * sdp, int nth)
2319 { 2319 {
2320 Sg_fd *resp; 2320 Sg_fd *resp;
2321 unsigned long iflags; 2321 unsigned long iflags;
2322 int k; 2322 int k;
2323 2323
2324 read_lock_irqsave(&sg_index_lock, iflags); 2324 read_lock_irqsave(&sg_index_lock, iflags);
2325 for (k = 0, resp = sdp->headfp; resp && (k < nth); 2325 for (k = 0, resp = sdp->headfp; resp && (k < nth);
2326 ++k, resp = resp->nextfp) ; 2326 ++k, resp = resp->nextfp) ;
2327 read_unlock_irqrestore(&sg_index_lock, iflags); 2327 read_unlock_irqrestore(&sg_index_lock, iflags);
2328 return resp; 2328 return resp;
2329 } 2329 }
2330 #endif 2330 #endif
2331 2331
2332 static Sg_fd * 2332 static Sg_fd *
2333 sg_add_sfp(Sg_device * sdp, int dev) 2333 sg_add_sfp(Sg_device * sdp, int dev)
2334 { 2334 {
2335 Sg_fd *sfp; 2335 Sg_fd *sfp;
2336 unsigned long iflags; 2336 unsigned long iflags;
2337 int bufflen; 2337 int bufflen;
2338 2338
2339 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); 2339 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2340 if (!sfp) 2340 if (!sfp)
2341 return NULL; 2341 return NULL;
2342 2342
2343 init_waitqueue_head(&sfp->read_wait); 2343 init_waitqueue_head(&sfp->read_wait);
2344 rwlock_init(&sfp->rq_list_lock); 2344 rwlock_init(&sfp->rq_list_lock);
2345 2345
2346 sfp->timeout = SG_DEFAULT_TIMEOUT; 2346 sfp->timeout = SG_DEFAULT_TIMEOUT;
2347 sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; 2347 sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2348 sfp->force_packid = SG_DEF_FORCE_PACK_ID; 2348 sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2349 sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ? 2349 sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
2350 sdp->device->host->unchecked_isa_dma : 1; 2350 sdp->device->host->unchecked_isa_dma : 1;
2351 sfp->cmd_q = SG_DEF_COMMAND_Q; 2351 sfp->cmd_q = SG_DEF_COMMAND_Q;
2352 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; 2352 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2353 sfp->parentdp = sdp; 2353 sfp->parentdp = sdp;
2354 write_lock_irqsave(&sg_index_lock, iflags); 2354 write_lock_irqsave(&sg_index_lock, iflags);
2355 if (!sdp->headfp) 2355 if (!sdp->headfp)
2356 sdp->headfp = sfp; 2356 sdp->headfp = sfp;
2357 else { /* add to tail of existing list */ 2357 else { /* add to tail of existing list */
2358 Sg_fd *pfp = sdp->headfp; 2358 Sg_fd *pfp = sdp->headfp;
2359 while (pfp->nextfp) 2359 while (pfp->nextfp)
2360 pfp = pfp->nextfp; 2360 pfp = pfp->nextfp;
2361 pfp->nextfp = sfp; 2361 pfp->nextfp = sfp;
2362 } 2362 }
2363 write_unlock_irqrestore(&sg_index_lock, iflags); 2363 write_unlock_irqrestore(&sg_index_lock, iflags);
2364 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); 2364 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2365 if (unlikely(sg_big_buff != def_reserved_size)) 2365 if (unlikely(sg_big_buff != def_reserved_size))
2366 sg_big_buff = def_reserved_size; 2366 sg_big_buff = def_reserved_size;
2367 2367
2368 bufflen = min_t(int, sg_big_buff, 2368 bufflen = min_t(int, sg_big_buff,
2369 sdp->device->request_queue->max_sectors * 512); 2369 sdp->device->request_queue->max_sectors * 512);
2370 sg_build_reserve(sfp, bufflen); 2370 sg_build_reserve(sfp, bufflen);
2371 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", 2371 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2372 sfp->reserve.bufflen, sfp->reserve.k_use_sg)); 2372 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2373 return sfp; 2373 return sfp;
2374 } 2374 }
2375 2375
2376 static void 2376 static void
2377 __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) 2377 __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2378 { 2378 {
2379 Sg_fd *fp; 2379 Sg_fd *fp;
2380 Sg_fd *prev_fp; 2380 Sg_fd *prev_fp;
2381 2381
2382 prev_fp = sdp->headfp; 2382 prev_fp = sdp->headfp;
2383 if (sfp == prev_fp) 2383 if (sfp == prev_fp)
2384 sdp->headfp = prev_fp->nextfp; 2384 sdp->headfp = prev_fp->nextfp;
2385 else { 2385 else {
2386 while ((fp = prev_fp->nextfp)) { 2386 while ((fp = prev_fp->nextfp)) {
2387 if (sfp == fp) { 2387 if (sfp == fp) {
2388 prev_fp->nextfp = fp->nextfp; 2388 prev_fp->nextfp = fp->nextfp;
2389 break; 2389 break;
2390 } 2390 }
2391 prev_fp = fp; 2391 prev_fp = fp;
2392 } 2392 }
2393 } 2393 }
2394 if (sfp->reserve.bufflen > 0) { 2394 if (sfp->reserve.bufflen > 0) {
2395 SCSI_LOG_TIMEOUT(6, 2395 SCSI_LOG_TIMEOUT(6,
2396 printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", 2396 printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
2397 (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg)); 2397 (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
2398 sg_remove_scat(&sfp->reserve); 2398 sg_remove_scat(&sfp->reserve);
2399 } 2399 }
2400 sfp->parentdp = NULL; 2400 sfp->parentdp = NULL;
2401 SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp)); 2401 SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp));
2402 kfree(sfp); 2402 kfree(sfp);
2403 } 2403 }
2404 2404
2405 /* Returns 0 in normal case, 1 when detached and sdp object removed */ 2405 /* Returns 0 in normal case, 1 when detached and sdp object removed */
2406 static int 2406 static int
2407 sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) 2407 sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2408 { 2408 {
2409 Sg_request *srp; 2409 Sg_request *srp;
2410 Sg_request *tsrp; 2410 Sg_request *tsrp;
2411 int dirty = 0; 2411 int dirty = 0;
2412 int res = 0; 2412 int res = 0;
2413 2413
2414 for (srp = sfp->headrp; srp; srp = tsrp) { 2414 for (srp = sfp->headrp; srp; srp = tsrp) {
2415 tsrp = srp->nextrp; 2415 tsrp = srp->nextrp;
2416 if (sg_srp_done(srp, sfp)) 2416 if (sg_srp_done(srp, sfp))
2417 sg_finish_rem_req(srp); 2417 sg_finish_rem_req(srp);
2418 else 2418 else
2419 ++dirty; 2419 ++dirty;
2420 } 2420 }
2421 if (0 == dirty) { 2421 if (0 == dirty) {
2422 unsigned long iflags; 2422 unsigned long iflags;
2423 2423
2424 write_lock_irqsave(&sg_index_lock, iflags); 2424 write_lock_irqsave(&sg_index_lock, iflags);
2425 __sg_remove_sfp(sdp, sfp); 2425 __sg_remove_sfp(sdp, sfp);
2426 if (sdp->detached && (NULL == sdp->headfp)) { 2426 if (sdp->detached && (NULL == sdp->headfp)) {
2427 idr_remove(&sg_index_idr, sdp->index); 2427 idr_remove(&sg_index_idr, sdp->index);
2428 kfree(sdp); 2428 kfree(sdp);
2429 res = 1; 2429 res = 1;
2430 } 2430 }
2431 write_unlock_irqrestore(&sg_index_lock, iflags); 2431 write_unlock_irqrestore(&sg_index_lock, iflags);
2432 } else { 2432 } else {
2433 /* take a reference to inhibit unloading sg and the associated adapter driver */ 2433 /* take a reference to inhibit unloading sg and the associated adapter driver */
2434 /* only bump the access_count if we actually succeeded in 2434 /* only bump the access_count if we actually succeeded in
2435 * taking another reference on the host module */ 2435 * taking another reference on the host module */
2436 scsi_device_get(sdp->device); /* XXX: retval ignored? */ 2436 scsi_device_get(sdp->device); /* XXX: retval ignored? */
2437 sfp->closed = 1; /* flag dirty state on this fd */ 2437 sfp->closed = 1; /* flag dirty state on this fd */
2438 SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n", 2438 SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
2439 dirty)); 2439 dirty));
2440 } 2440 }
2441 return res; 2441 return res;
2442 } 2442 }
2443 2443
2444 static int 2444 static int
2445 sg_res_in_use(Sg_fd * sfp) 2445 sg_res_in_use(Sg_fd * sfp)
2446 { 2446 {
2447 const Sg_request *srp; 2447 const Sg_request *srp;
2448 unsigned long iflags; 2448 unsigned long iflags;
2449 2449
2450 read_lock_irqsave(&sfp->rq_list_lock, iflags); 2450 read_lock_irqsave(&sfp->rq_list_lock, iflags);
2451 for (srp = sfp->headrp; srp; srp = srp->nextrp) 2451 for (srp = sfp->headrp; srp; srp = srp->nextrp)
2452 if (srp->res_used) 2452 if (srp->res_used)
2453 break; 2453 break;
2454 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2454 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2455 return srp ? 1 : 0; 2455 return srp ? 1 : 0;
2456 } 2456 }
2457 2457
2458 /* On a non-NULL return, the size actually allocated is output via retSzp */ 2458 /* On a non-NULL return, the size actually allocated is output via retSzp */
2459 static struct page * 2459 static struct page *
2460 sg_page_malloc(int rqSz, int lowDma, int *retSzp) 2460 sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2461 { 2461 {
2462 struct page *resp = NULL; 2462 struct page *resp = NULL;
2463 gfp_t page_mask; 2463 gfp_t page_mask;
2464 int order, a_size; 2464 int order, a_size;
2465 int resSz; 2465 int resSz;
2466 2466
2467 if ((rqSz <= 0) || (NULL == retSzp)) 2467 if ((rqSz <= 0) || (NULL == retSzp))
2468 return resp; 2468 return resp;
2469 2469
2470 if (lowDma) 2470 if (lowDma)
2471 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN; 2471 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
2472 else 2472 else
2473 page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; 2473 page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
2474 2474
2475 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz; 2475 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2476 order++, a_size <<= 1) ; 2476 order++, a_size <<= 1) ;
2477 resSz = a_size; /* rounded up if necessary */ 2477 resSz = a_size; /* rounded up if necessary */
2478 resp = alloc_pages(page_mask, order); 2478 resp = alloc_pages(page_mask, order);
2479 while ((!resp) && order) { 2479 while ((!resp) && order) {
2480 --order; 2480 --order;
2481 a_size >>= 1; /* divide by 2, until PAGE_SIZE */ 2481 a_size >>= 1; /* divide by 2, until PAGE_SIZE */
2482 resp = alloc_pages(page_mask, order); /* try half */ 2482 resp = alloc_pages(page_mask, order); /* try half */
2483 resSz = a_size; 2483 resSz = a_size;
2484 } 2484 }
2485 if (resp) { 2485 if (resp) {
2486 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2486 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2487 memset(page_address(resp), 0, resSz); 2487 memset(page_address(resp), 0, resSz);
2488 *retSzp = resSz; 2488 *retSzp = resSz;
2489 } 2489 }
2490 return resp; 2490 return resp;
2491 } 2491 }
2492 2492
2493 static void 2493 static void
2494 sg_page_free(struct page *page, int size) 2494 sg_page_free(struct page *page, int size)
2495 { 2495 {
2496 int order, a_size; 2496 int order, a_size;
2497 2497
2498 if (!page) 2498 if (!page)
2499 return; 2499 return;
2500 for (order = 0, a_size = PAGE_SIZE; a_size < size; 2500 for (order = 0, a_size = PAGE_SIZE; a_size < size;
2501 order++, a_size <<= 1) ; 2501 order++, a_size <<= 1) ;
2502 __free_pages(page, order); 2502 __free_pages(page, order);
2503 } 2503 }
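sg_page_malloc() and sg_page_free() both recompute the buddy-allocator order with the same doubling loop: find the smallest order such that PAGE_SIZE << order covers the requested size. A standalone sketch of just that rounding, assuming a 4 KiB page for illustration:

/* Minimal sketch of the order computation shared by sg_page_malloc()
 * and sg_page_free(): smallest 'order' with PAGE_SIZE << order >= size.
 * The allocation is therefore rounded up to a power-of-two number of
 * pages. Assumes a 4 KiB page. */
#include <stdio.h>

#define PAGE_SIZE 4096

static int size_to_order(int size)
{
        int order = 0;
        int a_size = PAGE_SIZE;

        while (a_size < size) {
                order++;
                a_size <<= 1;
        }
        return order;
}

int main(void)
{
        printf("%d\n", size_to_order(4096));  /* 0: one page suffices */
        printf("%d\n", size_to_order(4097));  /* 1: rounded up to 8 KiB */
        printf("%d\n", size_to_order(65536)); /* 4: 16 pages = 2^4 */
        return 0;
}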
2504 2504
2505 #ifndef MAINTENANCE_IN_CMD 2505 #ifndef MAINTENANCE_IN_CMD
2506 #define MAINTENANCE_IN_CMD 0xa3 2506 #define MAINTENANCE_IN_CMD 0xa3
2507 #endif 2507 #endif
2508
2509 static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
2510 INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
2511 READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
2512 SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
2513 };
2514
2515 static int
2516 sg_allow_access(unsigned char opcode, char dev_type)
2517 {
2518 int k;
2519
2520 if (TYPE_SCANNER == dev_type) /* but not TYPE_ROM: may be a burner */
2521 return 1;
2522 for (k = 0; k < sizeof (allow_ops); ++k) {
2523 if (opcode == allow_ops[k])
2524 return 1;
2525 }
2526 return 0;
2527 }
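The block deleted above was the sg driver's private command whitelist: a blanket pass for scanners, then a linear scan of allow_ops[]. Elsewhere in this patch the check moves to a per-gendisk filter that user space can edit, and a fixed-size bitmap makes each opcode lookup O(1) instead of a scan. A minimal standalone sketch of that bitmap idea follows; all names here (cmd_filter, read_ok, the opcode values) are illustrative, not the patch's actual API.

/* Illustrative sketch: replacing a linear opcode whitelist with a
 * bitmap. 256 possible SCSI opcodes fit in a small fixed array of
 * longs, so testing an opcode is a single shift-and-mask. Names are
 * hypothetical; the kernel filter code uses its own helpers. */
#include <stdio.h>
#include <string.h>

#define NR_OPCODES 256
#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct cmd_filter {
        unsigned long read_ok[NR_OPCODES / BITS_PER_LONG];
};

static void filter_allow(struct cmd_filter *f, unsigned char op)
{
        f->read_ok[op / BITS_PER_LONG] |= 1UL << (op % BITS_PER_LONG);
}

static int filter_test(const struct cmd_filter *f, unsigned char op)
{
        return (f->read_ok[op / BITS_PER_LONG] >> (op % BITS_PER_LONG)) & 1;
}

int main(void)
{
        struct cmd_filter f;

        memset(&f, 0, sizeof(f));
        filter_allow(&f, 0x00);                 /* TEST_UNIT_READY */
        filter_allow(&f, 0x12);                 /* INQUIRY */
        printf("INQUIRY allowed: %d\n", filter_test(&f, 0x12));  /* 1 */
        printf("WRITE_10 allowed: %d\n", filter_test(&f, 0x2a)); /* 0 */
        return 0;
}

In the real patch the allowed-opcode sets live per gendisk and are written through sysfs; the exact attribute names are defined in the new cmd-filter.c, which is not part of this hunk.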
2528 2508
2529 #ifdef CONFIG_SCSI_PROC_FS 2509 #ifdef CONFIG_SCSI_PROC_FS
2530 static int 2510 static int
2531 sg_idr_max_id(int id, void *p, void *data) 2511 sg_idr_max_id(int id, void *p, void *data)
2532 { 2512 {
2533 int *k = data; 2513 int *k = data;
2534 2514
2535 if (*k < id) 2515 if (*k < id)
2536 *k = id; 2516 *k = id;
2537 2517
2538 return 0; 2518 return 0;
2539 } 2519 }
2540 2520
2541 static int 2521 static int
2542 sg_last_dev(void) 2522 sg_last_dev(void)
2543 { 2523 {
2544 int k = -1; 2524 int k = -1;
2545 unsigned long iflags; 2525 unsigned long iflags;
2546 2526
2547 read_lock_irqsave(&sg_index_lock, iflags); 2527 read_lock_irqsave(&sg_index_lock, iflags);
2548 idr_for_each(&sg_index_idr, sg_idr_max_id, &k); 2528 idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
2549 read_unlock_irqrestore(&sg_index_lock, iflags); 2529 read_unlock_irqrestore(&sg_index_lock, iflags);
2550 return k + 1; /* origin 1 */ 2530 return k + 1; /* origin 1 */
2551 } 2531 }
2552 #endif 2532 #endif
2553 2533
2554 static Sg_device * 2534 static Sg_device *
2555 sg_get_dev(int dev) 2535 sg_get_dev(int dev)
2556 { 2536 {
2557 Sg_device *sdp; 2537 Sg_device *sdp;
2558 unsigned long iflags; 2538 unsigned long iflags;
2559 2539
2560 read_lock_irqsave(&sg_index_lock, iflags); 2540 read_lock_irqsave(&sg_index_lock, iflags);
2561 sdp = idr_find(&sg_index_idr, dev); 2541 sdp = idr_find(&sg_index_idr, dev);
2562 read_unlock_irqrestore(&sg_index_lock, iflags); 2542 read_unlock_irqrestore(&sg_index_lock, iflags);
2563 2543
2564 return sdp; 2544 return sdp;
2565 } 2545 }
2566 2546
2567 #ifdef CONFIG_SCSI_PROC_FS 2547 #ifdef CONFIG_SCSI_PROC_FS
2568 2548
2569 static struct proc_dir_entry *sg_proc_sgp = NULL; 2549 static struct proc_dir_entry *sg_proc_sgp = NULL;
2570 2550
2571 static char sg_proc_sg_dirname[] = "scsi/sg"; 2551 static char sg_proc_sg_dirname[] = "scsi/sg";
2572 2552
2573 static int sg_proc_seq_show_int(struct seq_file *s, void *v); 2553 static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2574 2554
2575 static int sg_proc_single_open_adio(struct inode *inode, struct file *file); 2555 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2576 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, 2556 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2577 size_t count, loff_t *off); 2557 size_t count, loff_t *off);
2578 static struct file_operations adio_fops = { 2558 static struct file_operations adio_fops = {
2579 /* .owner, .read and .llseek added in sg_proc_init() */ 2559 /* .owner, .read and .llseek added in sg_proc_init() */
2580 .open = sg_proc_single_open_adio, 2560 .open = sg_proc_single_open_adio,
2581 .write = sg_proc_write_adio, 2561 .write = sg_proc_write_adio,
2582 .release = single_release, 2562 .release = single_release,
2583 }; 2563 };
2584 2564
2585 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); 2565 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2586 static ssize_t sg_proc_write_dressz(struct file *filp, 2566 static ssize_t sg_proc_write_dressz(struct file *filp,
2587 const char __user *buffer, size_t count, loff_t *off); 2567 const char __user *buffer, size_t count, loff_t *off);
2588 static struct file_operations dressz_fops = { 2568 static struct file_operations dressz_fops = {
2589 .open = sg_proc_single_open_dressz, 2569 .open = sg_proc_single_open_dressz,
2590 .write = sg_proc_write_dressz, 2570 .write = sg_proc_write_dressz,
2591 .release = single_release, 2571 .release = single_release,
2592 }; 2572 };
2593 2573
2594 static int sg_proc_seq_show_version(struct seq_file *s, void *v); 2574 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2595 static int sg_proc_single_open_version(struct inode *inode, struct file *file); 2575 static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2596 static struct file_operations version_fops = { 2576 static struct file_operations version_fops = {
2597 .open = sg_proc_single_open_version, 2577 .open = sg_proc_single_open_version,
2598 .release = single_release, 2578 .release = single_release,
2599 }; 2579 };
2600 2580
2601 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); 2581 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2602 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file); 2582 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2603 static struct file_operations devhdr_fops = { 2583 static struct file_operations devhdr_fops = {
2604 .open = sg_proc_single_open_devhdr, 2584 .open = sg_proc_single_open_devhdr,
2605 .release = single_release, 2585 .release = single_release,
2606 }; 2586 };
2607 2587
2608 static int sg_proc_seq_show_dev(struct seq_file *s, void *v); 2588 static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
2609 static int sg_proc_open_dev(struct inode *inode, struct file *file); 2589 static int sg_proc_open_dev(struct inode *inode, struct file *file);
2610 static void * dev_seq_start(struct seq_file *s, loff_t *pos); 2590 static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2611 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); 2591 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2612 static void dev_seq_stop(struct seq_file *s, void *v); 2592 static void dev_seq_stop(struct seq_file *s, void *v);
2613 static struct file_operations dev_fops = { 2593 static struct file_operations dev_fops = {
2614 .open = sg_proc_open_dev, 2594 .open = sg_proc_open_dev,
2615 .release = seq_release, 2595 .release = seq_release,
2616 }; 2596 };
2617 static struct seq_operations dev_seq_ops = { 2597 static struct seq_operations dev_seq_ops = {
2618 .start = dev_seq_start, 2598 .start = dev_seq_start,
2619 .next = dev_seq_next, 2599 .next = dev_seq_next,
2620 .stop = dev_seq_stop, 2600 .stop = dev_seq_stop,
2621 .show = sg_proc_seq_show_dev, 2601 .show = sg_proc_seq_show_dev,
2622 }; 2602 };
2623 2603
2624 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); 2604 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2625 static int sg_proc_open_devstrs(struct inode *inode, struct file *file); 2605 static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2626 static struct file_operations devstrs_fops = { 2606 static struct file_operations devstrs_fops = {
2627 .open = sg_proc_open_devstrs, 2607 .open = sg_proc_open_devstrs,
2628 .release = seq_release, 2608 .release = seq_release,
2629 }; 2609 };
2630 static struct seq_operations devstrs_seq_ops = { 2610 static struct seq_operations devstrs_seq_ops = {
2631 .start = dev_seq_start, 2611 .start = dev_seq_start,
2632 .next = dev_seq_next, 2612 .next = dev_seq_next,
2633 .stop = dev_seq_stop, 2613 .stop = dev_seq_stop,
2634 .show = sg_proc_seq_show_devstrs, 2614 .show = sg_proc_seq_show_devstrs,
2635 }; 2615 };
2636 2616
2637 static int sg_proc_seq_show_debug(struct seq_file *s, void *v); 2617 static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2638 static int sg_proc_open_debug(struct inode *inode, struct file *file); 2618 static int sg_proc_open_debug(struct inode *inode, struct file *file);
2639 static struct file_operations debug_fops = { 2619 static struct file_operations debug_fops = {
2640 .open = sg_proc_open_debug, 2620 .open = sg_proc_open_debug,
2641 .release = seq_release, 2621 .release = seq_release,
2642 }; 2622 };
2643 static struct seq_operations debug_seq_ops = { 2623 static struct seq_operations debug_seq_ops = {
2644 .start = dev_seq_start, 2624 .start = dev_seq_start,
2645 .next = dev_seq_next, 2625 .next = dev_seq_next,
2646 .stop = dev_seq_stop, 2626 .stop = dev_seq_stop,
2647 .show = sg_proc_seq_show_debug, 2627 .show = sg_proc_seq_show_debug,
2648 }; 2628 };
2649 2629
2650 2630
2651 struct sg_proc_leaf { 2631 struct sg_proc_leaf {
2652 const char * name; 2632 const char * name;
2653 struct file_operations * fops; 2633 struct file_operations * fops;
2654 }; 2634 };
2655 2635
2656 static struct sg_proc_leaf sg_proc_leaf_arr[] = { 2636 static struct sg_proc_leaf sg_proc_leaf_arr[] = {
2657 {"allow_dio", &adio_fops}, 2637 {"allow_dio", &adio_fops},
2658 {"debug", &debug_fops}, 2638 {"debug", &debug_fops},
2659 {"def_reserved_size", &dressz_fops}, 2639 {"def_reserved_size", &dressz_fops},
2660 {"device_hdr", &devhdr_fops}, 2640 {"device_hdr", &devhdr_fops},
2661 {"devices", &dev_fops}, 2641 {"devices", &dev_fops},
2662 {"device_strs", &devstrs_fops}, 2642 {"device_strs", &devstrs_fops},
2663 {"version", &version_fops} 2643 {"version", &version_fops}
2664 }; 2644 };
2665 2645
2666 static int 2646 static int
2667 sg_proc_init(void) 2647 sg_proc_init(void)
2668 { 2648 {
2669 int k, mask; 2649 int k, mask;
2670 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); 2650 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2671 struct sg_proc_leaf * leaf; 2651 struct sg_proc_leaf * leaf;
2672 2652
2673 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); 2653 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
2674 if (!sg_proc_sgp) 2654 if (!sg_proc_sgp)
2675 return 1; 2655 return 1;
2676 for (k = 0; k < num_leaves; ++k) { 2656 for (k = 0; k < num_leaves; ++k) {
2677 leaf = &sg_proc_leaf_arr[k]; 2657 leaf = &sg_proc_leaf_arr[k];
2678 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; 2658 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2679 leaf->fops->owner = THIS_MODULE; 2659 leaf->fops->owner = THIS_MODULE;
2680 leaf->fops->read = seq_read; 2660 leaf->fops->read = seq_read;
2681 leaf->fops->llseek = seq_lseek; 2661 leaf->fops->llseek = seq_lseek;
2682 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); 2662 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
2683 } 2663 }
2684 return 0; 2664 return 0;
2685 } 2665 }
2686 2666
2687 static void 2667 static void
2688 sg_proc_cleanup(void) 2668 sg_proc_cleanup(void)
2689 { 2669 {
2690 int k; 2670 int k;
2691 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); 2671 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2692 2672
2693 if (!sg_proc_sgp) 2673 if (!sg_proc_sgp)
2694 return; 2674 return;
2695 for (k = 0; k < num_leaves; ++k) 2675 for (k = 0; k < num_leaves; ++k)
2696 remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp); 2676 remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
2697 remove_proc_entry(sg_proc_sg_dirname, NULL); 2677 remove_proc_entry(sg_proc_sg_dirname, NULL);
2698 } 2678 }
2699 2679
2700 2680
2701 static int sg_proc_seq_show_int(struct seq_file *s, void *v) 2681 static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2702 { 2682 {
2703 seq_printf(s, "%d\n", *((int *)s->private)); 2683 seq_printf(s, "%d\n", *((int *)s->private));
2704 return 0; 2684 return 0;
2705 } 2685 }
2706 2686
2707 static int sg_proc_single_open_adio(struct inode *inode, struct file *file) 2687 static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
2708 { 2688 {
2709 return single_open(file, sg_proc_seq_show_int, &sg_allow_dio); 2689 return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
2710 } 2690 }
2711 2691
2712 static ssize_t 2692 static ssize_t
2713 sg_proc_write_adio(struct file *filp, const char __user *buffer, 2693 sg_proc_write_adio(struct file *filp, const char __user *buffer,
2714 size_t count, loff_t *off) 2694 size_t count, loff_t *off)
2715 { 2695 {
2716 int num; 2696 int num;
2717 char buff[11]; 2697 char buff[11];
2718 2698
2719 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2699 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2720 return -EACCES; 2700 return -EACCES;
2721 num = (count < 10) ? count : 10; 2701 num = (count < 10) ? count : 10;
2722 if (copy_from_user(buff, buffer, num)) 2702 if (copy_from_user(buff, buffer, num))
2723 return -EFAULT; 2703 return -EFAULT;
2724 buff[num] = '\0'; 2704 buff[num] = '\0';
2725 sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0; 2705 sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
2726 return count; 2706 return count;
2727 } 2707 }
2728 2708
2729 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file) 2709 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
2730 { 2710 {
2731 return single_open(file, sg_proc_seq_show_int, &sg_big_buff); 2711 return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
2732 } 2712 }
2733 2713
2734 static ssize_t 2714 static ssize_t
2735 sg_proc_write_dressz(struct file *filp, const char __user *buffer, 2715 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2736 size_t count, loff_t *off) 2716 size_t count, loff_t *off)
2737 { 2717 {
2738 int num; 2718 int num;
2739 unsigned long k = ULONG_MAX; 2719 unsigned long k = ULONG_MAX;
2740 char buff[11]; 2720 char buff[11];
2741 2721
2742 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2722 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2743 return -EACCES; 2723 return -EACCES;
2744 num = (count < 10) ? count : 10; 2724 num = (count < 10) ? count : 10;
2745 if (copy_from_user(buff, buffer, num)) 2725 if (copy_from_user(buff, buffer, num))
2746 return -EFAULT; 2726 return -EFAULT;
2747 buff[num] = '\0'; 2727 buff[num] = '\0';
2748 k = simple_strtoul(buff, NULL, 10); 2728 k = simple_strtoul(buff, NULL, 10);
2749 if (k <= 1048576) { /* limit "big buff" to 1 MB */ 2729 if (k <= 1048576) { /* limit "big buff" to 1 MB */
2750 sg_big_buff = k; 2730 sg_big_buff = k;
2751 return count; 2731 return count;
2752 } 2732 }
2753 return -ERANGE; 2733 return -ERANGE;
2754 } 2734 }
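sg_proc_write_adio() and sg_proc_write_dressz() share the same bounded parse: copy at most 10 bytes from the user buffer, NUL-terminate, then convert with simple_strtoul(). A userspace analogue, substituting memcpy() and strtoul() for the kernel helpers:

/* Userspace analogue of the bounded parse in sg_proc_write_adio() and
 * sg_proc_write_dressz(): at most 10 bytes are copied into an 11-byte
 * local buffer, so the terminating NUL always fits. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_bounded(const char *buffer, size_t count)
{
        char buff[11];
        size_t num = (count < 10) ? count : 10;

        memcpy(buff, buffer, num);
        buff[num] = '\0';
        return strtoul(buff, NULL, 10);
}

int main(void)
{
        printf("%lu\n", parse_bounded("1048576\n", 8));     /* 1048576 */
        printf("%lu\n", parse_bounded("999999999999", 12)); /* input truncated
                                                               to 10 digits */
        return 0;
}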
2755 2735
2756 static int sg_proc_seq_show_version(struct seq_file *s, void *v) 2736 static int sg_proc_seq_show_version(struct seq_file *s, void *v)
2757 { 2737 {
2758 seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR, 2738 seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
2759 sg_version_date); 2739 sg_version_date);
2760 return 0; 2740 return 0;
2761 } 2741 }
2762 2742
2763 static int sg_proc_single_open_version(struct inode *inode, struct file *file) 2743 static int sg_proc_single_open_version(struct inode *inode, struct file *file)
2764 { 2744 {
2765 return single_open(file, sg_proc_seq_show_version, NULL); 2745 return single_open(file, sg_proc_seq_show_version, NULL);
2766 } 2746 }
2767 2747
2768 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v) 2748 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2769 { 2749 {
2770 seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t" 2750 seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
2771 "online\n"); 2751 "online\n");
2772 return 0; 2752 return 0;
2773 } 2753 }
2774 2754
2775 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file) 2755 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
2776 { 2756 {
2777 return single_open(file, sg_proc_seq_show_devhdr, NULL); 2757 return single_open(file, sg_proc_seq_show_devhdr, NULL);
2778 } 2758 }
2779 2759
2780 struct sg_proc_deviter { 2760 struct sg_proc_deviter {
2781 loff_t index; 2761 loff_t index;
2782 size_t max; 2762 size_t max;
2783 }; 2763 };
2784 2764
2785 static void * dev_seq_start(struct seq_file *s, loff_t *pos) 2765 static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2786 { 2766 {
2787 struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL); 2767 struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2788 2768
2789 s->private = it; 2769 s->private = it;
2790 if (! it) 2770 if (! it)
2791 return NULL; 2771 return NULL;
2792 2772
2793 it->index = *pos; 2773 it->index = *pos;
2794 it->max = sg_last_dev(); 2774 it->max = sg_last_dev();
2795 if (it->index >= it->max) 2775 if (it->index >= it->max)
2796 return NULL; 2776 return NULL;
2797 return it; 2777 return it;
2798 } 2778 }
2799 2779
2800 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos) 2780 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2801 { 2781 {
2802 struct sg_proc_deviter * it = s->private; 2782 struct sg_proc_deviter * it = s->private;
2803 2783
2804 *pos = ++it->index; 2784 *pos = ++it->index;
2805 return (it->index < it->max) ? it : NULL; 2785 return (it->index < it->max) ? it : NULL;
2806 } 2786 }
2807 2787
2808 static void dev_seq_stop(struct seq_file *s, void *v) 2788 static void dev_seq_stop(struct seq_file *s, void *v)
2809 { 2789 {
2810 kfree(s->private); 2790 kfree(s->private);
2811 } 2791 }
2812 2792
2813 static int sg_proc_open_dev(struct inode *inode, struct file *file) 2793 static int sg_proc_open_dev(struct inode *inode, struct file *file)
2814 { 2794 {
2815 return seq_open(file, &dev_seq_ops); 2795 return seq_open(file, &dev_seq_ops);
2816 } 2796 }
2817 2797
2818 static int sg_proc_seq_show_dev(struct seq_file *s, void *v) 2798 static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2819 { 2799 {
2820 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2800 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2821 Sg_device *sdp; 2801 Sg_device *sdp;
2822 struct scsi_device *scsidp; 2802 struct scsi_device *scsidp;
2823 2803
2824 sdp = it ? sg_get_dev(it->index) : NULL; 2804 sdp = it ? sg_get_dev(it->index) : NULL;
2825 if (sdp && (scsidp = sdp->device) && (!sdp->detached)) 2805 if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2826 seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", 2806 seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
2827 scsidp->host->host_no, scsidp->channel, 2807 scsidp->host->host_no, scsidp->channel,
2828 scsidp->id, scsidp->lun, (int) scsidp->type, 2808 scsidp->id, scsidp->lun, (int) scsidp->type,
2829 1, 2809 1,
2830 (int) scsidp->queue_depth, 2810 (int) scsidp->queue_depth,
2831 (int) scsidp->device_busy, 2811 (int) scsidp->device_busy,
2832 (int) scsi_device_online(scsidp)); 2812 (int) scsi_device_online(scsidp));
2833 else 2813 else
2834 seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); 2814 seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2835 return 0; 2815 return 0;
2836 } 2816 }
2837 2817
2838 static int sg_proc_open_devstrs(struct inode *inode, struct file *file) 2818 static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
2839 { 2819 {
2840 return seq_open(file, &devstrs_seq_ops); 2820 return seq_open(file, &devstrs_seq_ops);
2841 } 2821 }
2842 2822
2843 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) 2823 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2844 { 2824 {
2845 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2825 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2846 Sg_device *sdp; 2826 Sg_device *sdp;
2847 struct scsi_device *scsidp; 2827 struct scsi_device *scsidp;
2848 2828
2849 sdp = it ? sg_get_dev(it->index) : NULL; 2829 sdp = it ? sg_get_dev(it->index) : NULL;
2850 if (sdp && (scsidp = sdp->device) && (!sdp->detached)) 2830 if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2851 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", 2831 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2852 scsidp->vendor, scsidp->model, scsidp->rev); 2832 scsidp->vendor, scsidp->model, scsidp->rev);
2853 else 2833 else
2854 seq_printf(s, "<no active device>\n"); 2834 seq_printf(s, "<no active device>\n");
2855 return 0; 2835 return 0;
2856 } 2836 }
2857 2837
2858 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) 2838 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2859 { 2839 {
2860 int k, m, new_interface, blen, usg; 2840 int k, m, new_interface, blen, usg;
2861 Sg_request *srp; 2841 Sg_request *srp;
2862 Sg_fd *fp; 2842 Sg_fd *fp;
2863 const sg_io_hdr_t *hp; 2843 const sg_io_hdr_t *hp;
2864 const char * cp; 2844 const char * cp;
2865 unsigned int ms; 2845 unsigned int ms;
2866 2846
2867 for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) { 2847 for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
2868 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " 2848 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
2869 "(res)sgat=%d low_dma=%d\n", k + 1, 2849 "(res)sgat=%d low_dma=%d\n", k + 1,
2870 jiffies_to_msecs(fp->timeout), 2850 jiffies_to_msecs(fp->timeout),
2871 fp->reserve.bufflen, 2851 fp->reserve.bufflen,
2872 (int) fp->reserve.k_use_sg, 2852 (int) fp->reserve.k_use_sg,
2873 (int) fp->low_dma); 2853 (int) fp->low_dma);
2874 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n", 2854 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
2875 (int) fp->cmd_q, (int) fp->force_packid, 2855 (int) fp->cmd_q, (int) fp->force_packid,
2876 (int) fp->keep_orphan, (int) fp->closed); 2856 (int) fp->keep_orphan, (int) fp->closed);
2877 for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) { 2857 for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
2878 hp = &srp->header; 2858 hp = &srp->header;
2879 new_interface = (hp->interface_id == '\0') ? 0 : 1; 2859 new_interface = (hp->interface_id == '\0') ? 0 : 1;
2880 if (srp->res_used) { 2860 if (srp->res_used) {
2881 if (new_interface && 2861 if (new_interface &&
2882 (SG_FLAG_MMAP_IO & hp->flags)) 2862 (SG_FLAG_MMAP_IO & hp->flags))
2883 cp = " mmap>> "; 2863 cp = " mmap>> ";
2884 else 2864 else
2885 cp = " rb>> "; 2865 cp = " rb>> ";
2886 } else { 2866 } else {
2887 if (SG_INFO_DIRECT_IO_MASK & hp->info) 2867 if (SG_INFO_DIRECT_IO_MASK & hp->info)
2888 cp = " dio>> "; 2868 cp = " dio>> ";
2889 else 2869 else
2890 cp = " "; 2870 cp = " ";
2891 } 2871 }
2892 seq_printf(s, cp); 2872 seq_printf(s, cp);
2893 blen = srp->data.bufflen; 2873 blen = srp->data.bufflen;
2894 usg = srp->data.k_use_sg; 2874 usg = srp->data.k_use_sg;
2895 seq_printf(s, srp->done ? 2875 seq_printf(s, srp->done ?
2896 ((1 == srp->done) ? "rcv:" : "fin:") 2876 ((1 == srp->done) ? "rcv:" : "fin:")
2897 : "act:"); 2877 : "act:");
2898 seq_printf(s, " id=%d blen=%d", 2878 seq_printf(s, " id=%d blen=%d",
2899 srp->header.pack_id, blen); 2879 srp->header.pack_id, blen);
2900 if (srp->done) 2880 if (srp->done)
2901 seq_printf(s, " dur=%d", hp->duration); 2881 seq_printf(s, " dur=%d", hp->duration);
2902 else { 2882 else {
2903 ms = jiffies_to_msecs(jiffies); 2883 ms = jiffies_to_msecs(jiffies);
2904 seq_printf(s, " t_o/elap=%d/%d", 2884 seq_printf(s, " t_o/elap=%d/%d",
2905 (new_interface ? hp->timeout : 2885 (new_interface ? hp->timeout :
2906 jiffies_to_msecs(fp->timeout)), 2886 jiffies_to_msecs(fp->timeout)),
2907 (ms > hp->duration ? ms - hp->duration : 0)); 2887 (ms > hp->duration ? ms - hp->duration : 0));
2908 } 2888 }
2909 seq_printf(s, "ms sgat=%d op=0x%02x\n", usg, 2889 seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
2910 (int) srp->data.cmd_opcode); 2890 (int) srp->data.cmd_opcode);
2911 } 2891 }
2912 if (0 == m) 2892 if (0 == m)
2913 seq_printf(s, " No requests active\n"); 2893 seq_printf(s, " No requests active\n");
2914 } 2894 }
2915 } 2895 }
2916 2896
2917 static int sg_proc_open_debug(struct inode *inode, struct file *file) 2897 static int sg_proc_open_debug(struct inode *inode, struct file *file)
2918 { 2898 {
2919 return seq_open(file, &debug_seq_ops); 2899 return seq_open(file, &debug_seq_ops);
2920 } 2900 }
2921 2901
2922 static int sg_proc_seq_show_debug(struct seq_file *s, void *v) 2902 static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2923 { 2903 {
2924 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2904 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2925 Sg_device *sdp; 2905 Sg_device *sdp;
2926 2906
2927 if (it && (0 == it->index)) { 2907 if (it && (0 == it->index)) {
2928 seq_printf(s, "max_active_device=%d(origin 1)\n", 2908 seq_printf(s, "max_active_device=%d(origin 1)\n",
2929 (int)it->max); 2909 (int)it->max);
2930 seq_printf(s, " def_reserved_size=%d\n", sg_big_buff); 2910 seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
2931 } 2911 }
2932 sdp = it ? sg_get_dev(it->index) : NULL; 2912 sdp = it ? sg_get_dev(it->index) : NULL;
2933 if (sdp) { 2913 if (sdp) {
2934 struct scsi_device *scsidp = sdp->device; 2914 struct scsi_device *scsidp = sdp->device;
2935 2915
2936 if (NULL == scsidp) { 2916 if (NULL == scsidp) {
2937 seq_printf(s, "device %d detached ??\n", 2917 seq_printf(s, "device %d detached ??\n",
2938 (int)it->index); 2918 (int)it->index);
2939 return 0; 2919 return 0;
2940 } 2920 }
2941 2921
2942 if (sg_get_nth_sfp(sdp, 0)) { 2922 if (sg_get_nth_sfp(sdp, 0)) {
2943 seq_printf(s, " >>> device=%s ", 2923 seq_printf(s, " >>> device=%s ",
2944 sdp->disk->disk_name); 2924 sdp->disk->disk_name);
2945 if (sdp->detached) 2925 if (sdp->detached)
2946 seq_printf(s, "detached pending close "); 2926 seq_printf(s, "detached pending close ");
2947 else 2927 else
2948 seq_printf 2928 seq_printf
2949 (s, "scsi%d chan=%d id=%d lun=%d em=%d", 2929 (s, "scsi%d chan=%d id=%d lun=%d em=%d",
2950 scsidp->host->host_no, 2930 scsidp->host->host_no,
2951 scsidp->channel, scsidp->id, 2931 scsidp->channel, scsidp->id,
2952 scsidp->lun, 2932 scsidp->lun,
2953 scsidp->host->hostt->emulated); 2933 scsidp->host->hostt->emulated);
2954 seq_printf(s, " sg_tablesize=%d excl=%d\n", 2934 seq_printf(s, " sg_tablesize=%d excl=%d\n",
2955 sdp->sg_tablesize, sdp->exclude); 2935 sdp->sg_tablesize, sdp->exclude);
2956 } 2936 }
2957 sg_proc_debug_helper(s, sdp); 2937 sg_proc_debug_helper(s, sdp);
2958 } 2938 }
2959 return 0; 2939 return 0;
2960 } 2940 }
2961 2941
2962 #endif /* CONFIG_SCSI_PROC_FS */ 2942 #endif /* CONFIG_SCSI_PROC_FS */
2963 2943
2964 module_init(init_sg); 2944 module_init(init_sg);
include/linux/blkdev.h
1 #ifndef _LINUX_BLKDEV_H 1 #ifndef _LINUX_BLKDEV_H
2 #define _LINUX_BLKDEV_H 2 #define _LINUX_BLKDEV_H
3 3
4 #ifdef CONFIG_BLOCK 4 #ifdef CONFIG_BLOCK
5 5
6 #include <linux/sched.h> 6 #include <linux/sched.h>
7 #include <linux/major.h> 7 #include <linux/major.h>
8 #include <linux/genhd.h> 8 #include <linux/genhd.h>
9 #include <linux/list.h> 9 #include <linux/list.h>
10 #include <linux/timer.h> 10 #include <linux/timer.h>
11 #include <linux/workqueue.h> 11 #include <linux/workqueue.h>
12 #include <linux/pagemap.h> 12 #include <linux/pagemap.h>
13 #include <linux/backing-dev.h> 13 #include <linux/backing-dev.h>
14 #include <linux/wait.h> 14 #include <linux/wait.h>
15 #include <linux/mempool.h> 15 #include <linux/mempool.h>
16 #include <linux/bio.h> 16 #include <linux/bio.h>
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/stringify.h> 18 #include <linux/stringify.h>
19 #include <linux/bsg.h> 19 #include <linux/bsg.h>
20 20
21 #include <asm/scatterlist.h> 21 #include <asm/scatterlist.h>
22 22
23 struct scsi_ioctl_command; 23 struct scsi_ioctl_command;
24 24
25 struct request_queue; 25 struct request_queue;
26 struct elevator_queue; 26 struct elevator_queue;
27 typedef struct elevator_queue elevator_t; 27 typedef struct elevator_queue elevator_t;
28 struct request_pm_state; 28 struct request_pm_state;
29 struct blk_trace; 29 struct blk_trace;
30 struct request; 30 struct request;
31 struct sg_io_hdr; 31 struct sg_io_hdr;
32 32
33 #define BLKDEV_MIN_RQ 4 33 #define BLKDEV_MIN_RQ 4
34 #define BLKDEV_MAX_RQ 128 /* Default maximum */ 34 #define BLKDEV_MAX_RQ 128 /* Default maximum */
35 35
36 struct request; 36 struct request;
37 typedef void (rq_end_io_fn)(struct request *, int); 37 typedef void (rq_end_io_fn)(struct request *, int);
38 38
39 struct request_list { 39 struct request_list {
40 int count[2]; 40 int count[2];
41 int starved[2]; 41 int starved[2];
42 int elvpriv; 42 int elvpriv;
43 mempool_t *rq_pool; 43 mempool_t *rq_pool;
44 wait_queue_head_t wait[2]; 44 wait_queue_head_t wait[2];
45 }; 45 };
46 46
47 /* 47 /*
48 * request command types 48 * request command types
49 */ 49 */
50 enum rq_cmd_type_bits { 50 enum rq_cmd_type_bits {
51 REQ_TYPE_FS = 1, /* fs request */ 51 REQ_TYPE_FS = 1, /* fs request */
52 REQ_TYPE_BLOCK_PC, /* scsi command */ 52 REQ_TYPE_BLOCK_PC, /* scsi command */
53 REQ_TYPE_SENSE, /* sense request */ 53 REQ_TYPE_SENSE, /* sense request */
54 REQ_TYPE_PM_SUSPEND, /* suspend request */ 54 REQ_TYPE_PM_SUSPEND, /* suspend request */
55 REQ_TYPE_PM_RESUME, /* resume request */ 55 REQ_TYPE_PM_RESUME, /* resume request */
56 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ 56 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
57 REQ_TYPE_FLUSH, /* flush request */ 57 REQ_TYPE_FLUSH, /* flush request */
58 REQ_TYPE_SPECIAL, /* driver defined type */ 58 REQ_TYPE_SPECIAL, /* driver defined type */
59 REQ_TYPE_LINUX_BLOCK, /* generic block layer message */ 59 REQ_TYPE_LINUX_BLOCK, /* generic block layer message */
60 /* 60 /*
61 * for ATA/ATAPI devices. this really doesn't belong here, ide should 61 * for ATA/ATAPI devices. this really doesn't belong here, ide should
62 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver 62 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
63 * private REQ_LB opcodes to differentiate what type of request this is 63 * private REQ_LB opcodes to differentiate what type of request this is
64 */ 64 */
65 REQ_TYPE_ATA_TASKFILE, 65 REQ_TYPE_ATA_TASKFILE,
66 REQ_TYPE_ATA_PC, 66 REQ_TYPE_ATA_PC,
67 }; 67 };
68 68
69 /* 69 /*
70 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being 70 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
71 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a 71 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
72 * SCSI cdb). 72 * SCSI cdb).
73 * 73 *
74 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need, 74 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
75 * typically to differentiate REQ_TYPE_SPECIAL requests. 75 * typically to differentiate REQ_TYPE_SPECIAL requests.
76 * 76 *
77 */ 77 */
78 enum { 78 enum {
79 /* 79 /*
80 * just examples for now 80 * just examples for now
81 */ 81 */
82 REQ_LB_OP_EJECT = 0x40, /* eject request */ 82 REQ_LB_OP_EJECT = 0x40, /* eject request */
83 REQ_LB_OP_FLUSH = 0x41, /* flush device */ 83 REQ_LB_OP_FLUSH = 0x41, /* flush device */
84 }; 84 };
85 85
86 /* 86 /*
87 * request type modified bits. first three bits match BIO_RW* bits, important 87 * request type modified bits. first three bits match BIO_RW* bits, important
88 */ 88 */
89 enum rq_flag_bits { 89 enum rq_flag_bits {
90 __REQ_RW, /* not set, read. set, write */ 90 __REQ_RW, /* not set, read. set, write */
91 __REQ_FAILFAST, /* no low level driver retries */ 91 __REQ_FAILFAST, /* no low level driver retries */
92 __REQ_SORTED, /* elevator knows about this request */ 92 __REQ_SORTED, /* elevator knows about this request */
93 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ 93 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
94 __REQ_HARDBARRIER, /* may not be passed by drive either */ 94 __REQ_HARDBARRIER, /* may not be passed by drive either */
95 __REQ_FUA, /* forced unit access */ 95 __REQ_FUA, /* forced unit access */
96 __REQ_NOMERGE, /* don't touch this for merging */ 96 __REQ_NOMERGE, /* don't touch this for merging */
97 __REQ_STARTED, /* drive already may have started this one */ 97 __REQ_STARTED, /* drive already may have started this one */
98 __REQ_DONTPREP, /* don't call prep for this one */ 98 __REQ_DONTPREP, /* don't call prep for this one */
99 __REQ_QUEUED, /* uses queueing */ 99 __REQ_QUEUED, /* uses queueing */
100 __REQ_ELVPRIV, /* elevator private data attached */ 100 __REQ_ELVPRIV, /* elevator private data attached */
101 __REQ_FAILED, /* set if the request failed */ 101 __REQ_FAILED, /* set if the request failed */
102 __REQ_QUIET, /* don't worry about errors */ 102 __REQ_QUIET, /* don't worry about errors */
103 __REQ_PREEMPT, /* set for "ide_preempt" requests */ 103 __REQ_PREEMPT, /* set for "ide_preempt" requests */
104 __REQ_ORDERED_COLOR, /* is before or after barrier */ 104 __REQ_ORDERED_COLOR, /* is before or after barrier */
105 __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ 105 __REQ_RW_SYNC, /* request is sync (O_DIRECT) */
106 __REQ_ALLOCED, /* request came from our alloc pool */ 106 __REQ_ALLOCED, /* request came from our alloc pool */
107 __REQ_RW_META, /* metadata io request */ 107 __REQ_RW_META, /* metadata io request */
108 __REQ_COPY_USER, /* contains copies of user pages */ 108 __REQ_COPY_USER, /* contains copies of user pages */
109 __REQ_INTEGRITY, /* integrity metadata has been remapped */ 109 __REQ_INTEGRITY, /* integrity metadata has been remapped */
110 __REQ_NR_BITS, /* stops here */ 110 __REQ_NR_BITS, /* stops here */
111 }; 111 };
112 112
113 #define REQ_RW (1 << __REQ_RW) 113 #define REQ_RW (1 << __REQ_RW)
114 #define REQ_FAILFAST (1 << __REQ_FAILFAST) 114 #define REQ_FAILFAST (1 << __REQ_FAILFAST)
115 #define REQ_SORTED (1 << __REQ_SORTED) 115 #define REQ_SORTED (1 << __REQ_SORTED)
116 #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) 116 #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
117 #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) 117 #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
118 #define REQ_FUA (1 << __REQ_FUA) 118 #define REQ_FUA (1 << __REQ_FUA)
119 #define REQ_NOMERGE (1 << __REQ_NOMERGE) 119 #define REQ_NOMERGE (1 << __REQ_NOMERGE)
120 #define REQ_STARTED (1 << __REQ_STARTED) 120 #define REQ_STARTED (1 << __REQ_STARTED)
121 #define REQ_DONTPREP (1 << __REQ_DONTPREP) 121 #define REQ_DONTPREP (1 << __REQ_DONTPREP)
122 #define REQ_QUEUED (1 << __REQ_QUEUED) 122 #define REQ_QUEUED (1 << __REQ_QUEUED)
123 #define REQ_ELVPRIV (1 << __REQ_ELVPRIV) 123 #define REQ_ELVPRIV (1 << __REQ_ELVPRIV)
124 #define REQ_FAILED (1 << __REQ_FAILED) 124 #define REQ_FAILED (1 << __REQ_FAILED)
125 #define REQ_QUIET (1 << __REQ_QUIET) 125 #define REQ_QUIET (1 << __REQ_QUIET)
126 #define REQ_PREEMPT (1 << __REQ_PREEMPT) 126 #define REQ_PREEMPT (1 << __REQ_PREEMPT)
127 #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) 127 #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
128 #define REQ_RW_SYNC (1 << __REQ_RW_SYNC) 128 #define REQ_RW_SYNC (1 << __REQ_RW_SYNC)
129 #define REQ_ALLOCED (1 << __REQ_ALLOCED) 129 #define REQ_ALLOCED (1 << __REQ_ALLOCED)
130 #define REQ_RW_META (1 << __REQ_RW_META) 130 #define REQ_RW_META (1 << __REQ_RW_META)
131 #define REQ_COPY_USER (1 << __REQ_COPY_USER) 131 #define REQ_COPY_USER (1 << __REQ_COPY_USER)
132 #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) 132 #define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
133 133
134 #define BLK_MAX_CDB 16 134 #define BLK_MAX_CDB 16
135 135
136 /* 136 /*
137 * try to put the fields that are referenced together in the same cacheline. 137 * try to put the fields that are referenced together in the same cacheline.
138 * if you modify this structure, be sure to check block/blk-core.c:rq_init() 138 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
139 * as well! 139 * as well!
140 */ 140 */
141 struct request { 141 struct request {
142 struct list_head queuelist; 142 struct list_head queuelist;
143 struct list_head donelist; 143 struct list_head donelist;
144 144
145 struct request_queue *q; 145 struct request_queue *q;
146 146
147 unsigned int cmd_flags; 147 unsigned int cmd_flags;
148 enum rq_cmd_type_bits cmd_type; 148 enum rq_cmd_type_bits cmd_type;
149 149
150 /* Maintain bio traversal state for part by part I/O submission. 150 /* Maintain bio traversal state for part by part I/O submission.
151 * hard_* are block layer internals, no driver should touch them! 151 * hard_* are block layer internals, no driver should touch them!
152 */ 152 */
153 153
154 sector_t sector; /* next sector to submit */ 154 sector_t sector; /* next sector to submit */
155 sector_t hard_sector; /* next sector to complete */ 155 sector_t hard_sector; /* next sector to complete */
156 unsigned long nr_sectors; /* no. of sectors left to submit */ 156 unsigned long nr_sectors; /* no. of sectors left to submit */
157 unsigned long hard_nr_sectors; /* no. of sectors left to complete */ 157 unsigned long hard_nr_sectors; /* no. of sectors left to complete */
158 /* no. of sectors left to submit in the current segment */ 158 /* no. of sectors left to submit in the current segment */
159 unsigned int current_nr_sectors; 159 unsigned int current_nr_sectors;
160 160
161 /* no. of sectors left to complete in the current segment */ 161 /* no. of sectors left to complete in the current segment */
162 unsigned int hard_cur_sectors; 162 unsigned int hard_cur_sectors;
163 163
164 struct bio *bio; 164 struct bio *bio;
165 struct bio *biotail; 165 struct bio *biotail;
166 166
167 struct hlist_node hash; /* merge hash */ 167 struct hlist_node hash; /* merge hash */
168 /* 168 /*
169 * The rb_node is only used inside the io scheduler, requests 169 * The rb_node is only used inside the io scheduler, requests
170 * are pruned when moved to the dispatch queue. So let the 170 * are pruned when moved to the dispatch queue. So let the
171 * completion_data share space with the rb_node. 171 * completion_data share space with the rb_node.
172 */ 172 */
173 union { 173 union {
174 struct rb_node rb_node; /* sort/lookup */ 174 struct rb_node rb_node; /* sort/lookup */
175 void *completion_data; 175 void *completion_data;
176 }; 176 };
177 177
178 /* 178 /*
179 * two pointers are available for the IO schedulers, if they need 179 * two pointers are available for the IO schedulers, if they need
180 * more they have to dynamically allocate it. 180 * more they have to dynamically allocate it.
181 */ 181 */
182 void *elevator_private; 182 void *elevator_private;
183 void *elevator_private2; 183 void *elevator_private2;
184 184
185 struct gendisk *rq_disk; 185 struct gendisk *rq_disk;
186 unsigned long start_time; 186 unsigned long start_time;
187 187
188 /* Number of scatter-gather DMA addr+len pairs after 188 /* Number of scatter-gather DMA addr+len pairs after
189 * physical address coalescing is performed. 189 * physical address coalescing is performed.
190 */ 190 */
191 unsigned short nr_phys_segments; 191 unsigned short nr_phys_segments;
192 192
193 /* Number of scatter-gather addr+len pairs after 193 /* Number of scatter-gather addr+len pairs after
194 * physical and DMA remapping hardware coalescing is performed. 194 * physical and DMA remapping hardware coalescing is performed.
195 * This is the number of scatter-gather entries the driver 195 * This is the number of scatter-gather entries the driver
196 * will actually have to deal with after DMA mapping is done. 196 * will actually have to deal with after DMA mapping is done.
197 */ 197 */
198 unsigned short nr_hw_segments; 198 unsigned short nr_hw_segments;
199 199
200 unsigned short ioprio; 200 unsigned short ioprio;
201 201
202 void *special; 202 void *special;
203 char *buffer; 203 char *buffer;
204 204
205 int tag; 205 int tag;
206 int errors; 206 int errors;
207 207
208 int ref_count; 208 int ref_count;
209 209
210 /* 210 /*
211 * when request is used as a packet command carrier 211 * when request is used as a packet command carrier
212 */ 212 */
213 unsigned short cmd_len; 213 unsigned short cmd_len;
214 unsigned char __cmd[BLK_MAX_CDB]; 214 unsigned char __cmd[BLK_MAX_CDB];
215 unsigned char *cmd; 215 unsigned char *cmd;
216 216
217 unsigned int data_len; 217 unsigned int data_len;
218 unsigned int extra_len; /* length of alignment and padding */ 218 unsigned int extra_len; /* length of alignment and padding */
219 unsigned int sense_len; 219 unsigned int sense_len;
220 void *data; 220 void *data;
221 void *sense; 221 void *sense;
222 222
223 unsigned int timeout; 223 unsigned int timeout;
224 int retries; 224 int retries;
225 225
226 /* 226 /*
227 * completion callback. 227 * completion callback.
228 */ 228 */
229 rq_end_io_fn *end_io; 229 rq_end_io_fn *end_io;
230 void *end_io_data; 230 void *end_io_data;
231 231
232 /* for bidi */ 232 /* for bidi */
233 struct request *next_rq; 233 struct request *next_rq;
234 }; 234 };
235 235
236 /* 236 /*
237 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME 237 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
238 * requests. Some step values could eventually be made generic. 238 * requests. Some step values could eventually be made generic.
239 */ 239 */
240 struct request_pm_state 240 struct request_pm_state
241 { 241 {
242 /* PM state machine step value, currently driver specific */ 242 /* PM state machine step value, currently driver specific */
243 int pm_step; 243 int pm_step;
244 /* requested PM state value (S1, S2, S3, S4, ...) */ 244 /* requested PM state value (S1, S2, S3, S4, ...) */
245 u32 pm_state; 245 u32 pm_state;
246 void* data; /* for driver use */ 246 void* data; /* for driver use */
247 }; 247 };
248 248
249 #include <linux/elevator.h> 249 #include <linux/elevator.h>
250 250
251 typedef void (request_fn_proc) (struct request_queue *q); 251 typedef void (request_fn_proc) (struct request_queue *q);
252 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); 252 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
253 typedef int (prep_rq_fn) (struct request_queue *, struct request *); 253 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
254 typedef void (unplug_fn) (struct request_queue *); 254 typedef void (unplug_fn) (struct request_queue *);
255 255
256 struct bio_vec; 256 struct bio_vec;
257 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *); 257 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
258 typedef void (prepare_flush_fn) (struct request_queue *, struct request *); 258 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
259 typedef void (softirq_done_fn)(struct request *); 259 typedef void (softirq_done_fn)(struct request *);
260 typedef int (dma_drain_needed_fn)(struct request *); 260 typedef int (dma_drain_needed_fn)(struct request *);
261 261
262 enum blk_queue_state { 262 enum blk_queue_state {
263 Queue_down, 263 Queue_down,
264 Queue_up, 264 Queue_up,
265 }; 265 };
266 266
267 struct blk_queue_tag { 267 struct blk_queue_tag {
268 struct request **tag_index; /* map of busy tags */ 268 struct request **tag_index; /* map of busy tags */
269 unsigned long *tag_map; /* bit map of free/busy tags */ 269 unsigned long *tag_map; /* bit map of free/busy tags */
270 int busy; /* current depth */ 270 int busy; /* current depth */
271 int max_depth; /* what we will send to device */ 271 int max_depth; /* what we will send to device */
272 int real_max_depth; /* what the array can hold */ 272 int real_max_depth; /* what the array can hold */
273 atomic_t refcnt; /* map can be shared */ 273 atomic_t refcnt; /* map can be shared */
274 }; 274 };
275 275
276 struct request_queue 276 struct request_queue
277 { 277 {
278 /* 278 /*
279 * Together with queue_head for cacheline sharing 279 * Together with queue_head for cacheline sharing
280 */ 280 */
281 struct list_head queue_head; 281 struct list_head queue_head;
282 struct request *last_merge; 282 struct request *last_merge;
283 elevator_t *elevator; 283 elevator_t *elevator;
284 284
285 /* 285 /*
286 * the queue request freelist, one for reads and one for writes 286 * the queue request freelist, one for reads and one for writes
287 */ 287 */
288 struct request_list rq; 288 struct request_list rq;
289 289
290 request_fn_proc *request_fn; 290 request_fn_proc *request_fn;
291 make_request_fn *make_request_fn; 291 make_request_fn *make_request_fn;
292 prep_rq_fn *prep_rq_fn; 292 prep_rq_fn *prep_rq_fn;
293 unplug_fn *unplug_fn; 293 unplug_fn *unplug_fn;
294 merge_bvec_fn *merge_bvec_fn; 294 merge_bvec_fn *merge_bvec_fn;
295 prepare_flush_fn *prepare_flush_fn; 295 prepare_flush_fn *prepare_flush_fn;
296 softirq_done_fn *softirq_done_fn; 296 softirq_done_fn *softirq_done_fn;
297 dma_drain_needed_fn *dma_drain_needed; 297 dma_drain_needed_fn *dma_drain_needed;
298 298
299 /* 299 /*
300 * Dispatch queue sorting 300 * Dispatch queue sorting
301 */ 301 */
302 sector_t end_sector; 302 sector_t end_sector;
303 struct request *boundary_rq; 303 struct request *boundary_rq;
304 304
305 /* 305 /*
306 * Auto-unplugging state 306 * Auto-unplugging state
307 */ 307 */
308 struct timer_list unplug_timer; 308 struct timer_list unplug_timer;
309 int unplug_thresh; /* After this many requests */ 309 int unplug_thresh; /* After this many requests */
310 unsigned long unplug_delay; /* After this many jiffies */ 310 unsigned long unplug_delay; /* After this many jiffies */
311 struct work_struct unplug_work; 311 struct work_struct unplug_work;
312 312
313 struct backing_dev_info backing_dev_info; 313 struct backing_dev_info backing_dev_info;
314 314
315 /* 315 /*
316 * The queue owner gets to use this for whatever they like. 316 * The queue owner gets to use this for whatever they like.
317 * ll_rw_blk doesn't touch it. 317 * ll_rw_blk doesn't touch it.
318 */ 318 */
319 void *queuedata; 319 void *queuedata;
320 320
321 /* 321 /*
322 * queue needs bounce pages for pages above this limit 322 * queue needs bounce pages for pages above this limit
323 */ 323 */
324 unsigned long bounce_pfn; 324 unsigned long bounce_pfn;
325 gfp_t bounce_gfp; 325 gfp_t bounce_gfp;
326 326
327 /* 327 /*
328 * various queue flags, see QUEUE_* below 328 * various queue flags, see QUEUE_* below
329 */ 329 */
330 unsigned long queue_flags; 330 unsigned long queue_flags;
331 331
332 /* 332 /*
333 * protects queue structures from reentrancy. ->__queue_lock should 333 * protects queue structures from reentrancy. ->__queue_lock should
334 * _never_ be used directly, it is queue private. always use 334 * _never_ be used directly, it is queue private. always use
335 * ->queue_lock. 335 * ->queue_lock.
336 */ 336 */
337 spinlock_t __queue_lock; 337 spinlock_t __queue_lock;
338 spinlock_t *queue_lock; 338 spinlock_t *queue_lock;
339 339
340 /* 340 /*
341 * queue kobject 341 * queue kobject
342 */ 342 */
343 struct kobject kobj; 343 struct kobject kobj;
344 344
345 /* 345 /*
346 * queue settings 346 * queue settings
347 */ 347 */
348 unsigned long nr_requests; /* Max # of requests */ 348 unsigned long nr_requests; /* Max # of requests */
349 unsigned int nr_congestion_on; 349 unsigned int nr_congestion_on;
350 unsigned int nr_congestion_off; 350 unsigned int nr_congestion_off;
351 unsigned int nr_batching; 351 unsigned int nr_batching;
352 352
353 unsigned int max_sectors; 353 unsigned int max_sectors;
354 unsigned int max_hw_sectors; 354 unsigned int max_hw_sectors;
355 unsigned short max_phys_segments; 355 unsigned short max_phys_segments;
356 unsigned short max_hw_segments; 356 unsigned short max_hw_segments;
357 unsigned short hardsect_size; 357 unsigned short hardsect_size;
358 unsigned int max_segment_size; 358 unsigned int max_segment_size;
359 359
360 unsigned long seg_boundary_mask; 360 unsigned long seg_boundary_mask;
361 void *dma_drain_buffer; 361 void *dma_drain_buffer;
362 unsigned int dma_drain_size; 362 unsigned int dma_drain_size;
363 unsigned int dma_pad_mask; 363 unsigned int dma_pad_mask;
364 unsigned int dma_alignment; 364 unsigned int dma_alignment;
365 365
366 struct blk_queue_tag *queue_tags; 366 struct blk_queue_tag *queue_tags;
367 struct list_head tag_busy_list; 367 struct list_head tag_busy_list;
368 368
369 unsigned int nr_sorted; 369 unsigned int nr_sorted;
370 unsigned int in_flight; 370 unsigned int in_flight;
371 371
372 /* 372 /*
373 * sg stuff 373 * sg stuff
374 */ 374 */
375 unsigned int sg_timeout; 375 unsigned int sg_timeout;
376 unsigned int sg_reserved_size; 376 unsigned int sg_reserved_size;
377 int node; 377 int node;
378 #ifdef CONFIG_BLK_DEV_IO_TRACE 378 #ifdef CONFIG_BLK_DEV_IO_TRACE
379 struct blk_trace *blk_trace; 379 struct blk_trace *blk_trace;
380 #endif 380 #endif
381 /* 381 /*
382 * reserved for flush operations 382 * reserved for flush operations
383 */ 383 */
384 unsigned int ordered, next_ordered, ordseq; 384 unsigned int ordered, next_ordered, ordseq;
385 int orderr, ordcolor; 385 int orderr, ordcolor;
386 struct request pre_flush_rq, bar_rq, post_flush_rq; 386 struct request pre_flush_rq, bar_rq, post_flush_rq;
387 struct request *orig_bar_rq; 387 struct request *orig_bar_rq;
388 388
389 struct mutex sysfs_lock; 389 struct mutex sysfs_lock;
390 390
391 #if defined(CONFIG_BLK_DEV_BSG) 391 #if defined(CONFIG_BLK_DEV_BSG)
392 struct bsg_class_device bsg_dev; 392 struct bsg_class_device bsg_dev;
393 #endif 393 #endif
394 }; 394 };
395 395
396 #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ 396 #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
397 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 397 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
398 #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ 398 #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
399 #define QUEUE_FLAG_READFULL 3 /* read queue has been filled */ 399 #define QUEUE_FLAG_READFULL 3 /* read queue has been filled */
400 #define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */ 400 #define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */
401 #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ 401 #define QUEUE_FLAG_DEAD 5 /* queue being torn down */
402 #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ 402 #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
403 #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ 403 #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
404 #define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ 404 #define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
405 #define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */ 405 #define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */
406 #define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */ 406 #define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */
407 407
408 static inline int queue_is_locked(struct request_queue *q) 408 static inline int queue_is_locked(struct request_queue *q)
409 { 409 {
410 #ifdef CONFIG_SMP 410 #ifdef CONFIG_SMP
411 spinlock_t *lock = q->queue_lock; 411 spinlock_t *lock = q->queue_lock;
412 return lock && spin_is_locked(lock); 412 return lock && spin_is_locked(lock);
413 #else 413 #else
414 return 1; 414 return 1;
415 #endif 415 #endif
416 } 416 }
417 417
418 static inline void queue_flag_set_unlocked(unsigned int flag, 418 static inline void queue_flag_set_unlocked(unsigned int flag,
419 struct request_queue *q) 419 struct request_queue *q)
420 { 420 {
421 __set_bit(flag, &q->queue_flags); 421 __set_bit(flag, &q->queue_flags);
422 } 422 }
423 423
424 static inline void queue_flag_set(unsigned int flag, struct request_queue *q) 424 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
425 { 425 {
426 WARN_ON_ONCE(!queue_is_locked(q)); 426 WARN_ON_ONCE(!queue_is_locked(q));
427 __set_bit(flag, &q->queue_flags); 427 __set_bit(flag, &q->queue_flags);
428 } 428 }
429 429
430 static inline void queue_flag_clear_unlocked(unsigned int flag, 430 static inline void queue_flag_clear_unlocked(unsigned int flag,
431 struct request_queue *q) 431 struct request_queue *q)
432 { 432 {
433 __clear_bit(flag, &q->queue_flags); 433 __clear_bit(flag, &q->queue_flags);
434 } 434 }
435 435
436 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) 436 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
437 { 437 {
438 WARN_ON_ONCE(!queue_is_locked(q)); 438 WARN_ON_ONCE(!queue_is_locked(q));
439 __clear_bit(flag, &q->queue_flags); 439 __clear_bit(flag, &q->queue_flags);
440 } 440 }
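
For illustration, a minimal sketch of the intended split between the two helper flavours (my_request_fn and hardware_busy are hypothetical): the plain variants assert via queue_is_locked() that ->queue_lock is held, while the _unlocked variants are for paths where no concurrent access is possible yet.

    /* sketch: the request function runs with ->queue_lock already held */
    static void my_request_fn(struct request_queue *q)
    {
            if (hardware_busy())                    /* hypothetical predicate */
                    queue_flag_set(QUEUE_FLAG_STOPPED, q);
    }

    /* sketch: queue setup, before anyone else can see the queue */
    queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, q);
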
441 441
442 enum { 442 enum {
443 /* 443 /*
444 * Hardbarrier is supported with one of the following methods. 444 * Hardbarrier is supported with one of the following methods.
445 * 445 *
446 * NONE : hardbarrier unsupported 446 * NONE : hardbarrier unsupported
447 * DRAIN : ordering by draining is enough 447 * DRAIN : ordering by draining is enough
448 * DRAIN_FLUSH : ordering by draining w/ pre and post flushes 448 * DRAIN_FLUSH : ordering by draining w/ pre and post flushes
449 * DRAIN_FUA : ordering by draining w/ pre flush and FUA write 449 * DRAIN_FUA : ordering by draining w/ pre flush and FUA write
450 * TAG : ordering by tag is enough 450 * TAG : ordering by tag is enough
451 * TAG_FLUSH : ordering by tag w/ pre and post flushes 451 * TAG_FLUSH : ordering by tag w/ pre and post flushes
452 * TAG_FUA : ordering by tag w/ pre flush and FUA write 452 * TAG_FUA : ordering by tag w/ pre flush and FUA write
453 */ 453 */
454 QUEUE_ORDERED_NONE = 0x00, 454 QUEUE_ORDERED_NONE = 0x00,
455 QUEUE_ORDERED_DRAIN = 0x01, 455 QUEUE_ORDERED_DRAIN = 0x01,
456 QUEUE_ORDERED_TAG = 0x02, 456 QUEUE_ORDERED_TAG = 0x02,
457 457
458 QUEUE_ORDERED_PREFLUSH = 0x10, 458 QUEUE_ORDERED_PREFLUSH = 0x10,
459 QUEUE_ORDERED_POSTFLUSH = 0x20, 459 QUEUE_ORDERED_POSTFLUSH = 0x20,
460 QUEUE_ORDERED_FUA = 0x40, 460 QUEUE_ORDERED_FUA = 0x40,
461 461
462 QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | 462 QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
463 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, 463 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
464 QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | 464 QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
465 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, 465 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
466 QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | 466 QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
467 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, 467 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
468 QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | 468 QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
469 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, 469 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
470 470
471 /* 471 /*
472 * Ordered operation sequence 472 * Ordered operation sequence
473 */ 473 */
474 QUEUE_ORDSEQ_STARTED = 0x01, /* flushing in progress */ 474 QUEUE_ORDSEQ_STARTED = 0x01, /* flushing in progress */
475 QUEUE_ORDSEQ_DRAIN = 0x02, /* waiting for the queue to be drained */ 475 QUEUE_ORDSEQ_DRAIN = 0x02, /* waiting for the queue to be drained */
476 QUEUE_ORDSEQ_PREFLUSH = 0x04, /* pre-flushing in progress */ 476 QUEUE_ORDSEQ_PREFLUSH = 0x04, /* pre-flushing in progress */
477 QUEUE_ORDSEQ_BAR = 0x08, /* original barrier req in progress */ 477 QUEUE_ORDSEQ_BAR = 0x08, /* original barrier req in progress */
478 QUEUE_ORDSEQ_POSTFLUSH = 0x10, /* post-flushing in progress */ 478 QUEUE_ORDSEQ_POSTFLUSH = 0x10, /* post-flushing in progress */
479 QUEUE_ORDSEQ_DONE = 0x20, 479 QUEUE_ORDSEQ_DONE = 0x20,
480 }; 480 };
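
A hedged sketch of how a driver picks one of these composites; blk_queue_ordered() (declared further down in this header) takes the mode plus an optional flush-preparation callback (my_prepare_flush is hypothetical):

    /* sketch: write-back cache — drain the queue, flush before and after */
    blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);

    /* sketch: write-through device — draining alone orders the barrier */
    blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL);
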
481 481
482 #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) 482 #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
483 #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 483 #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
484 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 484 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
485 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 485 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
486 #define blk_queue_flushing(q) ((q)->ordseq) 486 #define blk_queue_flushing(q) ((q)->ordseq)
487 487
488 #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) 488 #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
489 #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) 489 #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
490 #define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL) 490 #define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL)
491 #define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE) 491 #define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE)
492 492
493 #define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST) 493 #define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST)
494 #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) 494 #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
495 495
496 #define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq)) 496 #define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq))
497 497
498 #define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) 498 #define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
499 #define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) 499 #define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
500 #define blk_pm_request(rq) \ 500 #define blk_pm_request(rq) \
501 (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq)) 501 (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
502 502
503 #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) 503 #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED)
504 #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) 504 #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER)
505 #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) 505 #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
506 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 506 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
507 #define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) 507 #define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
508 /* rq->queuelist of dequeued request must be list_empty() */ 508 /* rq->queuelist of dequeued request must be list_empty() */
509 #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) 509 #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
510 510
511 #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) 511 #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
512 512
513 #define rq_data_dir(rq) ((rq)->cmd_flags & 1) 513 #define rq_data_dir(rq) ((rq)->cmd_flags & 1)
514 514
515 /* 515 /*
516 * We regard a request as sync if it's a READ or a SYNC write. 516 * We regard a request as sync if it's a READ or a SYNC write.
517 */ 517 */
518 #define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC) 518 #define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
519 #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) 519 #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META)
520 520
521 static inline int blk_queue_full(struct request_queue *q, int rw) 521 static inline int blk_queue_full(struct request_queue *q, int rw)
522 { 522 {
523 if (rw == READ) 523 if (rw == READ)
524 return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags); 524 return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
525 return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); 525 return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
526 } 526 }
527 527
528 static inline void blk_set_queue_full(struct request_queue *q, int rw) 528 static inline void blk_set_queue_full(struct request_queue *q, int rw)
529 { 529 {
530 if (rw == READ) 530 if (rw == READ)
531 queue_flag_set(QUEUE_FLAG_READFULL, q); 531 queue_flag_set(QUEUE_FLAG_READFULL, q);
532 else 532 else
533 queue_flag_set(QUEUE_FLAG_WRITEFULL, q); 533 queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
534 } 534 }
535 535
536 static inline void blk_clear_queue_full(struct request_queue *q, int rw) 536 static inline void blk_clear_queue_full(struct request_queue *q, int rw)
537 { 537 {
538 if (rw == READ) 538 if (rw == READ)
539 queue_flag_clear(QUEUE_FLAG_READFULL, q); 539 queue_flag_clear(QUEUE_FLAG_READFULL, q);
540 else 540 else
541 queue_flag_clear(QUEUE_FLAG_WRITEFULL, q); 541 queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
542 } 542 }
543 543
544 544
545 /* 545 /*
546 * a mergeable request must not have the _NOMERGE or _BARRIER bit set, nor may 546 * a mergeable request must not have the _NOMERGE or _BARRIER bit set, nor may
547 * it already have been started by the driver. 547 * it already have been started by the driver.
548 */ 548 */
549 #define RQ_NOMERGE_FLAGS \ 549 #define RQ_NOMERGE_FLAGS \
550 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) 550 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
551 #define rq_mergeable(rq) \ 551 #define rq_mergeable(rq) \
552 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq))) 552 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
553 553
554 /* 554 /*
555 * q->prep_rq_fn return values 555 * q->prep_rq_fn return values
556 */ 556 */
557 #define BLKPREP_OK 0 /* serve it */ 557 #define BLKPREP_OK 0 /* serve it */
558 #define BLKPREP_KILL 1 /* fatal error, kill */ 558 #define BLKPREP_KILL 1 /* fatal error, kill */
559 #define BLKPREP_DEFER 2 /* leave on queue */ 559 #define BLKPREP_DEFER 2 /* leave on queue */
560 560
561 extern unsigned long blk_max_low_pfn, blk_max_pfn; 561 extern unsigned long blk_max_low_pfn, blk_max_pfn;
562 562
563 /* 563 /*
564 * standard bounce addresses: 564 * standard bounce addresses:
565 * 565 *
566 * BLK_BOUNCE_HIGH : bounce all highmem pages 566 * BLK_BOUNCE_HIGH : bounce all highmem pages
567 * BLK_BOUNCE_ANY : don't bounce anything 567 * BLK_BOUNCE_ANY : don't bounce anything
568 * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary 568 * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
569 */ 569 */
570 570
571 #if BITS_PER_LONG == 32 571 #if BITS_PER_LONG == 32
572 #define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) 572 #define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
573 #else 573 #else
574 #define BLK_BOUNCE_HIGH -1ULL 574 #define BLK_BOUNCE_HIGH -1ULL
575 #endif 575 #endif
576 #define BLK_BOUNCE_ANY (-1ULL) 576 #define BLK_BOUNCE_ANY (-1ULL)
577 #define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD) 577 #define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
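
A sketch of how these constants are consumed: blk_queue_bounce_limit() (declared below) records the highest physical address the device can DMA to, and blk_queue_bounce() copies any page above it.

    /* sketch: controller limited to 32-bit DMA — bounce highmem pages */
    blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);

    /* sketch: fully 64-bit capable controller — never bounce */
    blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
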
578 578
579 /* 579 /*
580 * default timeout for SG_IO if none specified 580 * default timeout for SG_IO if none specified
581 */ 581 */
582 #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) 582 #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
583 583
584 #ifdef CONFIG_BOUNCE 584 #ifdef CONFIG_BOUNCE
585 extern int init_emergency_isa_pool(void); 585 extern int init_emergency_isa_pool(void);
586 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio); 586 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
587 #else 587 #else
588 static inline int init_emergency_isa_pool(void) 588 static inline int init_emergency_isa_pool(void)
589 { 589 {
590 return 0; 590 return 0;
591 } 591 }
592 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) 592 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
593 { 593 {
594 } 594 }
595 #endif /* CONFIG_BOUNCE */ 595 #endif /* CONFIG_BOUNCE */
596 596
597 struct req_iterator { 597 struct req_iterator {
598 int i; 598 int i;
599 struct bio *bio; 599 struct bio *bio;
600 }; 600 };
601 601
602 /* This should not be used directly - use rq_for_each_segment */ 602 /* This should not be used directly - use rq_for_each_segment */
603 #define __rq_for_each_bio(_bio, rq) \ 603 #define __rq_for_each_bio(_bio, rq) \
604 if ((rq->bio)) \ 604 if ((rq->bio)) \
605 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) 605 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
606 606
607 #define rq_for_each_segment(bvl, _rq, _iter) \ 607 #define rq_for_each_segment(bvl, _rq, _iter) \
608 __rq_for_each_bio(_iter.bio, _rq) \ 608 __rq_for_each_bio(_iter.bio, _rq) \
609 bio_for_each_segment(bvl, _iter.bio, _iter.i) 609 bio_for_each_segment(bvl, _iter.bio, _iter.i)
610 610
611 #define rq_iter_last(rq, _iter) \ 611 #define rq_iter_last(rq, _iter) \
612 (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) 612 (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
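
A minimal usage sketch of the iterator (copy_to_device is a hypothetical helper, and each page is assumed to be lowmem so page_address() suffices):

    struct req_iterator iter;
    struct bio_vec *bvec;

    rq_for_each_segment(bvec, rq, iter) {
            void *buf = page_address(bvec->bv_page) + bvec->bv_offset;
            int last = rq_iter_last(rq, iter);

            copy_to_device(buf, bvec->bv_len, last);  /* hypothetical helper */
    }
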
613 613
614 extern int blk_register_queue(struct gendisk *disk); 614 extern int blk_register_queue(struct gendisk *disk);
615 extern void blk_unregister_queue(struct gendisk *disk); 615 extern void blk_unregister_queue(struct gendisk *disk);
616 extern void register_disk(struct gendisk *dev); 616 extern void register_disk(struct gendisk *dev);
617 extern void generic_make_request(struct bio *bio); 617 extern void generic_make_request(struct bio *bio);
618 extern void blk_rq_init(struct request_queue *q, struct request *rq); 618 extern void blk_rq_init(struct request_queue *q, struct request *rq);
619 extern void blk_put_request(struct request *); 619 extern void blk_put_request(struct request *);
620 extern void __blk_put_request(struct request_queue *, struct request *); 620 extern void __blk_put_request(struct request_queue *, struct request *);
621 extern void blk_end_sync_rq(struct request *rq, int error); 621 extern void blk_end_sync_rq(struct request *rq, int error);
622 extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 622 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
623 extern void blk_insert_request(struct request_queue *, struct request *, int, void *); 623 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
624 extern void blk_requeue_request(struct request_queue *, struct request *); 624 extern void blk_requeue_request(struct request_queue *, struct request *);
625 extern void blk_plug_device(struct request_queue *); 625 extern void blk_plug_device(struct request_queue *);
626 extern int blk_remove_plug(struct request_queue *); 626 extern int blk_remove_plug(struct request_queue *);
627 extern void blk_recount_segments(struct request_queue *, struct bio *); 627 extern void blk_recount_segments(struct request_queue *, struct bio *);
628 extern int scsi_cmd_ioctl(struct file *, struct request_queue *, 628 extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
629 struct gendisk *, unsigned int, void __user *); 629 struct gendisk *, unsigned int, void __user *);
630 extern int sg_scsi_ioctl(struct file *, struct request_queue *, 630 extern int sg_scsi_ioctl(struct file *, struct request_queue *,
631 struct gendisk *, struct scsi_ioctl_command __user *); 631 struct gendisk *, struct scsi_ioctl_command __user *);
632 632
633 /* 633 /*
634 * Temporary export, until SCSI gets fixed up. 634 * Temporary export, until SCSI gets fixed up.
635 */ 635 */
636 extern int blk_rq_append_bio(struct request_queue *q, struct request *rq, 636 extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
637 struct bio *bio); 637 struct bio *bio);
638 638
639 /* 639 /*
640 * A queue has just exited congestion. Note this in the global counter of 640 * A queue has just exited congestion. Note this in the global counter of
641 * congested queues, and wake up anyone who was waiting for requests to be 641 * congested queues, and wake up anyone who was waiting for requests to be
642 * put back. 642 * put back.
643 */ 643 */
644 static inline void blk_clear_queue_congested(struct request_queue *q, int rw) 644 static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
645 { 645 {
646 clear_bdi_congested(&q->backing_dev_info, rw); 646 clear_bdi_congested(&q->backing_dev_info, rw);
647 } 647 }
648 648
649 /* 649 /*
650 * A queue has just entered congestion. Flag that in the queue's VM-visible 650 * A queue has just entered congestion. Flag that in the queue's VM-visible
651 * state flags and increment the global counter of congested queues. 651 * state flags and increment the global counter of congested queues.
652 */ 652 */
653 static inline void blk_set_queue_congested(struct request_queue *q, int rw) 653 static inline void blk_set_queue_congested(struct request_queue *q, int rw)
654 { 654 {
655 set_bdi_congested(&q->backing_dev_info, rw); 655 set_bdi_congested(&q->backing_dev_info, rw);
656 } 656 }
657 657
658 extern void blk_start_queue(struct request_queue *q); 658 extern void blk_start_queue(struct request_queue *q);
659 extern void blk_stop_queue(struct request_queue *q); 659 extern void blk_stop_queue(struct request_queue *q);
660 extern void blk_sync_queue(struct request_queue *q); 660 extern void blk_sync_queue(struct request_queue *q);
661 extern void __blk_stop_queue(struct request_queue *q); 661 extern void __blk_stop_queue(struct request_queue *q);
662 extern void __blk_run_queue(struct request_queue *); 662 extern void __blk_run_queue(struct request_queue *);
663 extern void blk_run_queue(struct request_queue *); 663 extern void blk_run_queue(struct request_queue *);
664 extern void blk_start_queueing(struct request_queue *); 664 extern void blk_start_queueing(struct request_queue *);
665 extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long); 665 extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
666 extern int blk_rq_unmap_user(struct bio *); 666 extern int blk_rq_unmap_user(struct bio *);
667 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); 667 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
668 extern int blk_rq_map_user_iov(struct request_queue *, struct request *, 668 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
669 struct sg_iovec *, int, unsigned int); 669 struct sg_iovec *, int, unsigned int);
670 extern int blk_execute_rq(struct request_queue *, struct gendisk *, 670 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
671 struct request *, int); 671 struct request *, int);
672 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, 672 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
673 struct request *, int, rq_end_io_fn *); 673 struct request *, int, rq_end_io_fn *);
674 extern int blk_verify_command(unsigned char *, int);
675 extern void blk_unplug(struct request_queue *q); 674 extern void blk_unplug(struct request_queue *q);
676 675
677 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) 676 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
678 { 677 {
679 return bdev->bd_disk->queue; 678 return bdev->bd_disk->queue;
680 } 679 }
681 680
682 static inline void blk_run_backing_dev(struct backing_dev_info *bdi, 681 static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
683 struct page *page) 682 struct page *page)
684 { 683 {
685 if (bdi && bdi->unplug_io_fn) 684 if (bdi && bdi->unplug_io_fn)
686 bdi->unplug_io_fn(bdi, page); 685 bdi->unplug_io_fn(bdi, page);
687 } 686 }
688 687
689 static inline void blk_run_address_space(struct address_space *mapping) 688 static inline void blk_run_address_space(struct address_space *mapping)
690 { 689 {
691 if (mapping) 690 if (mapping)
692 blk_run_backing_dev(mapping->backing_dev_info, NULL); 691 blk_run_backing_dev(mapping->backing_dev_info, NULL);
693 } 692 }
694 693
695 /* 694 /*
696 * blk_end_request() and friends. 695 * blk_end_request() and friends.
697 * __blk_end_request() and end_request() must be called with 696 * __blk_end_request() and end_request() must be called with
698 * the request queue spinlock acquired. 697 * the request queue spinlock acquired.
699 * 698 *
700 * Several drivers define their own end_request and call 699 * Several drivers define their own end_request and call
701 * blk_end_request() for parts of the original function. 700 * blk_end_request() for parts of the original function.
702 * This prevents code duplication in drivers. 701 * This prevents code duplication in drivers.
703 */ 702 */
704 extern int blk_end_request(struct request *rq, int error, 703 extern int blk_end_request(struct request *rq, int error,
705 unsigned int nr_bytes); 704 unsigned int nr_bytes);
706 extern int __blk_end_request(struct request *rq, int error, 705 extern int __blk_end_request(struct request *rq, int error,
707 unsigned int nr_bytes); 706 unsigned int nr_bytes);
708 extern int blk_end_bidi_request(struct request *rq, int error, 707 extern int blk_end_bidi_request(struct request *rq, int error,
709 unsigned int nr_bytes, unsigned int bidi_bytes); 708 unsigned int nr_bytes, unsigned int bidi_bytes);
710 extern void end_request(struct request *, int); 709 extern void end_request(struct request *, int);
711 extern void end_queued_request(struct request *, int); 710 extern void end_queued_request(struct request *, int);
712 extern void end_dequeued_request(struct request *, int); 711 extern void end_dequeued_request(struct request *, int);
713 extern int blk_end_request_callback(struct request *rq, int error, 712 extern int blk_end_request_callback(struct request *rq, int error,
714 unsigned int nr_bytes, 713 unsigned int nr_bytes,
715 int (drv_callback)(struct request *)); 714 int (drv_callback)(struct request *));
716 extern void blk_complete_request(struct request *); 715 extern void blk_complete_request(struct request *);
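
A sketch of the completion pattern described above (my_irq_complete is hypothetical): blk_end_request() returns 0 once the whole request has been finished and non-zero while segments remain, so partial completions can be fed in as the hardware reports them.

    static void my_irq_complete(struct request *rq, int error, unsigned int bytes)
    {
            if (blk_end_request(rq, error, bytes))
                    return;         /* more of the request still outstanding */
            /* rq is now fully completed and owned by the block layer again */
    }
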
717 716
718 /* 717 /*
719 * blk_end_request() takes bytes instead of sectors as a complete size. 718 * blk_end_request() takes bytes instead of sectors as a complete size.
720 * blk_rq_bytes() returns bytes left to complete in the entire request. 719 * blk_rq_bytes() returns bytes left to complete in the entire request.
721 * blk_rq_cur_bytes() returns bytes left to complete in the current segment. 720 * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
722 */ 721 */
723 extern unsigned int blk_rq_bytes(struct request *rq); 722 extern unsigned int blk_rq_bytes(struct request *rq);
724 extern unsigned int blk_rq_cur_bytes(struct request *rq); 723 extern unsigned int blk_rq_cur_bytes(struct request *rq);
725 724
726 static inline void blkdev_dequeue_request(struct request *req) 725 static inline void blkdev_dequeue_request(struct request *req)
727 { 726 {
728 elv_dequeue_request(req->q, req); 727 elv_dequeue_request(req->q, req);
729 } 728 }
730 729
731 /* 730 /*
732 * Access functions for manipulating queue properties 731 * Access functions for manipulating queue properties
733 */ 732 */
734 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, 733 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
735 spinlock_t *lock, int node_id); 734 spinlock_t *lock, int node_id);
736 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); 735 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
737 extern void blk_cleanup_queue(struct request_queue *); 736 extern void blk_cleanup_queue(struct request_queue *);
738 extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 737 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
739 extern void blk_queue_bounce_limit(struct request_queue *, u64); 738 extern void blk_queue_bounce_limit(struct request_queue *, u64);
740 extern void blk_queue_max_sectors(struct request_queue *, unsigned int); 739 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
741 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); 740 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
742 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); 741 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
743 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 742 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
744 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); 743 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
745 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); 744 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
746 extern void blk_queue_dma_pad(struct request_queue *, unsigned int); 745 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
747 extern int blk_queue_dma_drain(struct request_queue *q, 746 extern int blk_queue_dma_drain(struct request_queue *q,
748 dma_drain_needed_fn *dma_drain_needed, 747 dma_drain_needed_fn *dma_drain_needed,
749 void *buf, unsigned int size); 748 void *buf, unsigned int size);
750 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 749 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
751 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); 750 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
752 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); 751 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
753 extern void blk_queue_dma_alignment(struct request_queue *, int); 752 extern void blk_queue_dma_alignment(struct request_queue *, int);
754 extern void blk_queue_update_dma_alignment(struct request_queue *, int); 753 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
755 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); 754 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
756 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 755 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
757 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); 756 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
758 extern int blk_do_ordered(struct request_queue *, struct request **); 757 extern int blk_do_ordered(struct request_queue *, struct request **);
759 extern unsigned blk_ordered_cur_seq(struct request_queue *); 758 extern unsigned blk_ordered_cur_seq(struct request_queue *);
760 extern unsigned blk_ordered_req_seq(struct request *); 759 extern unsigned blk_ordered_req_seq(struct request *);
761 extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int); 760 extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
762 761
763 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 762 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
764 extern void blk_dump_rq_flags(struct request *, char *); 763 extern void blk_dump_rq_flags(struct request *, char *);
765 extern void generic_unplug_device(struct request_queue *); 764 extern void generic_unplug_device(struct request_queue *);
766 extern void __generic_unplug_device(struct request_queue *); 765 extern void __generic_unplug_device(struct request_queue *);
767 extern long nr_blockdev_pages(void); 766 extern long nr_blockdev_pages(void);
768 767
769 int blk_get_queue(struct request_queue *); 768 int blk_get_queue(struct request_queue *);
770 struct request_queue *blk_alloc_queue(gfp_t); 769 struct request_queue *blk_alloc_queue(gfp_t);
771 struct request_queue *blk_alloc_queue_node(gfp_t, int); 770 struct request_queue *blk_alloc_queue_node(gfp_t, int);
772 extern void blk_put_queue(struct request_queue *); 771 extern void blk_put_queue(struct request_queue *);
773 772
774 /* 773 /*
775 * tag stuff 774 * tag stuff
776 */ 775 */
777 #define blk_queue_tag_depth(q) ((q)->queue_tags->busy) 776 #define blk_queue_tag_depth(q) ((q)->queue_tags->busy)
778 #define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth) 777 #define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth)
779 #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) 778 #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
780 extern int blk_queue_start_tag(struct request_queue *, struct request *); 779 extern int blk_queue_start_tag(struct request_queue *, struct request *);
781 extern struct request *blk_queue_find_tag(struct request_queue *, int); 780 extern struct request *blk_queue_find_tag(struct request_queue *, int);
782 extern void blk_queue_end_tag(struct request_queue *, struct request *); 781 extern void blk_queue_end_tag(struct request_queue *, struct request *);
783 extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *); 782 extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
784 extern void blk_queue_free_tags(struct request_queue *); 783 extern void blk_queue_free_tags(struct request_queue *);
785 extern int blk_queue_resize_tags(struct request_queue *, int); 784 extern int blk_queue_resize_tags(struct request_queue *, int);
786 extern void blk_queue_invalidate_tags(struct request_queue *); 785 extern void blk_queue_invalidate_tags(struct request_queue *);
787 extern struct blk_queue_tag *blk_init_tags(int); 786 extern struct blk_queue_tag *blk_init_tags(int);
788 extern void blk_free_tags(struct blk_queue_tag *); 787 extern void blk_free_tags(struct blk_queue_tag *);
789 788
790 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, 789 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
791 int tag) 790 int tag)
792 { 791 {
793 if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) 792 if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
794 return NULL; 793 return NULL;
795 return bqt->tag_index[tag]; 794 return bqt->tag_index[tag];
796 } 795 }
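
A sketch of resolving a hardware completion back to its request through the shared tag map (my_host is hypothetical; note blk_queue_end_tag() expects the queue lock to be held):

    struct request *rq = blk_map_queue_find_tag(my_host->bqt, hw_tag);

    if (rq)
            blk_queue_end_tag(rq->q, rq);   /* release the tag before completing */
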
797 796
798 extern int blkdev_issue_flush(struct block_device *, sector_t *); 797 extern int blkdev_issue_flush(struct block_device *, sector_t *);
798
799 /*
800 * command filter functions
801 */
802 extern int blk_verify_command(struct file *file, unsigned char *cmd);
803 extern int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
804 unsigned char *cmd, mode_t *f_mode);
805 extern int blk_register_filter(struct gendisk *disk);
806 extern void blk_unregister_filter(struct gendisk *disk);
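
A hedged sketch of how the new entry points are meant to fit together (caller names hypothetical; the return convention is assumed from the old blk_verify_command): ioctl paths now pass the opening file so the per-gendisk filter and the file mode can both be consulted, while disk add/remove paths publish the filter in sysfs.

    /* sketch: SG_IO-style path validating a user-supplied CDB */
    int err = blk_verify_command(file, cdb);
    if (err)
            return err;                     /* assumed -EPERM for a filtered opcode */

    /* sketch: gendisk add/remove hooks */
    blk_register_filter(disk);              /* on add: exposes the filter in sysfs */
    blk_unregister_filter(disk);            /* on remove */
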
799 807
800 #define MAX_PHYS_SEGMENTS 128 808 #define MAX_PHYS_SEGMENTS 128
801 #define MAX_HW_SEGMENTS 128 809 #define MAX_HW_SEGMENTS 128
802 #define SAFE_MAX_SECTORS 255 810 #define SAFE_MAX_SECTORS 255
803 #define BLK_DEF_MAX_SECTORS 1024 811 #define BLK_DEF_MAX_SECTORS 1024
804 812
805 #define MAX_SEGMENT_SIZE 65536 813 #define MAX_SEGMENT_SIZE 65536
806 814
807 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) 815 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
808 816
809 static inline int queue_hardsect_size(struct request_queue *q) 817 static inline int queue_hardsect_size(struct request_queue *q)
810 { 818 {
811 int retval = 512; 819 int retval = 512;
812 820
813 if (q && q->hardsect_size) 821 if (q && q->hardsect_size)
814 retval = q->hardsect_size; 822 retval = q->hardsect_size;
815 823
816 return retval; 824 return retval;
817 } 825 }
818 826
819 static inline int bdev_hardsect_size(struct block_device *bdev) 827 static inline int bdev_hardsect_size(struct block_device *bdev)
820 { 828 {
821 return queue_hardsect_size(bdev_get_queue(bdev)); 829 return queue_hardsect_size(bdev_get_queue(bdev));
822 } 830 }
823 831
824 static inline int queue_dma_alignment(struct request_queue *q) 832 static inline int queue_dma_alignment(struct request_queue *q)
825 { 833 {
826 return q ? q->dma_alignment : 511; 834 return q ? q->dma_alignment : 511;
827 } 835 }
828 836
829 /* assumes size > 256 */ 837 /* assumes size > 256 */
830 static inline unsigned int blksize_bits(unsigned int size) 838 static inline unsigned int blksize_bits(unsigned int size)
831 { 839 {
832 unsigned int bits = 8; 840 unsigned int bits = 8;
833 do { 841 do {
834 bits++; 842 bits++;
835 size >>= 1; 843 size >>= 1;
836 } while (size > 256); 844 } while (size > 256);
837 return bits; 845 return bits;
838 } 846 }
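
Worked examples of the loop above: bits starts at 8 (256 bytes), and each halving that leaves size above 256 adds one bit, so:

    blksize_bits(512);    /* returns 9  */
    blksize_bits(1024);   /* returns 10 */
    blksize_bits(4096);   /* returns 12 */
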
839 847
840 static inline unsigned int block_size(struct block_device *bdev) 848 static inline unsigned int block_size(struct block_device *bdev)
841 { 849 {
842 return bdev->bd_block_size; 850 return bdev->bd_block_size;
843 } 851 }
844 852
845 typedef struct {struct page *v;} Sector; 853 typedef struct {struct page *v;} Sector;
846 854
847 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); 855 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
848 856
849 static inline void put_dev_sector(Sector p) 857 static inline void put_dev_sector(Sector p)
850 { 858 {
851 page_cache_release(p.v); 859 page_cache_release(p.v);
852 } 860 }
853 861
854 struct work_struct; 862 struct work_struct;
855 int kblockd_schedule_work(struct work_struct *work); 863 int kblockd_schedule_work(struct work_struct *work);
856 void kblockd_flush_work(struct work_struct *work); 864 void kblockd_flush_work(struct work_struct *work);
857 865
858 #define MODULE_ALIAS_BLOCKDEV(major,minor) \ 866 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
859 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) 867 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
860 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ 868 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
861 MODULE_ALIAS("block-major-" __stringify(major) "-*") 869 MODULE_ALIAS("block-major-" __stringify(major) "-*")
862 870
863 #if defined(CONFIG_BLK_DEV_INTEGRITY) 871 #if defined(CONFIG_BLK_DEV_INTEGRITY)
864 872
865 #define INTEGRITY_FLAG_READ 1 /* verify data integrity on read */ 873 #define INTEGRITY_FLAG_READ 1 /* verify data integrity on read */
866 #define INTEGRITY_FLAG_WRITE 2 /* generate data integrity on write */ 874 #define INTEGRITY_FLAG_WRITE 2 /* generate data integrity on write */
867 875
868 struct blk_integrity_exchg { 876 struct blk_integrity_exchg {
869 void *prot_buf; 877 void *prot_buf;
870 void *data_buf; 878 void *data_buf;
871 sector_t sector; 879 sector_t sector;
872 unsigned int data_size; 880 unsigned int data_size;
873 unsigned short sector_size; 881 unsigned short sector_size;
874 const char *disk_name; 882 const char *disk_name;
875 }; 883 };
876 884
877 typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); 885 typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
878 typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *); 886 typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
879 typedef void (integrity_set_tag_fn) (void *, void *, unsigned int); 887 typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
880 typedef void (integrity_get_tag_fn) (void *, void *, unsigned int); 888 typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
881 889
882 struct blk_integrity { 890 struct blk_integrity {
883 integrity_gen_fn *generate_fn; 891 integrity_gen_fn *generate_fn;
884 integrity_vrfy_fn *verify_fn; 892 integrity_vrfy_fn *verify_fn;
885 integrity_set_tag_fn *set_tag_fn; 893 integrity_set_tag_fn *set_tag_fn;
886 integrity_get_tag_fn *get_tag_fn; 894 integrity_get_tag_fn *get_tag_fn;
887 895
888 unsigned short flags; 896 unsigned short flags;
889 unsigned short tuple_size; 897 unsigned short tuple_size;
890 unsigned short sector_size; 898 unsigned short sector_size;
891 unsigned short tag_size; 899 unsigned short tag_size;
892 900
893 const char *name; 901 const char *name;
894 902
895 struct kobject kobj; 903 struct kobject kobj;
896 }; 904 };
897 905
898 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); 906 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
899 extern void blk_integrity_unregister(struct gendisk *); 907 extern void blk_integrity_unregister(struct gendisk *);
900 extern int blk_integrity_compare(struct block_device *, struct block_device *); 908 extern int blk_integrity_compare(struct block_device *, struct block_device *);
901 extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); 909 extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
902 extern int blk_rq_count_integrity_sg(struct request *); 910 extern int blk_rq_count_integrity_sg(struct request *);
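
A minimal, assumption-laden sketch of registering a profile (callback bodies omitted, all names hypothetical; note the flags are bit numbers, matching the test_bit() usage further down):

    static struct blk_integrity my_integrity = {
            .name           = "MY-DIF-TYPE1",
            .generate_fn    = my_generate_fn,       /* fill prot_buf on WRITE */
            .verify_fn      = my_verify_fn,         /* check prot_buf on READ */
            .tuple_size     = 8,                    /* metadata bytes per sector */
            .sector_size    = 512,
            .flags          = (1 << INTEGRITY_FLAG_READ) |
                              (1 << INTEGRITY_FLAG_WRITE),
    };

    blk_integrity_register(disk, &my_integrity);
    /* ... and on teardown ... */
    blk_integrity_unregister(disk);
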
903 911
904 static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi) 912 static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
905 { 913 {
906 if (bi) 914 if (bi)
907 return bi->tuple_size; 915 return bi->tuple_size;
908 916
909 return 0; 917 return 0;
910 } 918 }
911 919
912 static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev) 920 static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
913 { 921 {
914 return bdev->bd_disk->integrity; 922 return bdev->bd_disk->integrity;
915 } 923 }
916 924
917 static inline unsigned int bdev_get_tag_size(struct block_device *bdev) 925 static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
918 { 926 {
919 struct blk_integrity *bi = bdev_get_integrity(bdev); 927 struct blk_integrity *bi = bdev_get_integrity(bdev);
920 928
921 if (bi) 929 if (bi)
922 return bi->tag_size; 930 return bi->tag_size;
923 931
924 return 0; 932 return 0;
925 } 933 }
926 934
927 static inline int bdev_integrity_enabled(struct block_device *bdev, int rw) 935 static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
928 { 936 {
929 struct blk_integrity *bi = bdev_get_integrity(bdev); 937 struct blk_integrity *bi = bdev_get_integrity(bdev);
930 938
931 if (bi == NULL) 939 if (bi == NULL)
932 return 0; 940 return 0;
933 941
934 if (rw == READ && bi->verify_fn != NULL && 942 if (rw == READ && bi->verify_fn != NULL &&
935 test_bit(INTEGRITY_FLAG_READ, &bi->flags)) 943 test_bit(INTEGRITY_FLAG_READ, &bi->flags))
936 return 1; 944 return 1;
937 945
938 if (rw == WRITE && bi->generate_fn != NULL && 946 if (rw == WRITE && bi->generate_fn != NULL &&
939 test_bit(INTEGRITY_FLAG_WRITE, &bi->flags)) 947 test_bit(INTEGRITY_FLAG_WRITE, &bi->flags))
940 return 1; 948 return 1;
941 949
942 return 0; 950 return 0;
943 } 951 }
944 952
945 static inline int blk_integrity_rq(struct request *rq) 953 static inline int blk_integrity_rq(struct request *rq)
946 { 954 {
947 return bio_integrity(rq->bio); 955 return bio_integrity(rq->bio);
948 } 956 }
949 957
950 #else /* CONFIG_BLK_DEV_INTEGRITY */ 958 #else /* CONFIG_BLK_DEV_INTEGRITY */
951 959
952 #define blk_integrity_rq(rq) (0) 960 #define blk_integrity_rq(rq) (0)
953 #define blk_rq_count_integrity_sg(a) (0) 961 #define blk_rq_count_integrity_sg(a) (0)
954 #define blk_rq_map_integrity_sg(a, b) (0) 962 #define blk_rq_map_integrity_sg(a, b) (0)
955 #define bdev_get_integrity(a) (0) 963 #define bdev_get_integrity(a) (0)
956 #define bdev_get_tag_size(a) (0) 964 #define bdev_get_tag_size(a) (0)
957 #define blk_integrity_compare(a, b) (0) 965 #define blk_integrity_compare(a, b) (0)
958 #define blk_integrity_register(a, b) (0) 966 #define blk_integrity_register(a, b) (0)
959 #define blk_integrity_unregister(a) do { } while (0) 967 #define blk_integrity_unregister(a) do { } while (0)
960 968
961 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 969 #endif /* CONFIG_BLK_DEV_INTEGRITY */
962 970
963 #else /* CONFIG_BLOCK */ 971 #else /* CONFIG_BLOCK */
964 /* 972 /*
965 * stubs for when the block layer is configured out 973 * stubs for when the block layer is configured out
966 */ 974 */
967 #define buffer_heads_over_limit 0 975 #define buffer_heads_over_limit 0
968 976
969 static inline long nr_blockdev_pages(void) 977 static inline long nr_blockdev_pages(void)
970 { 978 {
971 return 0; 979 return 0;
972 } 980 }
973 981
974 #endif /* CONFIG_BLOCK */ 982 #endif /* CONFIG_BLOCK */
975 983
976 #endif 984 #endif
include/linux/genhd.h
1 #ifndef _LINUX_GENHD_H 1 #ifndef _LINUX_GENHD_H
2 #define _LINUX_GENHD_H 2 #define _LINUX_GENHD_H
3 3
4 /* 4 /*
5 * genhd.h Copyright (C) 1992 Drew Eckhardt 5 * genhd.h Copyright (C) 1992 Drew Eckhardt
6 * Generic hard disk header file by 6 * Generic hard disk header file by
7 * Drew Eckhardt 7 * Drew Eckhardt
8 * 8 *
9 * <drew@colorado.edu> 9 * <drew@colorado.edu>
10 */ 10 */
11 11
12 #include <linux/types.h> 12 #include <linux/types.h>
13 #include <linux/kdev_t.h> 13 #include <linux/kdev_t.h>
14 14
15 #ifdef CONFIG_BLOCK 15 #ifdef CONFIG_BLOCK
16 16
17 #define kobj_to_dev(k) container_of(k, struct device, kobj) 17 #define kobj_to_dev(k) container_of(k, struct device, kobj)
18 #define dev_to_disk(device) container_of(device, struct gendisk, dev) 18 #define dev_to_disk(device) container_of(device, struct gendisk, dev)
19 #define dev_to_part(device) container_of(device, struct hd_struct, dev) 19 #define dev_to_part(device) container_of(device, struct hd_struct, dev)
20 20
21 extern struct device_type part_type; 21 extern struct device_type part_type;
22 extern struct kobject *block_depr; 22 extern struct kobject *block_depr;
23 extern struct class block_class; 23 extern struct class block_class;
24 24
25 extern const struct seq_operations partitions_op; 25 extern const struct seq_operations partitions_op;
26 extern const struct seq_operations diskstats_op; 26 extern const struct seq_operations diskstats_op;
27 27
28 enum { 28 enum {
29 /* These three have identical behaviour; use the second one if DOS FDISK gets 29 /* These three have identical behaviour; use the second one if DOS FDISK gets
30 confused about extended/logical partitions starting past cylinder 1023. */ 30 confused about extended/logical partitions starting past cylinder 1023. */
31 DOS_EXTENDED_PARTITION = 5, 31 DOS_EXTENDED_PARTITION = 5,
32 LINUX_EXTENDED_PARTITION = 0x85, 32 LINUX_EXTENDED_PARTITION = 0x85,
33 WIN98_EXTENDED_PARTITION = 0x0f, 33 WIN98_EXTENDED_PARTITION = 0x0f,
34 34
35 SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION, 35 SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION,
36 36
37 LINUX_SWAP_PARTITION = 0x82, 37 LINUX_SWAP_PARTITION = 0x82,
38 LINUX_DATA_PARTITION = 0x83, 38 LINUX_DATA_PARTITION = 0x83,
39 LINUX_LVM_PARTITION = 0x8e, 39 LINUX_LVM_PARTITION = 0x8e,
40 LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */ 40 LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */
41 41
42 SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION, 42 SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION,
43 NEW_SOLARIS_X86_PARTITION = 0xbf, 43 NEW_SOLARIS_X86_PARTITION = 0xbf,
44 44
45 DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */ 45 DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */
46 DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */ 46 DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */
47 DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */ 47 DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */
48 EZD_PARTITION = 0x55, /* EZ-DRIVE */ 48 EZD_PARTITION = 0x55, /* EZ-DRIVE */
49 49
50 FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */ 50 FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */
51 OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */ 51 OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */
52 NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */ 52 NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */
53 BSDI_PARTITION = 0xb7, /* BSDI Partition ID */ 53 BSDI_PARTITION = 0xb7, /* BSDI Partition ID */
54 MINIX_PARTITION = 0x81, /* Minix Partition ID */ 54 MINIX_PARTITION = 0x81, /* Minix Partition ID */
55 UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */ 55 UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
56 }; 56 };
57 57
58 #include <linux/major.h> 58 #include <linux/major.h>
59 #include <linux/device.h> 59 #include <linux/device.h>
60 #include <linux/smp.h> 60 #include <linux/smp.h>
61 #include <linux/string.h> 61 #include <linux/string.h>
62 #include <linux/fs.h> 62 #include <linux/fs.h>
63 #include <linux/workqueue.h> 63 #include <linux/workqueue.h>
64 64
65 struct partition { 65 struct partition {
66 unsigned char boot_ind; /* 0x80 - active */ 66 unsigned char boot_ind; /* 0x80 - active */
67 unsigned char head; /* starting head */ 67 unsigned char head; /* starting head */
68 unsigned char sector; /* starting sector */ 68 unsigned char sector; /* starting sector */
69 unsigned char cyl; /* starting cylinder */ 69 unsigned char cyl; /* starting cylinder */
70 unsigned char sys_ind; /* What partition type */ 70 unsigned char sys_ind; /* What partition type */
71 unsigned char end_head; /* end head */ 71 unsigned char end_head; /* end head */
72 unsigned char end_sector; /* end sector */ 72 unsigned char end_sector; /* end sector */
73 unsigned char end_cyl; /* end cylinder */ 73 unsigned char end_cyl; /* end cylinder */
74 __le32 start_sect; /* starting sector counting from 0 */ 74 __le32 start_sect; /* starting sector counting from 0 */
75 __le32 nr_sects; /* nr of sectors in partition */ 75 __le32 nr_sects; /* nr of sectors in partition */
76 } __attribute__((packed)); 76 } __attribute__((packed));
77 77
78 struct disk_stats { 78 struct disk_stats {
79 unsigned long sectors[2]; /* READs and WRITEs */ 79 unsigned long sectors[2]; /* READs and WRITEs */
80 unsigned long ios[2]; 80 unsigned long ios[2];
81 unsigned long merges[2]; 81 unsigned long merges[2];
82 unsigned long ticks[2]; 82 unsigned long ticks[2];
83 unsigned long io_ticks; 83 unsigned long io_ticks;
84 unsigned long time_in_queue; 84 unsigned long time_in_queue;
85 }; 85 };
86 86
87 struct hd_struct { 87 struct hd_struct {
88 sector_t start_sect; 88 sector_t start_sect;
89 sector_t nr_sects; 89 sector_t nr_sects;
90 struct device dev; 90 struct device dev;
91 struct kobject *holder_dir; 91 struct kobject *holder_dir;
92 int policy, partno; 92 int policy, partno;
93 #ifdef CONFIG_FAIL_MAKE_REQUEST 93 #ifdef CONFIG_FAIL_MAKE_REQUEST
94 int make_it_fail; 94 int make_it_fail;
95 #endif 95 #endif
96 unsigned long stamp; 96 unsigned long stamp;
97 int in_flight; 97 int in_flight;
98 #ifdef CONFIG_SMP 98 #ifdef CONFIG_SMP
99 struct disk_stats *dkstats; 99 struct disk_stats *dkstats;
100 #else 100 #else
101 struct disk_stats dkstats; 101 struct disk_stats dkstats;
102 #endif 102 #endif
103 }; 103 };
104 104
105 #define GENHD_FL_REMOVABLE 1 105 #define GENHD_FL_REMOVABLE 1
106 #define GENHD_FL_DRIVERFS 2 106 #define GENHD_FL_DRIVERFS 2
107 #define GENHD_FL_MEDIA_CHANGE_NOTIFY 4 107 #define GENHD_FL_MEDIA_CHANGE_NOTIFY 4
108 #define GENHD_FL_CD 8 108 #define GENHD_FL_CD 8
109 #define GENHD_FL_UP 16 109 #define GENHD_FL_UP 16
110 #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 110 #define GENHD_FL_SUPPRESS_PARTITION_INFO 32
111 #define GENHD_FL_FAIL 64 111 #define GENHD_FL_FAIL 64
112 112
113 #define BLK_SCSI_MAX_CMDS (256)
114 #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
113 115
116 struct blk_scsi_cmd_filter {
117 unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
118 unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
119 struct kobject kobj;
120 };
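
The filter is a pair of 256-bit opcode bitmaps: read_ok lists commands permitted with read-only access, write_ok those requiring write access. A sketch of the intended lookup and of how the new sysfs interface flips bits (the real logic lives in the new block/cmd-filter.c, not shown in this hunk):

    /* sketch: is SCSI opcode cmd[0] acceptable for a read-only open? */
    if (test_bit(cmd[0], filter->read_ok))
            return 0;                       /* allowed */

    /* sketch: policy change, e.g. permit READ(10) for unprivileged users */
    __set_bit(0x28, filter->read_ok);       /* 0x28 == READ(10) */
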
121
114 struct gendisk { 122 struct gendisk {
115 int major; /* major number of driver */ 123 int major; /* major number of driver */
116 int first_minor; 124 int first_minor;
117 int minors; /* maximum number of minors, =1 for 125 int minors; /* maximum number of minors, =1 for
118 * disks that can't be partitioned. */ 126 * disks that can't be partitioned. */
119 char disk_name[32]; /* name of major driver */ 127 char disk_name[32]; /* name of major driver */
120 struct hd_struct **part; /* [indexed by minor] */ 128 struct hd_struct **part; /* [indexed by minor] */
121 struct block_device_operations *fops; 129 struct block_device_operations *fops;
122 struct request_queue *queue; 130 struct request_queue *queue;
131 struct blk_scsi_cmd_filter cmd_filter;
123 void *private_data; 132 void *private_data;
124 sector_t capacity; 133 sector_t capacity;
125 134
126 int flags; 135 int flags;
127 struct device *driverfs_dev; // FIXME: remove 136 struct device *driverfs_dev; // FIXME: remove
128 struct device dev; 137 struct device dev;
129 struct kobject *holder_dir; 138 struct kobject *holder_dir;
130 struct kobject *slave_dir; 139 struct kobject *slave_dir;
131 140
132 struct timer_rand_state *random; 141 struct timer_rand_state *random;
133 int policy; 142 int policy;
134 143
135 atomic_t sync_io; /* RAID */ 144 atomic_t sync_io; /* RAID */
136 unsigned long stamp; 145 unsigned long stamp;
137 int in_flight; 146 int in_flight;
138 #ifdef CONFIG_SMP 147 #ifdef CONFIG_SMP
139 struct disk_stats *dkstats; 148 struct disk_stats *dkstats;
140 #else 149 #else
141 struct disk_stats dkstats; 150 struct disk_stats dkstats;
142 #endif 151 #endif
143 struct work_struct async_notify; 152 struct work_struct async_notify;
144 #ifdef CONFIG_BLK_DEV_INTEGRITY 153 #ifdef CONFIG_BLK_DEV_INTEGRITY
145 struct blk_integrity *integrity; 154 struct blk_integrity *integrity;
146 #endif 155 #endif
147 }; 156 };
148 157
149 /* 158 /*
150 * Macros to operate on percpu disk statistics: 159 * Macros to operate on percpu disk statistics:
151 * 160 *
152 * The __ variants should only be called in critical sections. The full 161 * The __ variants should only be called in critical sections. The full
153 * variants disable/enable preemption. 162 * variants disable/enable preemption.
154 */ 163 */
155 static inline struct hd_struct *get_part(struct gendisk *gendiskp, 164 static inline struct hd_struct *get_part(struct gendisk *gendiskp,
156 sector_t sector) 165 sector_t sector)
157 { 166 {
158 struct hd_struct *part; 167 struct hd_struct *part;
159 int i; 168 int i;
160 for (i = 0; i < gendiskp->minors - 1; i++) { 169 for (i = 0; i < gendiskp->minors - 1; i++) {
161 part = gendiskp->part[i]; 170 part = gendiskp->part[i];
162 if (part && part->start_sect <= sector 171 if (part && part->start_sect <= sector
163 && sector < part->start_sect + part->nr_sects) 172 && sector < part->start_sect + part->nr_sects)
164 return part; 173 return part;
165 } 174 }
166 return NULL; 175 return NULL;
167 } 176 }
168 177
#ifdef CONFIG_SMP
#define __disk_stat_add(gendiskp, field, addnd) 	\
	(per_cpu_ptr(gendiskp->dkstats, smp_processor_id())->field += addnd)

#define disk_stat_read(gendiskp, field)					\
({									\
	typeof(gendiskp->dkstats->field) res = 0;			\
	int i;								\
	for_each_possible_cpu(i)					\
		res += per_cpu_ptr(gendiskp->dkstats, i)->field;	\
	res;								\
})

static inline void disk_stat_set_all(struct gendisk *gendiskp, int value)
{
	int i;

	for_each_possible_cpu(i)
		memset(per_cpu_ptr(gendiskp->dkstats, i), value,
				sizeof(struct disk_stats));
}

#define __part_stat_add(part, field, addnd)				\
	(per_cpu_ptr(part->dkstats, smp_processor_id())->field += addnd)

#define __all_stat_add(gendiskp, part, field, addnd, sector)	\
({								\
	if (part)						\
		__part_stat_add(part, field, addnd);		\
	__disk_stat_add(gendiskp, field, addnd);		\
})

#define part_stat_read(part, field)					\
({									\
	typeof(part->dkstats->field) res = 0;				\
	int i;								\
	for_each_possible_cpu(i)					\
		res += per_cpu_ptr(part->dkstats, i)->field;		\
	res;								\
})

static inline void part_stat_set_all(struct hd_struct *part, int value)
{
	int i;

	for_each_possible_cpu(i)
		memset(per_cpu_ptr(part->dkstats, i), value,
				sizeof(struct disk_stats));
}

#else /* !CONFIG_SMP */
#define __disk_stat_add(gendiskp, field, addnd) \
	(gendiskp->dkstats.field += addnd)
#define disk_stat_read(gendiskp, field)	(gendiskp->dkstats.field)

static inline void disk_stat_set_all(struct gendisk *gendiskp, int value)
{
	memset(&gendiskp->dkstats, value, sizeof(struct disk_stats));
}

#define __part_stat_add(part, field, addnd) \
	(part->dkstats.field += addnd)

#define __all_stat_add(gendiskp, part, field, addnd, sector)	\
({								\
	if (part)						\
		part->dkstats.field += addnd;			\
	__disk_stat_add(gendiskp, field, addnd);		\
})

#define part_stat_read(part, field)	(part->dkstats.field)

static inline void part_stat_set_all(struct hd_struct *part, int value)
{
	memset(&part->dkstats, value, sizeof(struct disk_stats));
}

#endif /* CONFIG_SMP */
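For reference, a minimal sketch (not part of this commit) of reading these counters: disk_stat_read() already sums over all possible CPUs, so a single call yields an approximate whole-disk total with no extra locking. The ios[2] field, indexed by READ/WRITE, is assumed here to stand in for whatever counters struct disk_stats actually carries.

	/* Hypothetical helper; ios[2] is an assumed struct disk_stats field. */
	static inline unsigned long total_ios(struct gendisk *disk)
	{
		return disk_stat_read(disk, ios[READ]) +
		       disk_stat_read(disk, ios[WRITE]);
	}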

#define disk_stat_add(gendiskp, field, addnd)			\
	do {							\
		preempt_disable();				\
		__disk_stat_add(gendiskp, field, addnd);	\
		preempt_enable();				\
	} while (0)

#define __disk_stat_dec(gendiskp, field) __disk_stat_add(gendiskp, field, -1)
#define disk_stat_dec(gendiskp, field) disk_stat_add(gendiskp, field, -1)

#define __disk_stat_inc(gendiskp, field) __disk_stat_add(gendiskp, field, 1)
#define disk_stat_inc(gendiskp, field) disk_stat_add(gendiskp, field, 1)

#define __disk_stat_sub(gendiskp, field, subnd) \
		__disk_stat_add(gendiskp, field, -subnd)
#define disk_stat_sub(gendiskp, field, subnd) \
		disk_stat_add(gendiskp, field, -subnd)

#define part_stat_add(gendiskp, field, addnd)		\
	do {						\
		preempt_disable();			\
		__part_stat_add(gendiskp, field, addnd);\
		preempt_enable();			\
	} while (0)

#define __part_stat_dec(gendiskp, field) __part_stat_add(gendiskp, field, -1)
#define part_stat_dec(gendiskp, field) part_stat_add(gendiskp, field, -1)

#define __part_stat_inc(gendiskp, field) __part_stat_add(gendiskp, field, 1)
#define part_stat_inc(gendiskp, field) part_stat_add(gendiskp, field, 1)

#define __part_stat_sub(gendiskp, field, subnd) \
		__part_stat_add(gendiskp, field, -subnd)
#define part_stat_sub(gendiskp, field, subnd) \
		part_stat_add(gendiskp, field, -subnd)

#define all_stat_add(gendiskp, part, field, addnd, sector)	\
	do {							\
		preempt_disable();				\
		__all_stat_add(gendiskp, part, field, addnd, sector);	\
		preempt_enable();				\
	} while (0)

/* Note: the dec variants must pass the part argument through, since
 * __all_stat_add() takes five parameters. */
#define __all_stat_dec(gendiskp, part, field, sector) \
	__all_stat_add(gendiskp, part, field, -1, sector)
#define all_stat_dec(gendiskp, part, field, sector) \
	all_stat_add(gendiskp, part, field, -1, sector)

#define __all_stat_inc(gendiskp, part, field, sector) \
	__all_stat_add(gendiskp, part, field, 1, sector)
#define all_stat_inc(gendiskp, part, field, sector) \
	all_stat_add(gendiskp, part, field, 1, sector)

#define __all_stat_sub(gendiskp, part, field, subnd, sector) \
	__all_stat_add(gendiskp, part, field, -subnd, sector)
#define all_stat_sub(gendiskp, part, field, subnd, sector) \
	all_stat_add(gendiskp, part, field, -subnd, sector)

/* Inlines to alloc and free disk stats in struct gendisk */
#ifdef CONFIG_SMP
static inline int init_disk_stats(struct gendisk *disk)
{
	disk->dkstats = alloc_percpu(struct disk_stats);
	if (!disk->dkstats)
		return 0;
	return 1;
}

static inline void free_disk_stats(struct gendisk *disk)
{
	free_percpu(disk->dkstats);
}

static inline int init_part_stats(struct hd_struct *part)
{
	part->dkstats = alloc_percpu(struct disk_stats);
	if (!part->dkstats)
		return 0;
	return 1;
}

static inline void free_part_stats(struct hd_struct *part)
{
	free_percpu(part->dkstats);
}

#else /* CONFIG_SMP */
static inline int init_disk_stats(struct gendisk *disk)
{
	return 1;
}

static inline void free_disk_stats(struct gendisk *disk)
{
}

static inline int init_part_stats(struct hd_struct *part)
{
	return 1;
}

static inline void free_part_stats(struct hd_struct *part)
{
}
#endif /* CONFIG_SMP */
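Note that these allocators return 0 on failure and 1 on success, the inverse of the usual 0-on-success kernel convention, so callers test with !. A simplified sketch of the pairing a caller has to follow (hypothetical, in the style of the gendisk allocation path, not the verbatim implementation):

	struct gendisk *disk = kzalloc(sizeof(*disk), GFP_KERNEL);
	if (!disk)
		return NULL;
	if (!init_disk_stats(disk)) {		/* percpu alloc failed */
		kfree(disk);
		return NULL;
	}
	/* ... and on teardown, free_disk_stats(disk) before kfree(disk). */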

/* drivers/block/ll_rw_blk.c */
extern void disk_round_stats(struct gendisk *disk);
extern void part_round_stats(struct hd_struct *part);

/* drivers/block/genhd.c */
extern int get_blkdev_list(char *, int);
extern void add_disk(struct gendisk *disk);
extern void del_gendisk(struct gendisk *gp);
extern void unlink_gendisk(struct gendisk *gp);
extern struct gendisk *get_gendisk(dev_t dev, int *part);

extern void set_device_ro(struct block_device *bdev, int flag);
extern void set_disk_ro(struct gendisk *disk, int flag);

/* drivers/char/random.c */
extern void add_disk_randomness(struct gendisk *disk);
extern void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_contains == bdev ? 0 : bdev->bd_part->start_sect;
}
static inline sector_t get_capacity(struct gendisk *disk)
{
	return disk->capacity;
}
static inline void set_capacity(struct gendisk *disk, sector_t size)
{
	disk->capacity = size;
}
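get_capacity() and set_capacity() deal in 512-byte sectors regardless of the device's logical block size, so drivers convert byte sizes explicitly; a short hypothetical example:

	/* size_bytes is an assumed byte count from the hardware probe. */
	set_capacity(disk, size_bytes >> 9);	/* bytes -> 512-byte sectors */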

#ifdef CONFIG_SOLARIS_X86_PARTITION

#define SOLARIS_X86_NUMSLICE	16
#define SOLARIS_X86_VTOC_SANE	(0x600DDEEEUL)

struct solaris_x86_slice {
	__le16 s_tag;		/* ID tag of partition */
	__le16 s_flag;		/* permission flags */
	__le32 s_start;		/* start sector no of partition */
	__le32 s_size;		/* # of blocks in partition */
};

struct solaris_x86_vtoc {
	unsigned int v_bootinfo[3];	/* info needed by mboot (unsupported) */
	__le32 v_sanity;		/* to verify vtoc sanity */
	__le32 v_version;		/* layout version */
	char	v_volume[8];		/* volume name */
	__le16	v_sectorsz;		/* sector size in bytes */
	__le16	v_nparts;		/* number of partitions */
	unsigned int v_reserved[10];	/* free space */
	struct solaris_x86_slice
		v_slice[SOLARIS_X86_NUMSLICE]; /* slice headers */
	unsigned int timestamp[SOLARIS_X86_NUMSLICE]; /* timestamp (unsupported) */
	char	v_asciilabel[128];	/* for compatibility */
};

#endif /* CONFIG_SOLARIS_X86_PARTITION */
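All multi-byte fields above are little-endian on disk, so a parser converts them before comparing against the host-order magic; a minimal validity check might look like this (solaris_vtoc_valid() is a hypothetical helper, not part of this commit):

	static int solaris_vtoc_valid(const struct solaris_x86_vtoc *v)
	{
		return le32_to_cpu(v->v_sanity) == SOLARIS_X86_VTOC_SANE &&
		       le16_to_cpu(v->v_nparts) <= SOLARIS_X86_NUMSLICE;
	}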

#ifdef CONFIG_BSD_DISKLABEL
/*
 * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il>
 * updated by Marc Espie <Marc.Espie@openbsd.org>
 */

/* check against BSD src/sys/sys/disklabel.h for consistency */

#define BSD_DISKMAGIC		(0x82564557UL)	/* The disk magic number */
#define BSD_MAXPARTITIONS	16
#define OPENBSD_MAXPARTITIONS	16
#define BSD_FS_UNUSED		0	/* disklabel unused partition entry ID */
struct bsd_disklabel {
	__le32	d_magic;		/* the magic number */
	__s16	d_type;			/* drive type */
	__s16	d_subtype;		/* controller/d_type specific */
	char	d_typename[16];		/* type name, e.g. "eagle" */
	char	d_packname[16];		/* pack identifier */
	__u32	d_secsize;		/* # of bytes per sector */
	__u32	d_nsectors;		/* # of data sectors per track */
	__u32	d_ntracks;		/* # of tracks per cylinder */
	__u32	d_ncylinders;		/* # of data cylinders per unit */
	__u32	d_secpercyl;		/* # of data sectors per cylinder */
	__u32	d_secperunit;		/* # of data sectors per unit */
	__u16	d_sparespertrack;	/* # of spare sectors per track */
	__u16	d_sparespercyl;		/* # of spare sectors per cylinder */
	__u32	d_acylinders;		/* # of alt. cylinders per unit */
	__u16	d_rpm;			/* rotational speed */
	__u16	d_interleave;		/* hardware sector interleave */
	__u16	d_trackskew;		/* sector 0 skew, per track */
	__u16	d_cylskew;		/* sector 0 skew, per cylinder */
	__u32	d_headswitch;		/* head switch time, usec */
	__u32	d_trkseek;		/* track-to-track seek, usec */
	__u32	d_flags;		/* generic flags */
#define NDDATA 5
	__u32	d_drivedata[NDDATA];	/* drive-type specific information */
#define NSPARE 5
	__u32	d_spare[NSPARE];	/* reserved for future use */
	__le32	d_magic2;		/* the magic number (again) */
	__le16	d_checksum;		/* xor of data incl. partitions */

			/* filesystem and partition information: */
	__le16	d_npartitions;		/* number of partitions in following */
	__le32	d_bbsize;		/* size of boot area at sn0, bytes */
	__le32	d_sbsize;		/* max size of fs superblock, bytes */
	struct	bsd_partition {		/* the partition table */
		__le32	p_size;		/* number of sectors in partition */
		__le32	p_offset;	/* starting sector */
		__le32	p_fsize;	/* filesystem basic fragment size */
		__u8	p_fstype;	/* filesystem type, see below */
		__u8	p_frag;		/* filesystem fragments per block */
		__le16	p_cpg;		/* filesystem cylinders per group */
	} d_partitions[BSD_MAXPARTITIONS];	/* actually may be more */
};

#endif /* CONFIG_BSD_DISKLABEL */
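Since d_partitions "actually may be more" than the array holds, a parser clamps d_npartitions to BSD_MAXPARTITIONS and skips BSD_FS_UNUSED slots. A hedged sketch, assuming l points at a label whose d_magic already matched BSD_DISKMAGIC and where handle_slice() is a hypothetical callback:

	int i, n = le16_to_cpu(l->d_npartitions);

	if (n > BSD_MAXPARTITIONS)
		n = BSD_MAXPARTITIONS;
	for (i = 0; i < n; i++) {
		const struct bsd_partition *p = &l->d_partitions[i];

		if (p->p_fstype == BSD_FS_UNUSED)
			continue;
		/* offset and size are little-endian sector counts */
		handle_slice(le32_to_cpu(p->p_offset), le32_to_cpu(p->p_size));
	}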

#ifdef CONFIG_UNIXWARE_DISKLABEL
/*
 * Unixware slices support by Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
 * and Krzysztof G. Baranowski <kgb@knm.org.pl>
 */

#define UNIXWARE_DISKMAGIC	(0xCA5E600DUL)	/* The disk magic number */
#define UNIXWARE_DISKMAGIC2	(0x600DDEEEUL)	/* The slice table magic nr */
#define UNIXWARE_NUMSLICE	16
#define UNIXWARE_FS_UNUSED	0		/* Unused slice entry ID */

struct unixware_slice {
	__le16 s_label;		/* label */
	__le16 s_flags;		/* permission flags */
	__le32 start_sect;	/* starting sector */
	__le32 nr_sects;	/* number of sectors in slice */
};

struct unixware_disklabel {
	__le32	d_type;			/* drive type */
	__le32	d_magic;		/* the magic number */
	__le32	d_version;		/* version number */
	char	d_serial[12];		/* serial number of the device */
	__le32	d_ncylinders;		/* # of data cylinders per device */
	__le32	d_ntracks;		/* # of tracks per cylinder */
	__le32	d_nsectors;		/* # of data sectors per track */
	__le32	d_secsize;		/* # of bytes per sector */
	__le32	d_part_start;		/* # of first sector of this partition */
	__le32	d_unknown1[12];		/* ? */
	__le32	d_alt_tbl;		/* byte offset of alternate table */
	__le32	d_alt_len;		/* byte length of alternate table */
	__le32	d_phys_cyl;		/* # of physical cylinders per device */
	__le32	d_phys_trk;		/* # of physical tracks per cylinder */
	__le32	d_phys_sec;		/* # of physical sectors per track */
	__le32	d_phys_bytes;		/* # of physical bytes per sector */
	__le32	d_unknown2;		/* ? */
	__le32	d_unknown3;		/* ? */
	__le32	d_pad[8];		/* pad */

	struct unixware_vtoc {
		__le32	v_magic;	/* the magic number */
		__le32	v_version;	/* version number */
		char	v_name[8];	/* volume name */
		__le16	v_nslices;	/* # of slices */
		__le16	v_unknown1;	/* ? */
		__le32	v_reserved[10];	/* reserved */
		struct unixware_slice
			v_slice[UNIXWARE_NUMSLICE]; /* slice headers */
	} vtoc;

};  /* 408 */

#endif /* CONFIG_UNIXWARE_DISKLABEL */
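A UnixWare label carries two magics, one for the disklabel proper and one for the embedded VTOC, and a parser is expected to check both; a hedged sketch (is_unixware_label() is a hypothetical helper, not part of this commit):

	static int is_unixware_label(const struct unixware_disklabel *l)
	{
		return le32_to_cpu(l->d_magic) == UNIXWARE_DISKMAGIC &&
		       le32_to_cpu(l->vtoc.v_magic) == UNIXWARE_DISKMAGIC2;
	}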

#ifdef CONFIG_MINIX_SUBPARTITION
#   define MINIX_NR_SUBPARTITIONS  4
#endif /* CONFIG_MINIX_SUBPARTITION */

#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2

extern dev_t blk_lookup_devt(const char *name, int part);
extern char *disk_name(struct gendisk *hd, int part, char *buf);

extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
extern void add_partition(struct gendisk *, int, sector_t, sector_t, int);
extern void delete_partition(struct gendisk *, int);
extern void printk_all_partitions(void);

extern struct gendisk *alloc_disk_node(int minors, int node_id);
extern struct gendisk *alloc_disk(int minors);
extern struct kobject *get_disk(struct gendisk *disk);
extern void put_disk(struct gendisk *disk);
extern void blk_register_region(dev_t devt, unsigned long range,
			struct module *module,
			struct kobject *(*probe)(dev_t, int *, void *),
			int (*lock)(dev_t, void *),
			void *data);
extern void blk_unregister_region(dev_t devt, unsigned long range);

static inline struct block_device *bdget_disk(struct gendisk *disk, int index)
{
	return bdget(MKDEV(disk->major, disk->first_minor) + index);
}

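bdget_disk() relies on a disk's partition minors being allocated contiguously after the whole-disk minor, so index 0 names the whole device and index N its Nth partition. A hypothetical usage sketch:

	/* Take a reference on partition 1; bdget() can return NULL, and
	 * the result must be dropped with bdput(). */
	struct block_device *bdev = bdget_disk(disk, 1);
	if (bdev) {
		/* ... use bdev ... */
		bdput(bdev);
	}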
#else /* CONFIG_BLOCK */

static inline void printk_all_partitions(void) { }

static inline dev_t blk_lookup_devt(const char *name, int part)
{
	dev_t devt = MKDEV(0, 0);
	return devt;
}

#endif /* CONFIG_BLOCK */

#endif /* _LINUX_GENHD_H */