Commit 52c07423a819091b0fe9abbf26977098b996f85b
1 parent: 3eccfdb01d
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
target/rd: Add ramdisk bit for NULLIO operation
This patch adds an rd_nullio parameter that allows RAMDISK_MCP backends to operate in NULLIO mode, where all se_cmd I/O is completed immediately in rd_execute_rw() without performing the SGL memory copy. This is useful for performance testing, where the ramdisk SGL memory copy starts to eat a significant number of cycles under heavy small-block workloads, so the bit can be enabled when necessary on a per-rd_dev basis.

Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
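For context: target_core backend device parameters are written as comma-separated key=value pairs into the device's configfs control attribute, which is what rd_set_configfs_dev_params() below parses. A minimal user-space sketch of flipping the new bit, assuming an rd_mcp device has already been created; the configfs path and device name used here are illustrative assumptions, not taken from this commit:

/* Sketch only: enable NULLIO on an existing rd_mcp device via configfs.
 * The device path below is an example; substitute the HBA/device names
 * actually created on your system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *ctl =
                "/sys/kernel/config/target/core/rd_mcp_0/ramdisk0/control";
        const char *params = "rd_nullio=1";     /* parsed via match_token() */
        int fd = open(ctl, O_WRONLY);

        if (fd < 0) {
                perror("open control");
                return 1;
        }
        if (write(fd, params, strlen(params)) != (ssize_t)strlen(params)) {
                perror("write rd_nullio=1");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}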
Showing 2 changed files with 19 additions and 3 deletions.
drivers/target/target_core_rd.c
 /*******************************************************************************
  * Filename: target_core_rd.c
  *
  * This file contains the Storage Engine <-> Ramdisk transport
  * specific functions.
  *
  * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  ******************************************************************************/

 #include <linux/string.h>
 #include <linux/parser.h>
 #include <linux/timer.h>
 #include <linux/blkdev.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>

 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>

 #include "target_core_rd.h"

 static inline struct rd_dev *RD_DEV(struct se_device *dev)
 {
         return container_of(dev, struct rd_dev, dev);
 }

 /* rd_attach_hba(): (Part of se_subsystem_api_t template)
  *
  *
  */
 static int rd_attach_hba(struct se_hba *hba, u32 host_id)
 {
         struct rd_host *rd_host;

         rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
         if (!rd_host) {
                 pr_err("Unable to allocate memory for struct rd_host\n");
                 return -ENOMEM;
         }

         rd_host->rd_host_id = host_id;

         hba->hba_ptr = rd_host;

         pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
                 " Generic Target Core Stack %s\n", hba->hba_id,
                 RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

         return 0;
 }

 static void rd_detach_hba(struct se_hba *hba)
 {
         struct rd_host *rd_host = hba->hba_ptr;

         pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
                 " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

         kfree(rd_host);
         hba->hba_ptr = NULL;
 }

 /* rd_release_device_space():
  *
  *
  */
 static void rd_release_device_space(struct rd_dev *rd_dev)
 {
         u32 i, j, page_count = 0, sg_per_table;
         struct rd_dev_sg_table *sg_table;
         struct page *pg;
         struct scatterlist *sg;

         if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
                 return;

         sg_table = rd_dev->sg_table_array;

         for (i = 0; i < rd_dev->sg_table_count; i++) {
                 sg = sg_table[i].sg_table;
                 sg_per_table = sg_table[i].rd_sg_count;

                 for (j = 0; j < sg_per_table; j++) {
                         pg = sg_page(&sg[j]);
                         if (pg) {
                                 __free_page(pg);
                                 page_count++;
                         }
                 }

                 kfree(sg);
         }

         pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
                 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

         kfree(sg_table);
         rd_dev->sg_table_array = NULL;
         rd_dev->sg_table_count = 0;
 }


 /* rd_build_device_space():
  *
  *
  */
 static int rd_build_device_space(struct rd_dev *rd_dev)
 {
         u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
         u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                 sizeof(struct scatterlist));
         struct rd_dev_sg_table *sg_table;
         struct page *pg;
         struct scatterlist *sg;

         if (rd_dev->rd_page_count <= 0) {
                 pr_err("Illegal page count: %u for Ramdisk device\n",
                         rd_dev->rd_page_count);
                 return -EINVAL;
         }
         total_sg_needed = rd_dev->rd_page_count;

         sg_tables = (total_sg_needed / max_sg_per_table) + 1;

         sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
         if (!sg_table) {
                 pr_err("Unable to allocate memory for Ramdisk"
                         " scatterlist tables\n");
                 return -ENOMEM;
         }

         rd_dev->sg_table_array = sg_table;
         rd_dev->sg_table_count = sg_tables;

         while (total_sg_needed) {
                 sg_per_table = (total_sg_needed > max_sg_per_table) ?
                         max_sg_per_table : total_sg_needed;

                 sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
                                 GFP_KERNEL);
                 if (!sg) {
                         pr_err("Unable to allocate scatterlist array"
                                 " for struct rd_dev\n");
                         return -ENOMEM;
                 }

                 sg_init_table(sg, sg_per_table);

                 sg_table[i].sg_table = sg;
                 sg_table[i].rd_sg_count = sg_per_table;
                 sg_table[i].page_start_offset = page_offset;
                 sg_table[i++].page_end_offset = (page_offset + sg_per_table)
                                                 - 1;

                 for (j = 0; j < sg_per_table; j++) {
                         pg = alloc_pages(GFP_KERNEL, 0);
                         if (!pg) {
                                 pr_err("Unable to allocate scatterlist"
                                         " pages for struct rd_dev_sg_table\n");
                                 return -ENOMEM;
                         }
                         sg_assign_page(&sg[j], pg);
                         sg[j].length = PAGE_SIZE;
                 }

                 page_offset += sg_per_table;
                 total_sg_needed -= sg_per_table;
         }

         pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
                 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                 rd_dev->rd_dev_id, rd_dev->rd_page_count,
                 rd_dev->sg_table_count);

         return 0;
 }

 static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
 {
         struct rd_dev *rd_dev;
         struct rd_host *rd_host = hba->hba_ptr;

         rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
         if (!rd_dev) {
                 pr_err("Unable to allocate memory for struct rd_dev\n");
                 return NULL;
         }

         rd_dev->rd_host = rd_host;

         return &rd_dev->dev;
 }

 static int rd_configure_device(struct se_device *dev)
 {
         struct rd_dev *rd_dev = RD_DEV(dev);
         struct rd_host *rd_host = dev->se_hba->hba_ptr;
         int ret;

         if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
                 pr_debug("Missing rd_pages= parameter\n");
                 return -EINVAL;
         }

         ret = rd_build_device_space(rd_dev);
         if (ret < 0)
                 goto fail;

         dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
         dev->dev_attrib.hw_max_sectors = UINT_MAX;
         dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

         rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

         pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
                 " %u pages in %u tables, %lu total bytes\n",
                 rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
                 rd_dev->sg_table_count,
                 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

         return 0;

 fail:
         rd_release_device_space(rd_dev);
         return ret;
 }

 static void rd_free_device(struct se_device *dev)
 {
         struct rd_dev *rd_dev = RD_DEV(dev);

         rd_release_device_space(rd_dev);
         kfree(rd_dev);
 }

 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
 {
         struct rd_dev_sg_table *sg_table;
         u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                 sizeof(struct scatterlist));

         i = page / sg_per_table;
         if (i < rd_dev->sg_table_count) {
                 sg_table = &rd_dev->sg_table_array[i];
                 if ((sg_table->page_start_offset <= page) &&
                     (sg_table->page_end_offset >= page))
                         return sg_table;
         }

         pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
                         page);

         return NULL;
 }

 static sense_reason_t
 rd_execute_rw(struct se_cmd *cmd)
 {
         struct scatterlist *sgl = cmd->t_data_sg;
         u32 sgl_nents = cmd->t_data_nents;
         enum dma_data_direction data_direction = cmd->data_direction;
         struct se_device *se_dev = cmd->se_dev;
         struct rd_dev *dev = RD_DEV(se_dev);
         struct rd_dev_sg_table *table;
         struct scatterlist *rd_sg;
         struct sg_mapping_iter m;
         u32 rd_offset;
         u32 rd_size;
         u32 rd_page;
         u32 src_len;
         u64 tmp;

+        if (dev->rd_flags & RDF_NULLIO) {
+                target_complete_cmd(cmd, SAM_STAT_GOOD);
+                return 0;
+        }
+
         tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
         rd_offset = do_div(tmp, PAGE_SIZE);
         rd_page = tmp;
         rd_size = cmd->data_length;

         table = rd_get_sg_table(dev, rd_page);
         if (!table)
                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

         rd_sg = &table->sg_table[rd_page - table->page_start_offset];

         pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
                         dev->rd_dev_id,
                         data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
                         cmd->t_task_lba, rd_size, rd_page, rd_offset);

         src_len = PAGE_SIZE - rd_offset;
         sg_miter_start(&m, sgl, sgl_nents,
                         data_direction == DMA_FROM_DEVICE ?
                                 SG_MITER_TO_SG : SG_MITER_FROM_SG);
         while (rd_size) {
                 u32 len;
                 void *rd_addr;

                 sg_miter_next(&m);
                 if (!(u32)m.length) {
                         pr_debug("RD[%u]: invalid sgl %p len %zu\n",
                                 dev->rd_dev_id, m.addr, m.length);
                         sg_miter_stop(&m);
                         return TCM_INCORRECT_AMOUNT_OF_DATA;
                 }
                 len = min((u32)m.length, src_len);
                 if (len > rd_size) {
                         pr_debug("RD[%u]: size underrun page %d offset %d "
                                 "size %d\n", dev->rd_dev_id,
                                 rd_page, rd_offset, rd_size);
                         len = rd_size;
                 }
                 m.consumed = len;

                 rd_addr = sg_virt(rd_sg) + rd_offset;

                 if (data_direction == DMA_FROM_DEVICE)
                         memcpy(m.addr, rd_addr, len);
                 else
                         memcpy(rd_addr, m.addr, len);

                 rd_size -= len;
                 if (!rd_size)
                         continue;

                 src_len -= len;
                 if (src_len) {
                         rd_offset += len;
                         continue;
                 }

                 /* rd page completed, next one please */
                 rd_page++;
                 rd_offset = 0;
                 src_len = PAGE_SIZE;
                 if (rd_page <= table->page_end_offset) {
                         rd_sg++;
                         continue;
                 }

                 table = rd_get_sg_table(dev, rd_page);
                 if (!table) {
                         sg_miter_stop(&m);
                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                 }

                 /* since we increment, the first sg entry is correct */
                 rd_sg = table->sg_table;
         }
         sg_miter_stop(&m);

         target_complete_cmd(cmd, SAM_STAT_GOOD);
         return 0;
 }

 enum {
-        Opt_rd_pages, Opt_err
+        Opt_rd_pages, Opt_rd_nullio, Opt_err
 };

 static match_table_t tokens = {
         {Opt_rd_pages, "rd_pages=%d"},
+        {Opt_rd_nullio, "rd_nullio=%d"},
         {Opt_err, NULL}
 };

 static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
                 const char *page, ssize_t count)
 {
         struct rd_dev *rd_dev = RD_DEV(dev);
         char *orig, *ptr, *opts;
         substring_t args[MAX_OPT_ARGS];
         int ret = 0, arg, token;

         opts = kstrdup(page, GFP_KERNEL);
         if (!opts)
                 return -ENOMEM;

         orig = opts;

         while ((ptr = strsep(&opts, ",\n")) != NULL) {
                 if (!*ptr)
                         continue;

                 token = match_token(ptr, tokens, args);
                 switch (token) {
                 case Opt_rd_pages:
                         match_int(args, &arg);
                         rd_dev->rd_page_count = arg;
                         pr_debug("RAMDISK: Referencing Page"
                                 " Count: %u\n", rd_dev->rd_page_count);
                         rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
                         break;
+                case Opt_rd_nullio:
+                        match_int(args, &arg);
+                        if (arg != 1)
+                                break;
+
+                        pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
+                        rd_dev->rd_flags |= RDF_NULLIO;
+                        break;
                 default:
                         break;
                 }
         }

         kfree(orig);
         return (!ret) ? count : ret;
 }

 static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
 {
         struct rd_dev *rd_dev = RD_DEV(dev);

         ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
                         rd_dev->rd_dev_id);
         bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
-                        " SG_table_count: %u\n", rd_dev->rd_page_count,
-                        PAGE_SIZE, rd_dev->sg_table_count);
+                        " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
+                        PAGE_SIZE, rd_dev->sg_table_count,
+                        !!(rd_dev->rd_flags & RDF_NULLIO));
         return bl;
 }

 static sector_t rd_get_blocks(struct se_device *dev)
 {
         struct rd_dev *rd_dev = RD_DEV(dev);

         unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
                         dev->dev_attrib.block_size) - 1;

         return blocks_long;
 }

 static struct sbc_ops rd_sbc_ops = {
         .execute_rw = rd_execute_rw,
 };

 static sense_reason_t
 rd_parse_cdb(struct se_cmd *cmd)
 {
         return sbc_parse_cdb(cmd, &rd_sbc_ops);
 }

 static struct se_subsystem_api rd_mcp_template = {
         .name = "rd_mcp",
         .inquiry_prod = "RAMDISK-MCP",
         .inquiry_rev = RD_MCP_VERSION,
         .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
         .attach_hba = rd_attach_hba,
         .detach_hba = rd_detach_hba,
         .alloc_device = rd_alloc_device,
         .configure_device = rd_configure_device,
         .free_device = rd_free_device,
         .parse_cdb = rd_parse_cdb,
         .set_configfs_dev_params = rd_set_configfs_dev_params,
         .show_configfs_dev_params = rd_show_configfs_dev_params,
         .get_device_type = sbc_get_device_type,
         .get_blocks = rd_get_blocks,
 };

 int __init rd_module_init(void)
 {
         int ret;

         ret = transport_subsystem_register(&rd_mcp_template);
         if (ret < 0) {
                 return ret;
         }

         return 0;
 }

 void rd_module_exit(void)
 {
         transport_subsystem_release(&rd_mcp_template);
 }
drivers/target/target_core_rd.h
 #ifndef TARGET_CORE_RD_H
 #define TARGET_CORE_RD_H

 #define RD_HBA_VERSION "v4.0"
 #define RD_MCP_VERSION "4.0"

 /* Largest piece of memory kmalloc can allocate */
 #define RD_MAX_ALLOCATION_SIZE 65536
 #define RD_DEVICE_QUEUE_DEPTH 32
 #define RD_MAX_DEVICE_QUEUE_DEPTH 128
 #define RD_BLOCKSIZE 512

 /* Used in target_core_init_configfs() for virtual LUN 0 access */
 int __init rd_module_init(void);
 void rd_module_exit(void);

 struct rd_dev_sg_table {
         u32 page_start_offset;
         u32 page_end_offset;
         u32 rd_sg_count;
         struct scatterlist *sg_table;
 } ____cacheline_aligned;

 #define RDF_HAS_PAGE_COUNT 0x01
+#define RDF_NULLIO 0x02

 struct rd_dev {
         struct se_device dev;
         u32 rd_flags;
         /* Unique Ramdisk Device ID in Ramdisk HBA */
         u32 rd_dev_id;
         /* Total page count for ramdisk device */
         u32 rd_page_count;
         /* Number of SG tables in sg_table_array */
         u32 sg_table_count;
         /* Array of rd_dev_sg_table_t containing scatterlists */
         struct rd_dev_sg_table *sg_table_array;
         /* Ramdisk HBA device is connected to */
         struct rd_host *rd_host;
 } ____cacheline_aligned;

 struct rd_host {
         u32 rd_host_dev_id_count;
         u32 rd_host_id; /* Unique Ramdisk Host ID */
 } ____cacheline_aligned;

 #endif /* TARGET_CORE_RD_H */
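A side note on the flag handling: rd_flags is a plain bitmask, and rd_show_configfs_dev_params() uses !!(rd_flags & RDF_NULLIO) to normalize the bit test to 0 or 1 for printing. A tiny standalone sketch of that idiom, reusing the same flag values from the header above:

/* Demonstrates setting, testing, and clearing the RDF_NULLIO bit. */
#include <stdio.h>

#define RDF_HAS_PAGE_COUNT 0x01
#define RDF_NULLIO 0x02

int main(void)
{
        unsigned int rd_flags = RDF_HAS_PAGE_COUNT;

        rd_flags |= RDF_NULLIO;         /* what the Opt_rd_nullio case does */
        printf("nullio: %d\n", !!(rd_flags & RDF_NULLIO));      /* prints 1 */

        rd_flags &= ~RDF_NULLIO;        /* clearing the bit */
        printf("nullio: %d\n", !!(rd_flags & RDF_NULLIO));      /* prints 0 */
        return 0;
}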