Commit 137d3edb48425f82a6a4226b664f90ed5e42eea5

Authored by Tejun Heo
Committed by Pierre Ossman
1 parent 60c9c7b1d9

sg: reimplement sg mapping iterator

This is alternative implementation of sg content iterator introduced
by commit 83e7d317... from Pierre Ossman in next-20080716.  As there's
already an sg iterator which iterates over sg entries themselves, name
this sg_mapping_iterator.

Slightly edited description from the original implementation follows.

Iteration over a sg list is not that trivial when you take into
account that memory pages might have to be mapped before being used.
Unfortunately, that means that some parts of the kernel restrict
themselves to directly accessible memory just to not have to deal with
the mess.

This patch adds a simple iterator system that allows any code to
easily traverse an sg list and not have to deal with all the details.
The user can decide to consume part of the iteration.  Also, iteration
can be stopped and resumed later if releasing the kmap between
iteration steps is necessary.  These features are useful to implement
piecemeal sg copying for interrupt driven PIO for example.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>

Showing 2 changed files with 168 additions and 46 deletions Side-by-side Diff

include/linux/scatterlist.h
... ... @@ -224,5 +224,43 @@
224 224 */
225 225 #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
226 226  
  227 +
  228 +/*
  229 + * Mapping sg iterator
  230 + *
  231 + * Iterates over sg entries mapping page-by-page. On each successful
  232 + * iteration, @miter->page points to the mapped page and
  233 + * @miter->length bytes of data can be accessed at @miter->addr. As
  235 + * long as an iteration is enclosed between start and stop, the user
  235 + * is free to choose control structure and when to stop.
  236 + *
  237 + * @miter->consumed is set to @miter->length on each iteration. It
  238 + * can be adjusted if the user can't consume all the bytes in one go.
  239 + * Also, a stopped iteration can be resumed by calling next on it.
  240 + * This is useful when iteration needs to release all resources and
  241 + * continue later (e.g. at the next interrupt).
  242 + */
  243 +
  244 +#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
  245 +
  246 +struct sg_mapping_iter {
  247 + /* the following three fields can be accessed directly */
  248 + struct page *page; /* currently mapped page */
  249 + void *addr; /* pointer to the mapped area */
  250 + size_t length; /* length of the mapped area */
  251 + size_t consumed; /* number of consumed bytes */
  252 +
  253 + /* these are internal states, keep away */
  254 + struct scatterlist *__sg; /* current entry */
  255 + unsigned int __nents; /* nr of remaining entries */
  256 + unsigned int __offset; /* offset within sg */
  257 + unsigned int __flags;
  258 +};
  259 +
  260 +void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
  261 + unsigned int nents, unsigned int flags);
  262 +bool sg_miter_next(struct sg_mapping_iter *miter);
  263 +void sg_miter_stop(struct sg_mapping_iter *miter);
  264 +
227 265 #endif /* _LINUX_SCATTERLIST_H */
... ... @@ -295,6 +295,117 @@
295 295 EXPORT_SYMBOL(sg_alloc_table);
296 296  
297 297 /**
  298 + * sg_miter_start - start mapping iteration over a sg list
  299 + * @miter: sg mapping iter to be started
  300 + * @sgl: sg list to iterate over
  301 + * @nents: number of sg entries
  302 + *
  303 + * Description:
  304 + * Starts mapping iterator @miter.
  305 + *
  306 + * Context:
  307 + * Don't care.
  308 + */
  309 +void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
  310 + unsigned int nents, unsigned int flags)
  311 +{
  312 + memset(miter, 0, sizeof(struct sg_mapping_iter));
  313 +
  314 + miter->__sg = sgl;
  315 + miter->__nents = nents;
  316 + miter->__offset = 0;
  317 + miter->__flags = flags;
  318 +}
  319 +EXPORT_SYMBOL(sg_miter_start);
  320 +
  321 +/**
  322 + * sg_miter_next - proceed mapping iterator to the next mapping
  323 + * @miter: sg mapping iter to proceed
  324 + *
  325 + * Description:
  326 + * Proceeds @miter to the next mapping. @miter should have been
  327 + * started using sg_miter_start(). On successful return,
  328 + * @miter->page, @miter->addr and @miter->length point to the
  329 + * current mapping.
  330 + *
  331 + * Context:
  332 + * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
  333 + * @miter is stopped. May sleep if !SG_MITER_ATOMIC.
  334 + *
  335 + * Returns:
  336 + * true if @miter contains the next mapping. false if end of sg
  337 + * list is reached.
  338 + */
  339 +bool sg_miter_next(struct sg_mapping_iter *miter)
  340 +{
  341 + unsigned int off, len;
  342 +
  343 + /* check for end and drop resources from the last iteration */
  344 + if (!miter->__nents)
  345 + return false;
  346 +
  347 + sg_miter_stop(miter);
  348 +
  349 + /* get to the next sg if necessary. __offset is adjusted by stop */
  350 + if (miter->__offset == miter->__sg->length && --miter->__nents) {
  351 + miter->__sg = sg_next(miter->__sg);
  352 + miter->__offset = 0;
  353 + }
  354 +
  355 + /* map the next page */
  356 + off = miter->__sg->offset + miter->__offset;
  357 + len = miter->__sg->length - miter->__offset;
  358 +
  359 + miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
  360 + off &= ~PAGE_MASK;
  361 + miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
  362 + miter->consumed = miter->length;
  363 +
  364 + if (miter->__flags & SG_MITER_ATOMIC)
  365 + miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
  366 + else
  367 + miter->addr = kmap(miter->page) + off;
  368 +
  369 + return true;
  370 +}
  371 +EXPORT_SYMBOL(sg_miter_next);
  372 +
  373 +/**
  374 + * sg_miter_stop - stop mapping iteration
  375 + * @miter: sg mapping iter to be stopped
  376 + *
  377 + * Description:
  378 + * Stops mapping iterator @miter. @miter should have been
  379 + * started using sg_miter_start(). A stopped iteration can be
  380 + * resumed by calling sg_miter_next() on it. This is useful when
  381 + * resources (kmap) need to be released during iteration.
  382 + *
  383 + * Context:
  384 + * IRQ disabled if SG_MITER_ATOMIC is set. Don't care otherwise.
  385 + */
  386 +void sg_miter_stop(struct sg_mapping_iter *miter)
  387 +{
  388 + WARN_ON(miter->consumed > miter->length);
  389 +
  390 + /* drop resources from the last iteration */
  391 + if (miter->addr) {
  392 + miter->__offset += miter->consumed;
  393 +
  394 + if (miter->__flags & SG_MITER_ATOMIC) {
  395 + WARN_ON(!irqs_disabled());
  396 + kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
  397 + } else
  398 + kunmap(miter->addr);
  399 +
  400 + miter->page = NULL;
  401 + miter->addr = NULL;
  402 + miter->length = 0;
  403 + miter->consumed = 0;
  404 + }
  405 +}
  406 +EXPORT_SYMBOL(sg_miter_stop);
  407 +
  408 +/**
298 409 * sg_copy_buffer - Copy data between a linear buffer and an SG list
299 410 * @sgl: The SG list
300 411 * @nents: Number of SG entries
301 412  
302 413  
303 414  
304 415  
305 416  
306 417  
... ... @@ -309,56 +420,29 @@
309 420 static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
310 421 void *buf, size_t buflen, int to_buffer)
311 422 {
312   - struct scatterlist *sg;
313   - size_t buf_off = 0;
314   - int i;
  423 + unsigned int offset = 0;
  424 + struct sg_mapping_iter miter;
315 425  
316   - WARN_ON(!irqs_disabled());
  426 + sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
317 427  
318   - for_each_sg(sgl, sg, nents, i) {
319   - struct page *page;
320   - int n = 0;
321   - unsigned int sg_off = sg->offset;
322   - unsigned int sg_copy = sg->length;
  428 + while (sg_miter_next(&miter) && offset < buflen) {
  429 + unsigned int len;
323 430  
324   - if (sg_copy > buflen)
325   - sg_copy = buflen;
326   - buflen -= sg_copy;
  431 + len = min(miter.length, buflen - offset);
327 432  
328   - while (sg_copy > 0) {
329   - unsigned int page_copy;
330   - void *p;
331   -
332   - page_copy = PAGE_SIZE - sg_off;
333   - if (page_copy > sg_copy)
334   - page_copy = sg_copy;
335   -
336   - page = nth_page(sg_page(sg), n);
337   - p = kmap_atomic(page, KM_BIO_SRC_IRQ);
338   -
339   - if (to_buffer)
340   - memcpy(buf + buf_off, p + sg_off, page_copy);
341   - else {
342   - memcpy(p + sg_off, buf + buf_off, page_copy);
343   - flush_kernel_dcache_page(page);
344   - }
345   -
346   - kunmap_atomic(p, KM_BIO_SRC_IRQ);
347   -
348   - buf_off += page_copy;
349   - sg_off += page_copy;
350   - if (sg_off == PAGE_SIZE) {
351   - sg_off = 0;
352   - n++;
353   - }
354   - sg_copy -= page_copy;
  433 + if (to_buffer)
  434 + memcpy(buf + offset, miter.addr, len);
  435 + else {
  436 + memcpy(miter.addr, buf + offset, len);
  437 + flush_kernel_dcache_page(miter.page);
355 438 }
356 439  
357   - if (!buflen)
358   - break;
  440 + offset += len;
359 441 }
360 442  
361   - return buf_off;
  443 + sg_miter_stop(&miter);
  444 +
  445 + return offset;
362 446 }
363 447  
364 448 /**