Commit d5470b596abdd566339b2417e807b1198be64b97

Authored by Adrian Bunk
Committed by Linus Torvalds
1 parent 07d45da616

fs/aio.c: make 3 functions static

Make the following needlessly global functions static:

- __put_ioctx()
- lookup_ioctx()
- io_submit_one()

Signed-off-by: Adrian Bunk <bunk@kernel.org>
Cc: Zach Brown <zach.brown@oracle.com>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

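For context, a minimal user-space sketch of what making a function static buys (release_ctx and its users counter are hypothetical illustrations, not kernel code): the function gets internal linkage, so it needs no extern declaration in a shared header, cannot collide with identically named symbols in other translation units, and the compiler may freely inline it.

#include <stdio.h>

/* Hypothetical helper, loosely analogous to __put_ioctx(): with internal
 * linkage only callers in this file can reach it, so no header declaration
 * is needed and the symbol never leaks into the global namespace. */
static void release_ctx(int *users)
{
	*users = 0;
	printf("context released\n");
}

int main(void)
{
	int users = 1;

	if (--users == 0)	/* last user gone, mirroring put_ioctx() */
		release_ctx(&users);
	return 0;
}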
Showing 2 changed files with 39 additions and 47 deletions

fs/aio.c
@@ -191,6 +191,43 @@
 	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
 } while(0)
 
+
+/* __put_ioctx
+ *	Called when the last user of an aio context has gone away,
+ *	and the struct needs to be freed.
+ */
+static void __put_ioctx(struct kioctx *ctx)
+{
+	unsigned nr_events = ctx->max_reqs;
+
+	BUG_ON(ctx->reqs_active);
+
+	cancel_delayed_work(&ctx->wq);
+	cancel_work_sync(&ctx->wq.work);
+	aio_free_ring(ctx);
+	mmdrop(ctx->mm);
+	ctx->mm = NULL;
+	pr_debug("__put_ioctx: freeing %p\n", ctx);
+	kmem_cache_free(kioctx_cachep, ctx);
+
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
+}
+
+#define get_ioctx(kioctx) do { \
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
+	atomic_inc(&(kioctx)->users); \
+} while (0)
+#define put_ioctx(kioctx) do { \
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
+	if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \
+		__put_ioctx(kioctx); \
+} while (0)
+
 /* ioctx_alloc
  *	Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
  */
@@ -361,32 +398,6 @@
 	}
 }
 
-/* __put_ioctx
- *	Called when the last user of an aio context has gone away,
- *	and the struct needs to be freed.
- */
-void __put_ioctx(struct kioctx *ctx)
-{
-	unsigned nr_events = ctx->max_reqs;
-
-	BUG_ON(ctx->reqs_active);
-
-	cancel_delayed_work(&ctx->wq);
-	cancel_work_sync(&ctx->wq.work);
-	aio_free_ring(ctx);
-	mmdrop(ctx->mm);
-	ctx->mm = NULL;
-	pr_debug("__put_ioctx: freeing %p\n", ctx);
-	kmem_cache_free(kioctx_cachep, ctx);
-
-	if (nr_events) {
-		spin_lock(&aio_nr_lock);
-		BUG_ON(aio_nr - nr_events > aio_nr);
-		aio_nr -= nr_events;
-		spin_unlock(&aio_nr_lock);
-	}
-}
-
 /* aio_get_req
  *	Allocate a slot for an aio request. Increments the users count
  *	of the kioctx so that the kioctx stays around until all requests are
@@ -545,7 +556,7 @@
 /* Lookup an ioctx id. ioctx_list is lockless for reads.
  * FIXME: this is O(n) and is only suitable for development.
  */
-struct kioctx *lookup_ioctx(unsigned long ctx_id)
+static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
 	struct kioctx *ioctx;
 	struct mm_struct *mm;
@@ -1552,7 +1563,7 @@
 	return 1;
 }
 
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		  struct iocb *iocb)
 {
 	struct kiocb *req;
include/linux/aio.h
@@ -209,27 +209,8 @@
 extern int aio_put_req(struct kiocb *iocb);
 extern void kick_iocb(struct kiocb *iocb);
 extern int aio_complete(struct kiocb *iocb, long res, long res2);
-extern void __put_ioctx(struct kioctx *ctx);
 struct mm_struct;
 extern void exit_aio(struct mm_struct *mm);
-extern struct kioctx *lookup_ioctx(unsigned long ctx_id);
-extern int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb);
-
-/* semi private, but used by the 32bit emulations: */
-struct kioctx *lookup_ioctx(unsigned long ctx_id);
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-		  struct iocb *iocb);
-
-#define get_ioctx(kioctx) do { \
-	BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
-	atomic_inc(&(kioctx)->users); \
-} while (0)
-#define put_ioctx(kioctx) do { \
-	BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
-	if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \
-		__put_ioctx(kioctx); \
-} while (0)
 
 #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
 
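The get_ioctx()/put_ioctx() macros that this patch relocates implement a plain reference-count idiom: the last put triggers __put_ioctx(). A rough user-space analogue using C11 atomics (struct ctx, ctx_get() and ctx_put() are made-up names for illustration, not kernel API) looks like this:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical context object; "users" plays the role of kioctx->users. */
struct ctx {
	atomic_int users;
};

/* Take a reference; the caller must already hold one. */
static void ctx_get(struct ctx *c)
{
	atomic_fetch_add(&c->users, 1);
}

/* Drop a reference; the final put frees the object, just as put_ioctx()
 * calls __put_ioctx() when the count reaches zero. */
static void ctx_put(struct ctx *c)
{
	if (atomic_fetch_sub(&c->users, 1) == 1) {
		printf("freeing ctx %p\n", (void *)c);
		free(c);
	}
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	atomic_init(&c->users, 1);	/* creator holds the first reference */
	ctx_get(c);			/* e.g. an in-flight request pins the context */
	ctx_put(c);			/* the request completes */
	ctx_put(c);			/* creator's reference; object is freed here */
	return 0;
}

With the macros and __put_ioctx() now all private to fs/aio.c, every place the count can reach zero lives in a single file.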