Blame view
include/linux/proportions.h
3.09 KB
145ca25eb lib: floating pro... |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 |
/*
 * Floating proportions
 *
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This file contains the public data structure and API definitions.
 */

#ifndef _LINUX_PROPORTIONS_H
#define _LINUX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

struct prop_global {
	/*
	 * The period over which we differentiate
	 *
	 * period = 2^shift
	 */
	int shift;
	/*
	 * The total event counter aka 'time'.
	 *
	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
	 * counter bits, the remaining upper bits the period counter.
	 */
	struct percpu_counter events;
};

/*
 * global proportion descriptor
 *
 * this is needed to consistently flip prop_global structures.
 */
struct prop_descriptor {
	int index;			/* which of pg[] is current */
	struct prop_global pg[2];
	struct mutex mutex;		/* serialize the prop_global switch */
};

int prop_descriptor_init(struct prop_descriptor *pd, int shift);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);

/*
 * ----- PERCPU ------
 */

struct prop_local_percpu {
	/*
	 * the local events counter
	 */
	struct percpu_counter events;

	/*
	 * snapshot of the last seen global state
	 */
	int shift;
	unsigned long period;
	raw_spinlock_t lock;		/* protect the snapshot state */
};

int prop_local_init_percpu(struct prop_local_percpu *pl);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
		long *numerator, long *denominator);

/*
 * IRQ-safe wrapper: disables local interrupts around the raw
 * __prop_inc_percpu() increment.
 */
static inline
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(flags);
}

/*
 * Limit the time part in order to ensure there are some bits left for the
 * cycle counter and fraction multiply.
 */
#define PROP_MAX_SHIFT	(3*BITS_PER_LONG/4)

#define PROP_FRAC_SHIFT	(BITS_PER_LONG - PROP_MAX_SHIFT - 1)
#define PROP_FRAC_BASE	(1UL << PROP_FRAC_SHIFT)

void __prop_inc_percpu_max(struct prop_descriptor *pd,
			   struct prop_local_percpu *pl, long frac);

/*
 * ----- SINGLE ------
 */

struct prop_local_single {
	/*
	 * the local events counter
	 */
	unsigned long events;

	/*
	 * snapshot of the last seen global state
	 * and a lock protecting this state
	 */
	unsigned long period;
	int shift;
	raw_spinlock_t lock;		/* protect the snapshot state */
};

#define INIT_PROP_LOCAL_SINGLE(name)			\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}

int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
		long *numerator, long *denominator);

/*
 * IRQ-safe wrapper: disables local interrupts around the raw
 * __prop_inc_single() increment.
 */
static inline
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_single(pd, pl);
	local_irq_restore(flags);
}

#endif /* _LINUX_PROPORTIONS_H */