43 err = percpu_counter_init(&p->
events, 1);
69 events = percpu_counter_sum(&p->
events);
79 events -= events >> periods;
81 percpu_counter_add(&p->
events, -events);
105 static void fprop_reflect_period_single(
struct fprop_global *
p,
116 if (pl->
period >= period) {
132 fprop_reflect_period_single(p, pl);
134 percpu_counter_add(&p->
events, 1);
140 unsigned long *numerator,
unsigned long *denominator)
146 seq = read_seqcount_begin(&p->
sequence);
147 fprop_reflect_period_single(p, pl);
149 den = percpu_counter_read_positive(&p->
events);
150 }
while (read_seqcount_retry(&p->
sequence, seq));
/*
 * Batch size for updates to the per-cpu event counters: grows
 * logarithmically with the number of possible CPUs so that the
 * per-cpu deltas are folded into the global count less often on
 * larger machines.
 * NOTE(review): appears to be passed as the batch argument of a
 * percpu_counter update below — confirm against the
 * percpu_counter_add_batch() contract.
 */
169 #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
175 err = percpu_counter_init(&pl->
events, 0);
188 static void fprop_reflect_period_percpu(
struct fprop_global *p,
191 unsigned int period = p->
period;
199 if (pl->
period >= period) {
208 val = percpu_counter_sum(&pl->
events);
211 -val + (val >> (period-pl->
period)), PROP_BATCH);
221 fprop_reflect_period_percpu(p, pl);
223 percpu_counter_add(&p->
events, 1);
228 unsigned long *numerator,
unsigned long *denominator)
234 seq = read_seqcount_begin(&p->
sequence);
235 fprop_reflect_period_percpu(p, pl);
236 num = percpu_counter_read_positive(&pl->
events);
237 den = percpu_counter_read_positive(&p->
events);
238 }
while (read_seqcount_retry(&p->
sequence, seq));
262 unsigned long numerator, denominator;
269 fprop_reflect_period_percpu(p, pl);
271 percpu_counter_add(&p->
events, 1);