Linux Kernel 3.7.1
include/linux/uwb.h
/*
 * Ultra Wide Band
 * UWB API
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * FIXME: doc: overview of the API, different parts and pointers
 */

#ifndef __LINUX__UWB_H__
#define __LINUX__UWB_H__

#include <linux/limits.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/uwb/spec.h>
#include <asm/page.h>

struct uwb_dev;
struct uwb_beca_e;
struct uwb_rc;
struct uwb_rsv;
struct uwb_dbg;
struct uwb_dev {
	struct mutex mutex;
	struct list_head list_node;
	struct device dev;
	struct uwb_rc *rc;		/* radio controller */
	struct uwb_beca_e *bce;		/* Beacon Cache Entry */

	struct uwb_mac_addr mac_addr;
	struct uwb_dev_addr dev_addr;
	int beacon_slot;
	DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
	DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS);
};
#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)

enum { UWB_RC_CTX_MAX = 256 };

struct uwb_notifs_chain {
	struct list_head list;
	struct mutex mutex;
};

/* Beacon cache list */
struct uwb_beca {
	struct list_head list;
	size_t entries;
	struct mutex mutex;
};

/* Event handling thread. */
struct uwbd {
	int pid;
	struct task_struct *task;
	wait_queue_head_t wq;
	struct list_head event_list;
	spinlock_t event_list_lock;
};

struct uwb_mas_bm {
	DECLARE_BITMAP(bm, UWB_NUM_MAS);
	DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS);
	int safe;
	int unsafe;
};
enum uwb_rsv_state {
	UWB_RSV_STATE_NONE = 0,
	UWB_RSV_STATE_O_INITIATED,
	UWB_RSV_STATE_O_PENDING,
	UWB_RSV_STATE_O_MODIFIED,
	UWB_RSV_STATE_O_ESTABLISHED,
	UWB_RSV_STATE_O_TO_BE_MOVED,
	UWB_RSV_STATE_O_MOVE_EXPANDING,
	UWB_RSV_STATE_O_MOVE_COMBINING,
	UWB_RSV_STATE_O_MOVE_REDUCING,
	UWB_RSV_STATE_T_ACCEPTED,
	UWB_RSV_STATE_T_DENIED,
	UWB_RSV_STATE_T_CONFLICT,
	UWB_RSV_STATE_T_PENDING,
	UWB_RSV_STATE_T_EXPANDING_ACCEPTED,
	UWB_RSV_STATE_T_EXPANDING_CONFLICT,
	UWB_RSV_STATE_T_EXPANDING_PENDING,
	UWB_RSV_STATE_T_EXPANDING_DENIED,
	UWB_RSV_STATE_T_RESIZED,

	UWB_RSV_STATE_LAST,
};

enum uwb_rsv_target_type {
	UWB_RSV_TARGET_DEV,
	UWB_RSV_TARGET_DEVADDR,
};

struct uwb_rsv_target {
	enum uwb_rsv_target_type type;
	union {
		struct uwb_dev *dev;
		struct uwb_dev_addr devaddr;
	};
};

struct uwb_rsv_move {
	struct uwb_mas_bm final_mas;
	struct uwb_ie_drp *companion_drp_ie;
	struct uwb_mas_bm companion_mas;
};

/*
 * Number of streams reserved for reservations targeted at DevAddrs.
 */
#define UWB_NUM_GLOBAL_STREAMS 1

typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);

struct uwb_rsv {
	struct uwb_rc *rc;
	struct list_head rc_node;
	struct list_head pal_node;
	struct kref kref;

	struct uwb_dev *owner;
	struct uwb_rsv_target target;
	enum uwb_drp_type type;
	int max_mas;
	int min_mas;
	int max_interval;
	bool is_multicast;

	uwb_rsv_cb_f callback;
	void *pal_priv;

	enum uwb_rsv_state state;
	bool needs_release_companion_mas;
	u8 stream;
	u8 tiebreaker;
	struct uwb_mas_bm mas;
	struct uwb_ie_drp *drp_ie;
	struct uwb_rsv_move mv;
	bool ie_valid;
	struct timer_list timer;
	struct work_struct handle_timeout_work;
};

static const struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };

static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
{
	bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
}

struct uwb_drp_avail {
	DECLARE_BITMAP(global, UWB_NUM_MAS);
	DECLARE_BITMAP(local, UWB_NUM_MAS);
	DECLARE_BITMAP(pending, UWB_NUM_MAS);
	struct uwb_ie_drp_avail ie;
	bool ie_valid;
};

struct uwb_drp_backoff_win {
	u8 window;
	u8 n;
	int total_expired;
	struct timer_list timer;
	bool can_reserve_extra_mases;
};
const char *uwb_rsv_state_str(enum uwb_rsv_state state);
const char *uwb_rsv_type_str(enum uwb_drp_type type);

struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
			       void *pal_priv);
void uwb_rsv_destroy(struct uwb_rsv *rsv);

int uwb_rsv_establish(struct uwb_rsv *rsv);
int uwb_rsv_modify(struct uwb_rsv *rsv,
		   int max_mas, int min_mas, int sparsity);
void uwb_rsv_terminate(struct uwb_rsv *rsv);

void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);

void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas);
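
/*
 * Usage sketch (illustrative, not part of the original header): how a
 * PAL could request a DRP reservation with the API above.  The names
 * my_rsv_cb/my_reserve, the reservation type and the MAS bounds are
 * assumptions for the example; uwb_rsv_establish() only starts the
 * negotiation and the callback reports later state changes.
 *
 *	static void my_rsv_cb(struct uwb_rsv *rsv)
 *	{
 *		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
 *			dev_info(&rsv->rc->uwb_dev.dev, "reservation established\n");
 *	}
 *
 *	static int my_reserve(struct uwb_rc *rc, struct uwb_dev *peer, void *priv)
 *	{
 *		struct uwb_rsv *rsv = uwb_rsv_create(rc, my_rsv_cb, priv);
 *
 *		if (rsv == NULL)
 *			return -ENOMEM;
 *		rsv->target.type  = UWB_RSV_TARGET_DEV;
 *		rsv->target.dev   = peer;
 *		rsv->type         = UWB_DRP_TYPE_HARD;	// assumed from <linux/uwb/spec.h>
 *		rsv->max_mas      = 256;
 *		rsv->min_mas      = 16;
 *		rsv->max_interval = 1;
 *		return uwb_rsv_establish(rsv);		// asynchronous; see my_rsv_cb()
 *	}
 */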
struct uwb_rc {
	struct uwb_dev uwb_dev;
	int index;
	u16 version;

	struct module *owner;
	void *priv;
	int (*start)(struct uwb_rc *rc);
	void (*stop)(struct uwb_rc *rc);
	int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);
	int (*reset)(struct uwb_rc *rc);
	int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);
	int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
			    size_t *, size_t *);

	spinlock_t neh_lock;		/* protects neh_* and ctx_* */
	struct list_head neh_list;	/* Open NE handles */
	unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];
	u8 ctx_roll;

	int beaconing;			/* Beaconing state [channel number] */
	int beaconing_forced;
	int scanning;
	enum uwb_scan_type scan_type:3;
	unsigned ready:1;
	struct uwb_notifs_chain notifs_chain;
	struct uwb_beca uwb_beca;

	struct uwbd uwbd;

	struct uwb_drp_backoff_win bow;
	struct uwb_drp_avail drp_avail;
	struct list_head reservations;
	struct list_head cnflt_alien_list;
	struct uwb_mas_bm cnflt_alien_bitmap;
	struct mutex rsvs_mutex;
	spinlock_t rsvs_lock;
	struct workqueue_struct *rsv_workq;

	struct delayed_work rsv_update_work;
	struct delayed_work rsv_alien_bp_work;
	int set_drp_ie_pending;
	struct mutex ies_mutex;
	struct uwb_rc_cmd_set_ie *ies;
	size_t ies_capacity;

	struct list_head pals;
	int active_pals;

	struct uwb_dbg *dbg;
};


struct uwb_pal {
	struct list_head node;
	const char *name;
	struct device *device;
	struct uwb_rc *rc;

	void (*channel_changed)(struct uwb_pal *pal, int channel);
	void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv);

	int channel;
	struct dentry *debugfs_dir;
};

void uwb_pal_init(struct uwb_pal *pal);
int uwb_pal_register(struct uwb_pal *pal);
void uwb_pal_unregister(struct uwb_pal *pal);

int uwb_radio_start(struct uwb_pal *pal);
void uwb_radio_stop(struct uwb_pal *pal);
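
/*
 * Usage sketch (illustrative, not part of the original header): minimal
 * registration of a PAL against a radio controller.  my_new_rsv,
 * my_pal_start and "my_pal" are hypothetical names.
 *
 *	static void my_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv)
 *	{
 *		// decide whether to uwb_rsv_accept() the reservation or ignore it
 *	}
 *
 *	static int my_pal_start(struct uwb_rc *rc, struct uwb_pal *pal,
 *				struct device *dev)
 *	{
 *		int ret;
 *
 *		uwb_pal_init(pal);
 *		pal->name    = "my_pal";
 *		pal->device  = dev;
 *		pal->rc      = rc;
 *		pal->new_rsv = my_new_rsv;
 *
 *		ret = uwb_pal_register(pal);
 *		if (ret)
 *			return ret;
 *		return uwb_radio_start(pal);	// request that the radio be started
 *	}
 */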
/*
 * General public API
 *
 * This API can be used by UWB device drivers or by those implementing
 * UWB Radio Controllers
 */
struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
				       const struct uwb_dev_addr *devaddr);
struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *);
static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
{
	get_device(&uwb_dev->dev);
}
static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
{
	put_device(&uwb_dev->dev);
}
struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev);

typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv);
int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv);
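
/*
 * Usage sketch (illustrative): iterating over the devices a radio
 * controller currently knows about.  The callback receives the
 * underlying struct device; to_uwb_dev() (defined above) recovers the
 * uwb_dev.  my_count_dev is a hypothetical name, and returning
 * non-zero to stop the iteration is an assumption about
 * uwb_dev_for_each().
 *
 *	static int my_count_dev(struct device *dev, void *priv)
 *	{
 *		struct uwb_dev *uwb_dev = to_uwb_dev(dev);
 *		int *count = priv;
 *
 *		dev_dbg(dev, "beacon slot %d\n", uwb_dev->beacon_slot);
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	// int n = 0;
 *	// uwb_dev_for_each(rc, my_count_dev, &n);
 */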
struct uwb_rc *uwb_rc_alloc(void);
struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *);
struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *);
void uwb_rc_put(struct uwb_rc *rc);

typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg,
				struct uwb_rceb *reply, ssize_t reply_size);

int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
		     struct uwb_rccb *cmd, size_t cmd_size,
		     u8 expected_type, u16 expected_event,
		     uwb_rc_cmd_cb_f cb, void *arg);
ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
		   struct uwb_rccb *cmd, size_t cmd_size,
		   struct uwb_rceb *reply, size_t reply_size);
ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
		    struct uwb_rccb *cmd, size_t cmd_size,
		    u8 expected_type, u16 expected_event,
		    struct uwb_rceb **preply);

size_t __uwb_addr_print(char *, size_t, const unsigned char *, int);

int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *);
int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *);
int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *);
int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *);
int __uwb_mac_addr_assigned_check(struct device *, void *);
int __uwb_dev_addr_assigned_check(struct device *, void *);

/* Print in @buf a pretty repr of @addr */
static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
					const struct uwb_dev_addr *addr)
{
	return __uwb_addr_print(buf, buf_size, addr->data, 0);
}

/* Print in @buf a pretty repr of @addr */
static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
					const struct uwb_mac_addr *addr)
{
	return __uwb_addr_print(buf, buf_size, addr->data, 1);
}

/* @returns 0 if device addresses @addr2 and @addr1 are equal */
static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
				   const struct uwb_dev_addr *addr2)
{
	return memcmp(addr1, addr2, sizeof(*addr1));
}

/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
				   const struct uwb_mac_addr *addr2)
{
	return memcmp(addr1, addr2, sizeof(*addr1));
}

/* @returns !0 if a MAC @addr is a broadcast address */
static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr)
{
	struct uwb_mac_addr bcast = {
		.data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
	};
	return !uwb_mac_addr_cmp(addr, &bcast);
}

/* @returns !0 if a MAC @addr is all zeroes */
static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr)
{
	struct uwb_mac_addr unset = {
		.data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
	};
	return !uwb_mac_addr_cmp(addr, &unset);
}

/* @returns !0 if the address is in use. */
static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
					       struct uwb_dev_addr *addr)
{
	return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
}
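
/*
 * Usage sketch (illustrative): pretty-printing and testing addresses
 * with the helpers above.  The buffer size is arbitrary for the
 * example.
 *
 *	char buf[32];
 *
 *	uwb_mac_addr_print(buf, sizeof(buf), &uwb_dev->mac_addr);
 *	if (uwb_mac_addr_bcast(&uwb_dev->mac_addr))
 *		dev_warn(&uwb_dev->dev, "%s is the broadcast address\n", buf);
 *	else if (uwb_mac_addr_unset(&uwb_dev->mac_addr))
 *		dev_warn(&uwb_dev->dev, "MAC address not set\n");
 */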
/*
 * UWB Radio Controller API
 *
 * This API is used (in addition to the general API) to implement UWB
 * Radio Controllers.
 */
void uwb_rc_init(struct uwb_rc *);
int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv);
void uwb_rc_rm(struct uwb_rc *);
void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t);
void uwb_rc_neh_error(struct uwb_rc *, int);
void uwb_rc_reset_all(struct uwb_rc *rc);
void uwb_rc_pre_reset(struct uwb_rc *rc);
int uwb_rc_post_reset(struct uwb_rc *rc);

static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
{
	return rsv->owner == &rsv->rc->uwb_dev;
}

enum uwb_notifs {
	UWB_NOTIF_ONAIR,
	UWB_NOTIF_OFFAIR,
};

/* Callback function registered with UWB */
struct uwb_notifs_handler {
	struct list_head list_node;
	void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);
	void *data;
};

int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *);
int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *);
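
/*
 * Usage sketch (illustrative): being notified when devices join or
 * leave the air.  my_notif_cb and my_handler are hypothetical names;
 * the handler must stay allocated while it is registered.
 *
 *	static void my_notif_cb(void *data, struct uwb_dev *uwb_dev,
 *				enum uwb_notifs event)
 *	{
 *		if (event == UWB_NOTIF_ONAIR)
 *			dev_info(&uwb_dev->dev, "device is on the air\n");
 *		else if (event == UWB_NOTIF_OFFAIR)
 *			dev_info(&uwb_dev->dev, "device left the air\n");
 *	}
 *
 *	static struct uwb_notifs_handler my_handler = {
 *		.cb   = my_notif_cb,
 *		.data = NULL,
 *	};
 *
 *	// uwb_notifs_register(rc, &my_handler);
 *	// ...
 *	// uwb_notifs_deregister(rc, &my_handler);
 */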
struct uwb_est_entry {
	size_t size;
	unsigned offset;
	enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;
};

int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product,
		     const struct uwb_est_entry *, size_t entries);
int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product,
		       const struct uwb_est_entry *, size_t entries);
ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
			  size_t len);

/* -- Misc */

enum {
	EDC_MAX_ERRORS = 10,
	EDC_ERROR_TIMEFRAME = HZ,
};

/* error density counter */
struct edc {
	unsigned long timestart;
	u16 errorcount;
};

static inline
void edc_init(struct edc *edc)
{
	edc->timestart = jiffies;
}

/* Called when an error occurred.
 * This is a way to determine if the number of acceptable errors per time
 * period has been exceeded. It is not accurate as there are cases in which
 * this scheme will not work, for example if there are periodic occurrences
 * of errors that straddle updates to the start time. This scheme is
 * sufficient for our usage.
 *
 * @returns 1 if maximum acceptable errors per timeframe has been exceeded.
 */
static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
{
	unsigned long now;

	now = jiffies;
	if (now - err_hist->timestart > timeframe) {
		err_hist->errorcount = 1;
		err_hist->timestart = now;
	} else if (++err_hist->errorcount > max_err) {
		err_hist->errorcount = 0;
		err_hist->timestart = now;
		return 1;
	}
	return 0;
}
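
/*
 * Usage sketch (illustrative): using the error density counter to back
 * off after too many errors in a time window.  my_errs and
 * my_handle_error are hypothetical; resetting via uwb_rc_reset_all()
 * is just one possible reaction.
 *
 *	static struct edc my_errs;	// edc_init(&my_errs) at setup time
 *
 *	static void my_handle_error(struct uwb_rc *rc)
 *	{
 *		if (edc_inc(&my_errs, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
 *			dev_err(&rc->uwb_dev.dev, "too many errors, resetting\n");
 *			uwb_rc_reset_all(rc);
 *		}
 *	}
 */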

/* Information Element handling */

struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size);
int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id);

/*
 * Transmission statistics
 *
 * UWB uses LQI and RSSI (one byte values) for reporting radio signal
 * strength and line quality indication. We do quick and dirty
 * averages of those. They are signed values, btw.
 *
 * For 8 bit quantities, we keep the min, the max, an accumulator
 * (@sigma) and a # of samples. When @samples gets to 255, we compute
 * the average (@sigma / @samples), place it in @sigma and reset
 * @samples to 1 (so we use it as the first sample).
 *
 * Now, statistically speaking, probably I am kicking the kidneys of
 * some books I have in my shelves collecting dust, but I just want to
 * get an approx, not the Nobel.
 *
 * LOCKING: there is no locking per se, but we try to keep a lockless
 * schema. Only _add_samples() modifies the values--as long as you
 * have other locking on top that makes sure that no two calls of
 * _add_sample() happen at the same time, then we are fine. Now, for
 * resetting the values we just set @samples to 0 and that makes the
 * next _add_sample() to start with defaults. Reading the values in
 * _show() currently can race, so you need to make sure the calls are
 * under the same lock that protects calls to _add_sample(). FIXME:
 * currently unlocked (It is not ultraprecise but does the trick. Bite
 * me).
 */
struct stats {
	s8 min, max;
	s16 sigma;
	atomic_t samples;
};

static inline
void stats_init(struct stats *stats)
{
	atomic_set(&stats->samples, 0);
	wmb();
}

static inline
void stats_add_sample(struct stats *stats, s8 sample)
{
	s8 min, max;
	s16 sigma;
	unsigned samples = atomic_read(&stats->samples);
	if (samples == 0) {	/* it was zero before, so we initialize */
		min = 127;
		max = -128;
		sigma = 0;
	} else {
		min = stats->min;
		max = stats->max;
		sigma = stats->sigma;
	}

	if (sample < min)	/* compute new values */
		min = sample;
	else if (sample > max)
		max = sample;
	sigma += sample;

	stats->min = min;	/* commit */
	stats->max = max;
	stats->sigma = sigma;
	if (atomic_add_return(1, &stats->samples) > 255) {
		/* wrapped around! reset */
		stats->sigma = sigma / 256;
		atomic_set(&stats->samples, 1);
	}
}

static inline ssize_t stats_show(struct stats *stats, char *buf)
{
	int min, max, avg;
	int samples = atomic_read(&stats->samples);
	if (samples == 0)
		min = max = avg = 0;
	else {
		min = stats->min;
		max = stats->max;
		avg = stats->sigma / samples;
	}
	return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
}

static inline ssize_t stats_store(struct stats *stats, const char *buf,
				  size_t size)
{
	stats_init(stats);
	return size;
}
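
/*
 * Usage sketch (illustrative): keeping a quick LQI average with the
 * stats helpers above; lqi_stats and the hook points are hypothetical.
 *
 *	static struct stats lqi_stats;	// stats_init(&lqi_stats) at setup time
 *
 *	// for each received frame/beacon event:
 *	//	stats_add_sample(&lqi_stats, (s8)lqi);
 *
 *	// in a sysfs ->show() method (buf is the sysfs page buffer):
 *	//	return stats_show(&lqi_stats, buf);
 */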

#endif /* #ifndef __LINUX__UWB_H__ */