Linux Kernel 3.7.1
include/linux/firewire.h
#ifndef _LINUX_FIREWIRE_H
#define _LINUX_FIREWIRE_H

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

#define CSR_REGISTER_BASE		0xfffff0000000ULL

/* register offsets are relative to CSR_REGISTER_BASE */
#define CSR_STATE_CLEAR			0x0
#define CSR_STATE_SET			0x4
#define CSR_NODE_IDS			0x8
#define CSR_RESET_START			0xc
#define CSR_SPLIT_TIMEOUT_HI		0x18
#define CSR_SPLIT_TIMEOUT_LO		0x1c
#define CSR_CYCLE_TIME			0x200
#define CSR_BUS_TIME			0x204
#define CSR_BUSY_TIMEOUT		0x210
#define CSR_PRIORITY_BUDGET		0x218
#define CSR_BUS_MANAGER_ID		0x21c
#define CSR_BANDWIDTH_AVAILABLE		0x220
#define CSR_CHANNELS_AVAILABLE		0x224
#define CSR_CHANNELS_AVAILABLE_HI	0x224
#define CSR_CHANNELS_AVAILABLE_LO	0x228
#define CSR_MAINT_UTILITY		0x230
#define CSR_BROADCAST_CHANNEL		0x234
#define CSR_CONFIG_ROM			0x400
#define CSR_CONFIG_ROM_END		0x800
#define CSR_OMPR			0x900
#define CSR_OPCR(i)			(0x904 + (i) * 4)
#define CSR_IMPR			0x980
#define CSR_IPCR(i)			(0x984 + (i) * 4)
#define CSR_FCP_COMMAND			0xB00
#define CSR_FCP_RESPONSE		0xD00
#define CSR_FCP_END			0xF00
#define CSR_TOPOLOGY_MAP		0x1000
#define CSR_TOPOLOGY_MAP_END		0x1400
#define CSR_SPEED_MAP			0x2000
#define CSR_SPEED_MAP_END		0x3000

#define CSR_OFFSET		0x40
#define CSR_LEAF		0x80
#define CSR_DIRECTORY		0xc0

#define CSR_DESCRIPTOR		0x01
#define CSR_VENDOR		0x03
#define CSR_HARDWARE_VERSION	0x04
#define CSR_UNIT		0x11
#define CSR_SPECIFIER_ID	0x12
#define CSR_VERSION		0x13
#define CSR_DEPENDENT_INFO	0x14
#define CSR_MODEL		0x17
#define CSR_DIRECTORY_ID	0x20

struct fw_csr_iterator {
	const u32 *p;
	const u32 *end;
};

void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
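/*
 * Illustrative sketch, not part of the original header: how a driver
 * might walk a unit directory with the CSR iterator declared above to
 * pick out one immediate entry (here CSR_MODEL).  The function name is
 * hypothetical.
 */
static inline int example_csr_find_model(const u32 *directory)
{
	struct fw_csr_iterator ci;
	int key, value;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value))
		if (key == CSR_MODEL)
			return value;	/* 24-bit immediate value */

	return -ENOENT;
}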
extern struct bus_type fw_bus_type;

struct fw_card_driver;
struct fw_node;

struct fw_card {
	const struct fw_card_driver *driver;
	struct device *device;
	struct kref kref;
	struct completion done;

	int node_id;
	int generation;
	int current_tlabel;
	u64 tlabel_mask;
	struct list_head transaction_list;
	u64 reset_jiffies;

	u32 split_timeout_hi;
	u32 split_timeout_lo;
	unsigned int split_timeout_cycles;
	unsigned int split_timeout_jiffies;

	unsigned long long guid;
	unsigned max_receive;
	int link_speed;
	int config_rom_generation;

	spinlock_t lock; /* Take this lock when handling the lists in
			  * this struct. */
	struct fw_node *local_node;
	struct fw_node *root_node;
	struct fw_node *irm_node;
	u8 color; /* must be u8 to match the definition in struct fw_node */
	int gap_count;
	bool beta_repeaters_present;

	int index;
	struct list_head link;

	struct list_head phy_receiver_list;

	struct delayed_work br_work; /* bus reset job */
	bool br_short;

	struct delayed_work bm_work; /* bus manager job */
	int bm_retries;
	int bm_generation;
	int bm_node_id;
	bool bm_abdicate;

	bool priority_budget_implemented;	/* controller feature */
	bool broadcast_channel_auto_allocated;	/* controller feature */

	bool broadcast_channel_allocated;
	u32 broadcast_channel;
	__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];

	__be32 maint_utility_register;
};

static inline struct fw_card *fw_card_get(struct fw_card *card)
{
	kref_get(&card->kref);

	return card;
}

void fw_card_release(struct kref *kref);

static inline void fw_card_put(struct fw_card *card)
{
	kref_put(&card->kref, fw_card_release);
}

struct fw_attribute_group {
	struct attribute_group *groups[2];
	struct attribute_group group;
	struct attribute *attrs[13];
};

enum fw_device_state {
	FW_DEVICE_INITIALIZING,
	FW_DEVICE_RUNNING,
	FW_DEVICE_GONE,
	FW_DEVICE_SHUTDOWN,
};

/*
 * Note, fw_device.generation always has to be read before fw_device.node_id.
 * Use SMP memory barriers to ensure this.  Otherwise requests will be sent
 * to an outdated node_id if the generation was updated in the meantime due
 * to a bus reset.
 *
 * Likewise, fw-core will take care to update .node_id before .generation so
 * that whenever fw_device.generation is current WRT the actual bus generation,
 * fw_device.node_id is guaranteed to be current too.
 *
 * The same applies to fw_device.card->node_id vs. fw_device.generation.
 *
 * fw_device.config_rom and fw_device.config_rom_length may be accessed during
 * the lifetime of any fw_unit belonging to the fw_device, before device_del()
 * was called on the last fw_unit.  Alternatively, they may be accessed while
 * holding fw_device_rwsem.
 */
struct fw_device {
	atomic_t state;
	struct fw_node *node;
	int node_id;
	int generation;
	unsigned max_speed;
	struct fw_card *card;
	struct device device;

	struct mutex client_list_mutex;
	struct list_head client_list;

	const u32 *config_rom;
	size_t config_rom_length;
	int config_rom_retries;
	unsigned is_local:1;
	unsigned max_rec:4;
	unsigned cmc:1;
	unsigned irmc:1;
	unsigned bc_implemented:2;

	struct delayed_work work;
	struct fw_attribute_group attribute_group;
};

static inline struct fw_device *fw_device(struct device *dev)
{
	return container_of(dev, struct fw_device, device);
}

static inline int fw_device_is_shutdown(struct fw_device *device)
{
	return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
}

int fw_device_enable_phys_dma(struct fw_device *device);
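/*
 * Illustrative sketch, not part of the original header: reading the
 * (generation, node_id) pair in the order required by the comment above
 * struct fw_device.  smp_rmb() pairs with the core's update order; the
 * helper name is hypothetical.
 */
static inline void example_device_snapshot(struct fw_device *device,
					   int *generation, int *node_id)
{
	*generation = device->generation;
	smp_rmb();	/* read generation before node_id */
	*node_id = device->node_id;
}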
/*
 * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
 */
struct fw_unit {
	struct device device;
	const u32 *directory;
	struct fw_attribute_group attribute_group;
};

static inline struct fw_unit *fw_unit(struct device *dev)
{
	return container_of(dev, struct fw_unit, device);
}

static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
	get_device(&unit->device);

	return unit;
}

static inline void fw_unit_put(struct fw_unit *unit)
{
	put_device(&unit->device);
}

static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
{
	return fw_device(unit->device.parent);
}

struct ieee1394_device_id;

struct fw_driver {
	struct device_driver driver;
	/* Called when the parent device sits through a bus reset. */
	void (*update)(struct fw_unit *unit);
	const struct ieee1394_device_id *id_table;
};

struct fw_packet;
struct fw_request;

typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
				     struct fw_card *card, int status);
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
					  void *data, size_t length,
					  void *callback_data);
/*
 * This callback handles an inbound request subaction.  It is called in
 * RCU read-side context, therefore must not sleep.
 *
 * The callback should not initiate outbound request subactions directly.
 * Otherwise there is a danger of recursion of inbound and outbound
 * transactions from and to the local node.
 *
 * The callback is responsible that either fw_send_response() or kfree()
 * is called on the @request, except for FCP registers for which the core
 * takes care of that.
 */
typedef void (*fw_address_callback_t)(struct fw_card *card,
				      struct fw_request *request,
				      int tcode, int destination, int source,
				      int generation,
				      unsigned long long offset,
				      void *data, size_t length,
				      void *callback_data);

struct fw_packet {
	int speed;
	int generation;
	u32 header[4];
	size_t header_length;
	void *payload;
	size_t payload_length;
	dma_addr_t payload_bus;
	bool payload_mapped;
	u32 timestamp;

	/*
	 * This callback is called when the packet transmission has completed.
	 * For successful transmission, the status code is the ack received
	 * from the destination.  Otherwise it is one of the juju-specific
	 * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
	 * The callback can be called from tasklet context and thus
	 * must never block.
	 */
	fw_packet_callback_t callback;
	int ack;
	struct list_head link;
	void *driver_data;
};

struct fw_transaction {
	int node_id; /* The generation is implied; it is always the current. */
	int tlabel;
	struct list_head link;
	struct fw_card *card;
	bool is_split_transaction;
	struct timer_list split_timeout_timer;

	struct fw_packet packet;

	/*
	 * The data passed to the callback is valid only during the
	 * callback.
	 */
	fw_transaction_callback_t callback;
	void *callback_data;
};

struct fw_address_handler {
	u64 offset;
	u64 length;
	fw_address_callback_t address_callback;
	void *callback_data;
	struct list_head link;
};

struct fw_address_region {
	u64 start;
	u64 end;
};

extern const struct fw_address_region fw_high_memory_region;

int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region);
void fw_core_remove_address_handler(struct fw_address_handler *handler);
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode);
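/*
 * Illustrative sketch, not part of the original header: registering an
 * address handler inside fw_high_memory_region and answering inbound
 * write requests.  The callback runs in RCU read-side context and must
 * not sleep; it finishes each request with fw_send_response(), as the
 * fw_address_callback_t comment above requires.  TCODE_* and RCODE_*
 * come from <linux/firewire-constants.h>; "example_" names are
 * hypothetical.
 */
static void example_handle_request(struct fw_card *card,
				   struct fw_request *request,
				   int tcode, int destination, int source,
				   int generation, unsigned long long offset,
				   void *data, size_t length,
				   void *callback_data)
{
	if (tcode == TCODE_WRITE_QUADLET_REQUEST ||
	    tcode == TCODE_WRITE_BLOCK_REQUEST)
		fw_send_response(card, request, RCODE_COMPLETE);
	else
		fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler example_handler = {
	.length           = 0x100,
	.address_callback = example_handle_request,
};

static int example_register_handler(void)
{
	/* On success the core fills in example_handler.offset. */
	return fw_core_add_address_handler(&example_handler,
					   &fw_high_memory_region);
}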
int fw_get_request_speed(struct fw_request *request);
void fw_send_request(struct fw_card *card, struct fw_transaction *t,
		     int tcode, int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data);
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length);
const char *fw_rcode_string(int rcode);
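/*
 * Illustrative sketch, not part of the original header: a sleeping,
 * synchronous quadlet read of the first config ROM word of a device via
 * fw_run_transaction().  The generation is sampled before the node ID,
 * as required by the comment above struct fw_device.
 * TCODE_READ_QUADLET_REQUEST and RCODE_COMPLETE come from
 * <linux/firewire-constants.h>; the function name is hypothetical.
 */
static int example_read_rom_header(struct fw_device *device, u32 *quadlet)
{
	__be32 data;
	int generation, node_id, rcode;

	generation = device->generation;
	smp_rmb();	/* read generation before node_id */
	node_id = device->node_id;

	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
				   node_id, generation, device->max_speed,
				   CSR_REGISTER_BASE + CSR_CONFIG_ROM,
				   &data, sizeof(data));
	if (rcode != RCODE_COMPLETE) {
		pr_debug("read failed: %s\n", fw_rcode_string(rcode));
		return -EIO;
	}

	*quadlet = be32_to_cpu(data);
	return 0;
}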
static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
{
	return tag << 14 | channel << 8 | sy;
}
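/*
 * For illustration (not part of the original header): with tag 3,
 * channel 42 and sy 0 the helper above yields
 * (3 << 14) | (42 << 8) | 0 = 0xea00, i.e. the destination_id field
 * used for asynchronous stream packets.
 */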
struct fw_descriptor {
	struct list_head link;
	size_t length;
	u32 immediate;
	u32 key;
	const u32 *data;
};

int fw_core_add_descriptor(struct fw_descriptor *desc);
void fw_core_remove_descriptor(struct fw_descriptor *desc);

/*
 * The iso packet format allows for an immediate header/payload part
 * stored in 'header' immediately after the packet info plus an
 * indirect payload part that is pointed to by the 'payload' field.
 * Applications can use one or the other or both to implement simple
 * low-bandwidth streaming (e.g. audio) or more advanced
 * scatter-gather streaming (e.g. assembling video frames automatically).
 */
struct fw_iso_packet {
	u16 payload_length;	/* Length of indirect payload */
	u32 interrupt:1;	/* Generate interrupt on this packet */
	u32 skip:1;		/* tx: Set to not send packet at all */
				/* rx: Sync bit, wait for matching sy */
	u32 tag:2;		/* tx: Tag in packet header */
	u32 sy:4;		/* tx: Sy in packet header */
	u32 header_length:8;	/* Length of immediate header */
	u32 header[0];		/* tx: Top of 1394 isoch. data_block */
};

#define FW_ISO_CONTEXT_TRANSMIT			0
#define FW_ISO_CONTEXT_RECEIVE			1
#define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL	2

#define FW_ISO_CONTEXT_MATCH_TAG0	 1
#define FW_ISO_CONTEXT_MATCH_TAG1	 2
#define FW_ISO_CONTEXT_MATCH_TAG2	 4
#define FW_ISO_CONTEXT_MATCH_TAG3	 8
#define FW_ISO_CONTEXT_MATCH_ALL_TAGS	15

/*
 * An iso buffer is just a set of pages mapped for DMA in the
 * specified direction.  Since the pages are to be used for DMA, they
 * are not mapped into the kernel virtual address space.  We store the
 * DMA address in the page private.  The helper function
 * fw_iso_buffer_map() will map the pages into a given vma.
 */
struct fw_iso_buffer {
	enum dma_data_direction direction;
	struct page **pages;
	int page_count;
	int page_count_mapped;
};

int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
		       int page_count, enum dma_data_direction direction);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed);

struct fw_iso_context;
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
				  u32 cycle, size_t header_length,
				  void *header, void *data);
typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
				     dma_addr_t completed, void *data);
struct fw_iso_context {
	struct fw_card *card;
	int type;
	int channel;
	int speed;
	size_t header_size;
	union {
		fw_iso_callback_t sc;
		fw_iso_mc_callback_t mc;
	} callback;
	void *callback_data;
};

struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
		int type, int channel, int speed, size_t header_size,
		fw_iso_callback_t callback, void *callback_data);
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels);
int fw_iso_context_queue(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
int fw_iso_context_start(struct fw_iso_context *ctx,
			 int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
void fw_iso_context_destroy(struct fw_iso_context *ctx);
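/*
 * Illustrative sketch, not part of the original header: bringing up a
 * minimal single-channel isochronous receive context with the API
 * declared above.  Channel 5, 4 buffer pages, 4-byte per-packet headers
 * and SCODE_400 (from <linux/firewire-constants.h>) are arbitrary
 * choices; "example_" names are hypothetical.
 */
static struct {
	struct fw_iso_context *ctx;
	struct fw_iso_buffer buffer;
} example_rx;

static void example_rx_callback(struct fw_iso_context *context, u32 cycle,
				size_t header_length, void *header, void *data)
{
	/* Tasklet context: inspect headers here and requeue packets. */
}

static int example_start_receive(struct fw_card *card)
{
	struct fw_iso_packet packet = {
		.payload_length = PAGE_SIZE,
		.interrupt      = 1,
		.header_length  = 4,
	};
	int err;

	example_rx.ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE,
					       5, SCODE_400, 4,
					       example_rx_callback, NULL);
	if (IS_ERR(example_rx.ctx))
		return PTR_ERR(example_rx.ctx);

	err = fw_iso_buffer_init(&example_rx.buffer, card, 4, DMA_FROM_DEVICE);
	if (err < 0)
		goto out_destroy;

	err = fw_iso_context_queue(example_rx.ctx, &packet,
				   &example_rx.buffer, 0);
	if (err < 0)
		goto out_free;

	return fw_iso_context_start(example_rx.ctx, -1, 0,
				    FW_ISO_CONTEXT_MATCH_ALL_TAGS);

out_free:
	fw_iso_buffer_destroy(&example_rx.buffer, card);
out_destroy:
	fw_iso_context_destroy(example_rx.ctx);
	return err;
}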
void fw_iso_resource_manage(struct fw_card *card, int generation,
			    u64 channels_mask, int *channel, int *bandwidth,
			    bool allocate);

extern struct workqueue_struct *fw_workqueue;

#endif /* _LINUX_FIREWIRE_H */