#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/byteorder.h>
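/*
 * Isochronous I/O support: DMA buffer helpers, thin wrappers that dispatch
 * isochronous context operations to the low-level card driver, and
 * client-side allocation of IRM-managed channels and bandwidth (this
 * apparently corresponds to the FireWire core's core-iso.c).
 */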
	for (i = 0; i < page_count; i++) {
		set_page_private(buffer->pages[i], address);

	if (i < buffer->page_count)
		address = page_private(buffer->pages[i]);

		address = page_private(buffer->pages[i]);
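/*
 * Descriptive note: the buffer helpers stash each page's DMA bus address in
 * page_private() when the page is mapped, and apparently read it back later
 * both when the page is unmapped and when a DMA address reported by the
 * controller is translated back into an offset within the multi-page
 * isochronous buffer.
 */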
	ctx = card->driver->allocate_iso_context(card,
						 type, channel, header_size);
	ctx->card->driver->free_iso_context(ctx);

	return ctx->card->driver->start_iso(ctx, cycle, sync, tags);

	return ctx->card->driver->set_iso_channels(ctx, channels);

	return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);

	ctx->card->driver->flush_queue_iso(ctx);

	return ctx->card->driver->flush_iso_completions(ctx);

	return ctx->card->driver->stop_iso(ctx);
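/*
 * The one-line bodies above are dispatch stubs: presumably the public
 * fw_iso_context_*() helpers, each of which simply forwards to the matching
 * hook in ctx->card->driver (allocate_iso_context, free_iso_context,
 * start_iso, set_iso_channels, queue_iso, flush_queue_iso,
 * flush_iso_completions, stop_iso), so the controller driver supplies the
 * real implementation.  What follows is a minimal usage sketch, assuming the
 * <linux/firewire.h> client API (fw_iso_context_create(),
 * FW_ISO_CONTEXT_RECEIVE, SCODE_400, FW_ISO_CONTEXT_MATCH_ALL_TAGS); the
 * example_* names are hypothetical and the block is illustrative only.
 */
#if 0	/* usage sketch, never built */
static void example_rx_callback(struct fw_iso_context *context, u32 cycle,
				size_t header_length, void *header, void *data)
{
	/* Consume the received packet headers here. */
}

static int example_open_receive_context(struct fw_card *card, int channel)
{
	struct fw_iso_context *ctx;

	/* Create a receive context with 4 header bytes per packet. */
	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE, channel,
				    SCODE_400, 4, example_rx_callback, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* Buffers/packets would be queued with fw_iso_context_queue() here. */

	/* Start as soon as possible (-1), matching all tag values. */
	return fw_iso_context_start(ctx, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS);
}
#endif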
	for (try = 0; try < 5; try++) {
		new = allocate ? old - bandwidth : old + bandwidth;
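	/*
	 * Descriptive note: this appears to be the usual IRM compare-and-swap
	 * retry for BANDWIDTH_AVAILABLE.  Starting from the last value read
	 * ("old"), a new value is proposed (old - bandwidth when allocating,
	 * old + bandwidth when releasing) and, if another node updated the
	 * register in the meantime, the attempt is retried, up to five times.
	 * For example, allocating 1000 allocation units while old reads 4915
	 * (the nominal initial value) proposes new = 3915.
	 */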
static int manage_channel(struct fw_card *card, int irm_id, int generation,
			  u32 channels_mask, u64 offset, bool allocate)
{
	for (channel = 0; channel < 32; channel++) {
		if (!(channels_mask & 1 << channel))
			continue;

		if ((old & bit) != (all & bit))
			continue;

		if ((data[0] & bit) == (data[1] & bit))
			continue;
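	/*
	 * Descriptive note: channels_mask selects which of the 32 channels
	 * covered by one CHANNELS_AVAILABLE register may be tried.  The
	 * "(old & bit) != (all & bit)" test skips channels whose bit is
	 * already in the wrong state for this request, and the later
	 * "(data[0] & bit) == (data[1] & bit)" check appears to tell a
	 * 1394a-2000-compliant IRM apart from an older 1394-1995 one that
	 * requires the same channel to be retried.
	 */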
static void deallocate_channel(struct fw_card *card, int irm_id,
			       int generation, int channel)
	mask = channel < 32 ? 1 << channel : 1 << (channel - 32);

	manage_channel(card, irm_id, generation, mask, offset, false);
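	/*
	 * Descriptive note: the channel number is mapped to one bit in either
	 * half of the 64-bit channel bitmap (channels 0-31 vs. 32-63);
	 * "offset" is presumably the CSR address of the corresponding
	 * CHANNELS_AVAILABLE_HI or CHANNELS_AVAILABLE_LO register, and
	 * allocate == false makes manage_channel() release the bit instead of
	 * claiming it.
	 */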
void fw_iso_resource_manage(struct fw_card *card, int generation,
			    u64 channels_mask, int *channel, int *bandwidth,
			    bool allocate)
{
	u32 channels_hi = channels_mask;	/* channels 31...0 */
	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
	spin_lock_irq(&card->lock);
	irm_id = card->irm_node->node_id;
	spin_unlock_irq(&card->lock);

	if (channels_hi)
		c = manage_channel(card, irm_id, generation, channels_hi,
				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
				allocate);
	if (channels_lo && c < 0) {
		c = manage_channel(card, irm_id, generation, channels_lo,
				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
				allocate);
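	/*
	 * Descriptive note: the IRM's node ID is sampled under card->lock so
	 * that it stays consistent with "generation".  Channels 0-31 (the HI
	 * register) are tried first; the LO register (channels 32-63) is only
	 * consulted when that attempt did not yield a channel (c < 0).
	 */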
	if (allocate && channels_mask != 0 && c < 0)
		*bandwidth = 0;

	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);

	if (allocate && ret < 0) {
		deallocate_channel(card, irm_id, generation, c);
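	/*
	 * Descriptive note: allocation is kept all-or-nothing.  If a channel
	 * was requested but none could be claimed, the bandwidth request is
	 * dropped as well, and if the bandwidth allocation fails after a
	 * channel was already claimed, that channel is handed back through
	 * deallocate_channel() so nothing stays half allocated.
	 */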