Commit febb02bd authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (53 commits)
  DVB: firedtv: FireDTV S2 problems with tuning solved
  DVB: firedtv: fix printk format mismatch
  ieee1394: constify device ID tables
  ieee1394: raw1394: add sparse annotations to raw1394_compat_write
  ieee1394: Storage class should be before const qualifier
  ieee1394: sbp2: follow up on "ieee1394: inherit ud vendor_id from node vendor_id"
  firewire: core: optimize propagation of BROADCAST_CHANNEL
  firewire: core: simplify broadcast channel allocation
  firewire: core: increase bus manager grace period
  firewire: core: drop unused call parameters of close_transaction
  firewire: cdev: add closure to async stream ioctl
  firewire: cdev: simplify FW_CDEV_IOC_SEND_REQUEST return value
  firewire: cdev: fix race of ioctl_send_request with bus reset
  firewire: cdev: secure add_descriptor ioctl
  firewire: cdev: amendment to "add ioctl to query maximum transmission speed"
  firewire: broadcast channel support
  firewire: implement asynchronous stream transmission
  firewire: core: normalize a function argument name
  firewire: normalize a variable name
  firewire: core: remove condition which is always false
  ...
parents 0b4d569d 32a0f488
@@ -63,8 +63,7 @@ static int descriptor_count;
#define BIB_CMC ((1) << 30)
#define BIB_IMC ((1) << 31)
-static u32 *
-generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
{
struct fw_descriptor *desc;
static u32 config_rom[256];
@@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length)
return config_rom;
}
-static void
-update_config_roms(void)
+static void update_config_roms(void)
{
struct fw_card *card;
u32 *config_rom;
@@ -141,8 +139,7 @@ update_config_roms(void)
}
}
-int
-fw_core_add_descriptor(struct fw_descriptor *desc)
+int fw_core_add_descriptor(struct fw_descriptor *desc)
{
size_t i;
@@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descriptor *desc)
return 0;
}
-void
-fw_core_remove_descriptor(struct fw_descriptor *desc)
+void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
mutex_lock(&card_mutex);
@@ -185,12 +181,30 @@ fw_core_remove_descriptor(struct fw_descriptor *desc)
mutex_unlock(&card_mutex);
}
+static int set_broadcast_channel(struct device *dev, void *data)
+{
+fw_device_set_broadcast_channel(fw_device(dev), (long)data);
+return 0;
+}
+static void allocate_broadcast_channel(struct fw_card *card, int generation)
+{
+int channel, bandwidth = 0;
+fw_iso_resource_manage(card, generation, 1ULL << 31,
+&channel, &bandwidth, true);
+if (channel == 31) {
+card->broadcast_channel_allocated = true;
+device_for_each_child(card->device, (void *)(long)generation,
+set_broadcast_channel);
+}
+}
static const char gap_count_table[] = {
63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
-void
-fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
+void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
int scheduled;
@@ -200,37 +214,38 @@ fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
fw_card_put(card);
}
-static void
-fw_card_bm_work(struct work_struct *work)
+static void fw_card_bm_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, work.work);
struct fw_device *root_device;
-struct fw_node *root_node, *local_node;
+struct fw_node *root_node;
unsigned long flags;
-int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode;
+int root_id, new_root_id, irm_id, local_id;
+int gap_count, generation, grace, rcode;
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
__be32 lock_data[2];
spin_lock_irqsave(&card->lock, flags);
-local_node = card->local_node;
-root_node = card->root_node;
-if (local_node == NULL) {
+if (card->local_node == NULL) {
spin_unlock_irqrestore(&card->lock, flags);
goto out_put_card;
}
-fw_node_get(local_node);
-fw_node_get(root_node);
generation = card->generation;
+root_node = card->root_node;
+fw_node_get(root_node);
root_device = root_node->data;
root_device_is_running = root_device &&
atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
root_device_is_cmc = root_device && root_device->cmc;
root_id = root_node->node_id;
-grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
+irm_id = card->irm_node->node_id;
+local_id = card->local_node->node_id;
+grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
if (is_next_generation(generation, card->bm_generation) ||
(card->bm_generation != generation && grace)) {
@@ -246,16 +261,15 @@ fw_card_bm_work(struct work_struct *work)
* next generation.
*/
-irm_id = card->irm_node->node_id;
if (!card->irm_node->link_on) {
-new_root_id = local_node->node_id;
+new_root_id = local_id;
fw_notify("IRM has link off, making local node (%02x) root.\n",
new_root_id);
goto pick_me;
}
lock_data[0] = cpu_to_be32(0x3f);
-lock_data[1] = cpu_to_be32(local_node->node_id);
+lock_data[1] = cpu_to_be32(local_id);
spin_unlock_irqrestore(&card->lock, flags);
@@ -269,9 +283,14 @@ fw_card_bm_work(struct work_struct *work)
goto out;
if (rcode == RCODE_COMPLETE &&
-lock_data[0] != cpu_to_be32(0x3f))
-/* Somebody else is BM, let them do the work. */
+lock_data[0] != cpu_to_be32(0x3f)) {
+/* Somebody else is BM. Only act as IRM. */
+if (local_id == irm_id)
+allocate_broadcast_channel(card, generation);
goto out;
+}
spin_lock_irqsave(&card->lock, flags);
@@ -282,19 +301,18 @@ fw_card_bm_work(struct work_struct *work)
* do a bus reset and pick the local node as
* root, and thus, IRM.
*/
-new_root_id = local_node->node_id;
+new_root_id = local_id;
fw_notify("BM lock failed, making local node (%02x) root.\n",
new_root_id);
goto pick_me;
}
} else if (card->bm_generation != generation) {
/*
-* OK, we weren't BM in the last generation, and it's
-* less than 100ms since last bus reset. Reschedule
-* this task 100ms from now.
+* We weren't BM in the last generation, and the last
+* bus reset is less than 125ms ago. Reschedule this job.
*/
spin_unlock_irqrestore(&card->lock, flags);
-fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10));
+fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
goto out;
}
@@ -310,7 +328,7 @@ fw_card_bm_work(struct work_struct *work)
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.
*/
-new_root_id = local_node->node_id;
+new_root_id = local_id;
} else if (!root_device_is_running) {
/*
* If we haven't probed this device yet, bail out now
@@ -332,7 +350,7 @@ fw_card_bm_work(struct work_struct *work)
* successfully read the config rom, but it's not
* cycle master capable.
*/
-new_root_id = local_node->node_id;
+new_root_id = local_id;
}
pick_me:
@@ -363,25 +381,28 @@ fw_card_bm_work(struct work_struct *work)
card->index, new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
fw_core_initiate_bus_reset(card, 1);
+/* Will allocate broadcast channel after the reset. */
+} else {
+if (local_id == irm_id)
+allocate_broadcast_channel(card, generation);
}
out:
fw_node_put(root_node);
-fw_node_put(local_node);
out_put_card:
fw_card_put(card);
}
-static void
-flush_timer_callback(unsigned long data)
+static void flush_timer_callback(unsigned long data)
{
struct fw_card *card = (struct fw_card *)data;
fw_flush_transactions(card);
}
-void
-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
+void fw_card_initialize(struct fw_card *card,
+const struct fw_card_driver *driver,
struct device *device)
{
static atomic_t index = ATOMIC_INIT(-1);
@@ -406,13 +427,12 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
}
EXPORT_SYMBOL(fw_card_initialize);
-int
-fw_card_add(struct fw_card *card,
-u32 max_receive, u32 link_speed, u64 guid)
+int fw_card_add(struct fw_card *card,
+u32 max_receive, u32 link_speed, u64 guid)
{
u32 *config_rom;
size_t length;
-int err;
+int ret;
card->max_receive = max_receive;
card->link_speed = link_speed;
@@ -423,13 +443,14 @@ fw_card_add(struct fw_card *card,
list_add_tail(&card->link, &card_list);
mutex_unlock(&card_mutex);
-err = card->driver->enable(card, config_rom, length);
-if (err < 0) {
+ret = card->driver->enable(card, config_rom, length);
+if (ret < 0) {
mutex_lock(&card_mutex);
list_del(&card->link);
mutex_unlock(&card_mutex);
}
-return err;
+return ret;
}
EXPORT_SYMBOL(fw_card_add);
@@ -442,23 +463,20 @@ EXPORT_SYMBOL(fw_card_add);
* dummy driver just fails all IO.
*/
-static int
-dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
BUG();
return -1;
}
-static int
-dummy_update_phy_reg(struct fw_card *card, int address,
-int clear_bits, int set_bits)
+static int dummy_update_phy_reg(struct fw_card *card, int address,
+int clear_bits, int set_bits)
{
return -ENODEV;
}
-static int
-dummy_set_config_rom(struct fw_card *card,
-u32 *config_rom, size_t length)
+static int dummy_set_config_rom(struct fw_card *card,
+u32 *config_rom, size_t length)
{
/*
* We take the card out of card_list before setting the dummy
@@ -468,27 +486,23 @@ dummy_set_config_rom(struct fw_card *card,
return -1;
}
-static void
-dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, -ENODEV);
}
-static void
-dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, -ENODEV);
}
-static int
-dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
return -ENOENT;
}
-static int
-dummy_enable_phys_dma(struct fw_card *card,
-int node_id, int generation)
+static int dummy_enable_phys_dma(struct fw_card *card,
+int node_id, int generation)
{
return -ENODEV;
}
@@ -503,16 +517,14 @@ static struct fw_card_driver dummy_driver = {
.enable_phys_dma = dummy_enable_phys_dma,
};
-void
-fw_card_release(struct kref *kref)
+void fw_card_release(struct kref *kref)
{
struct fw_card *card = container_of(kref, struct fw_card, kref);
complete(&card->done);
}
-void
-fw_core_remove_card(struct fw_card *card)
+void fw_core_remove_card(struct fw_card *card)
{
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@@ -536,8 +548,7 @@ fw_core_remove_card(struct fw_card *card)
}
EXPORT_SYMBOL(fw_core_remove_card);
-int
-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
{
int reg = short_reset ? 5 : 1;
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
...
@@ -18,87 +18,162 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
+#include <linux/compat.h>
+#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/firewire-cdev.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
+#include <linux/spinlock.h>
#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/idr.h>
-#include <linux/compat.h>
-#include <linux/firewire-cdev.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <asm/system.h>
#include <asm/uaccess.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
#include "fw-device.h"
+#include "fw-topology.h"
+#include "fw-transaction.h"
struct client {
u32 version;
struct fw_device *device;
spinlock_t lock;
bool in_shutdown;
struct idr resource_idr;
struct list_head event_list;
wait_queue_head_t wait;
u64 bus_reset_closure;
struct fw_iso_context *iso_context;
u64 iso_closure;
struct fw_iso_buffer buffer;
unsigned long vm_start;
-struct client;
-struct client_resource {
struct list_head link;
-void (*release)(struct client *client, struct client_resource *r);
-u32 handle;
+struct kref kref;
};
static inline void client_get(struct client *client)
{
kref_get(&client->kref);
}
static void client_release(struct kref *kref)
{
struct client *client = container_of(kref, struct client, kref);
fw_device_put(client->device);
kfree(client);
}
static void client_put(struct client *client)
{
kref_put(&client->kref, client_release);
}
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
struct client_resource *);
struct client_resource {
client_resource_release_fn_t release;
int handle;
};
struct address_handler_resource {
struct client_resource resource;
struct fw_address_handler handler;
__u64 closure;
struct client *client;
};
struct outbound_transaction_resource {
struct client_resource resource;
struct fw_transaction transaction;
};
struct inbound_transaction_resource {
struct client_resource resource;
struct fw_request *request;
void *data;
size_t length;
};
struct descriptor_resource {
struct client_resource resource;
struct fw_descriptor descriptor;
u32 data[0];
};
struct iso_resource {
struct client_resource resource;
struct client *client;
/* Schedule work and access todo only with client->lock held. */
struct delayed_work work;
enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
int generation;
u64 channels;
s32 bandwidth;
struct iso_resource_event *e_alloc, *e_dealloc;
};
static void schedule_iso_resource(struct iso_resource *);
static void release_iso_resource(struct client *, struct client_resource *);
/*
* dequeue_event() just kfree()'s the event, so the event has to be
-* the first field in the struct.
+* the first field in a struct XYZ_event.
*/
struct event {
struct { void *data; size_t size; } v[2];
struct list_head link;
};
-struct bus_reset {
+struct bus_reset_event {
struct event event;
struct fw_cdev_event_bus_reset reset;
};
-struct response {
+struct outbound_transaction_event {
struct event event;
-struct fw_transaction transaction;
struct client *client;
-struct client_resource resource;
+struct outbound_transaction_resource r;
struct fw_cdev_event_response response;
};
-struct iso_interrupt {
+struct inbound_transaction_event {
struct event event;
-struct fw_cdev_event_iso_interrupt interrupt;
+struct fw_cdev_event_request request;
};
-struct client {
-u32 version;
-struct fw_device *device;
-spinlock_t lock;
-u32 resource_handle;
-struct list_head resource_list;
-struct list_head event_list;
-wait_queue_head_t wait;
-u64 bus_reset_closure;
-struct fw_iso_context *iso_context;
-u64 iso_closure;
-struct fw_iso_buffer buffer;
-unsigned long vm_start;
-struct list_head link;
-};
+struct iso_interrupt_event {
+struct event event;
+struct fw_cdev_event_iso_interrupt interrupt;
+};
+struct iso_resource_event {
+struct event event;
+struct fw_cdev_event_iso_resource resource;
+};
-static inline void __user *
-u64_to_uptr(__u64 value)
+static inline void __user *u64_to_uptr(__u64 value)
{
return (void __user *)(unsigned long)value;
}
-static inline __u64
-uptr_to_u64(void __user *ptr)
+static inline __u64 uptr_to_u64(void __user *ptr)
{
return (__u64)(unsigned long)ptr;
}
@@ -107,7 +182,6 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
{
struct fw_device *device;
struct client *client;
-unsigned long flags;
device = fw_device_get_by_devt(inode->i_rdev);
if (device == NULL)
@@ -125,16 +199,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
}
client->device = device;
-INIT_LIST_HEAD(&client->event_list);
-INIT_LIST_HEAD(&client->resource_list);
spin_lock_init(&client->lock);
+idr_init(&client->resource_idr);
+INIT_LIST_HEAD(&client->event_list);
init_waitqueue_head(&client->wait);
+kref_init(&client->kref);
file->private_data = client;
-spin_lock_irqsave(&device->card->lock, flags);
+mutex_lock(&device->client_list_mutex);
list_add_tail(&client->link, &device->client_list);
-spin_unlock_irqrestore(&device->card->lock, flags);
+mutex_unlock(&device->client_list_mutex);
return 0;
}
@@ -150,68 +225,69 @@ static void queue_event(struct client *client, struct event *event,
event->v[1].size = size1;
spin_lock_irqsave(&client->lock, flags);
-list_add_tail(&event->link, &client->event_list);
+if (client->in_shutdown)
+kfree(event);
+else
+list_add_tail(&event->link, &client->event_list);
spin_unlock_irqrestore(&client->lock, flags);
wake_up_interruptible(&client->wait);
}
-static int
-dequeue_event(struct client *client, char __user *buffer, size_t count)
+static int dequeue_event(struct client *client,
+char __user *buffer, size_t count)
{
-unsigned long flags;
struct event *event;
size_t size, total;
-int i, retval;
+int i, ret;
-retval = wait_event_interruptible(client->wait,
+ret = wait_event_interruptible(client->wait,
!list_empty(&client->event_list) ||
fw_device_is_shutdown(client->device));
-if (retval < 0)
-return retval;
+if (ret < 0)
+return ret;
if (list_empty(&client->event_list) &&
fw_device_is_shutdown(client->device))
return -ENODEV;
-spin_lock_irqsave(&client->lock, flags);
-event = container_of(client->event_list.next, struct event, link);
+spin_lock_irq(&client->lock);
+event = list_first_entry(&client->event_list, struct event, link);
list_del(&event->link);
-spin_unlock_irqrestore(&client->lock, flags);
+spin_unlock_irq(&client->lock);
total = 0;
for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
size = min(event->v[i].size, count - total);
if (copy_to_user(buffer + total, event->v[i].data, size)) {
-retval = -EFAULT;
+ret = -EFAULT;
goto out;
}
total += size;
}
-retval = total;
+ret = total;
out:
kfree(event);
-return retval;
+return ret;
}
-static ssize_t
-fw_device_op_read(struct file *file,
-char __user *buffer, size_t count, loff_t *offset)
+static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
+size_t count, loff_t *offset)
{
struct client *client = file->private_data;
return dequeue_event(client, buffer, count);
}
-/* caller must hold card->lock so that node pointers can be dereferenced here */
-static void
-fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
struct client *client)
{
struct fw_card *card = client->device->card;
+spin_lock_irq(&card->lock);
event->closure = client->bus_reset_closure;
event->type = FW_CDEV_EVENT_BUS_RESET;
event->generation = client->device->generation;
@@ -220,39 +296,49 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
event->bm_node_id = 0; /* FIXME: We don't track the BM. */
event->irm_node_id = card->irm_node->node_id;
event->root_node_id = card->root_node->node_id;
+spin_unlock_irq(&card->lock);
}
-static void
-for_each_client(struct fw_device *device,
+static void for_each_client(struct fw_device *device,
void (*callback)(struct client *client))
{
-struct fw_card *card = device->card;
struct client *c;
-unsigned long flags;
-spin_lock_irqsave(&card->lock, flags);
+mutex_lock(&device->client_list_mutex);
list_for_each_entry(c, &device->client_list, link)
callback(c);
-spin_unlock_irqrestore(&card->lock, flags);
+mutex_unlock(&device->client_list_mutex);
+}
+static int schedule_reallocations(int id, void *p, void *data)
+{
+struct client_resource *r = p;
+if (r->release == release_iso_resource)
+schedule_iso_resource(container_of(r,
+struct iso_resource, resource));
+return 0;
}
-static void
-queue_bus_reset_event(struct client *client)
+static void queue_bus_reset_event(struct client *client)
{
-struct bus_reset *bus_reset;
+struct bus_reset_event *e;
-bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
-if (bus_reset == NULL) {
+e = kzalloc(sizeof(*e), GFP_KERNEL);
+if (e == NULL) {
fw_notify("Out of memory when allocating bus reset event\n");
return;
}
-fill_bus_reset_event(&bus_reset->reset, client);
-queue_event(client, &bus_reset->event,
-&bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
+fill_bus_reset_event(&e->reset, client);
+queue_event(client, &e->event,
+&e->reset, sizeof(e->reset), NULL, 0);
+spin_lock_irq(&client->lock);
+idr_for_each(&client->resource_idr, schedule_reallocations, client);
+spin_unlock_irq(&client->lock);
}
void fw_device_cdev_update(struct fw_device *device)
@@ -274,11 +360,11 @@ static int ioctl_get_info(struct client *client, void *buffer)
{
struct fw_cdev_get_info *get_info = buffer;
struct fw_cdev_event_bus_reset bus_reset;
-struct fw_card *card = client->device->card;
unsigned long ret = 0;
client->version = get_info->version;
get_info->version = FW_CDEV_VERSION;
+get_info->card = client->device->card->index;
down_read(&fw_device_rwsem);
@@ -300,49 +386,61 @@ static int ioctl_get_info(struct client *client, void *buffer)
client->bus_reset_closure = get_info->bus_reset_closure;
if (get_info->bus_reset != 0) {
void __user *uptr = u64_to_uptr(get_info->bus_reset);
-unsigned long flags;
-spin_lock_irqsave(&card->lock, flags);
fill_bus_reset_event(&bus_reset, client);
-spin_unlock_irqrestore(&card->lock, flags);
if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
return -EFAULT;
}
-get_info->card = card->index;
return 0;
}
-static void
-add_client_resource(struct client *client, struct client_resource *resource)
+static int add_client_resource(struct client *client,
+struct client_resource *resource, gfp_t gfp_mask)
{
unsigned long flags;
+int ret;
+retry:
+if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
+return -ENOMEM;
spin_lock_irqsave(&client->lock, flags);
-list_add_tail(&resource->link, &client->resource_list);
-resource->handle = client->resource_handle++;
+if (client->in_shutdown)
+ret = -ECANCELED;
+else
+ret = idr_get_new(&client->resource_idr, resource,
+&resource->handle);
+if (ret >= 0) {
+client_get(client);
+if (resource->release == release_iso_resource)
+schedule_iso_resource(container_of(resource,
+struct iso_resource, resource));
+}
spin_unlock_irqrestore(&client->lock, flags);
+if (ret == -EAGAIN)
+goto retry;
+return ret < 0 ? ret : 0;
}
-static int
-release_client_resource(struct client *client, u32 handle,
+static int release_client_resource(struct client *client, u32 handle,
+client_resource_release_fn_t release,
struct client_resource **resource)
{
struct client_resource *r;
-unsigned long flags;
-spin_lock_irqsave(&client->lock, flags);
-list_for_each_entry(r, &client->resource_list, link) {
-if (r->handle == handle) {
-list_del(&r->link);
-break;
-}
-}
-spin_unlock_irqrestore(&client->lock, flags);
+spin_lock_irq(&client->lock);
+if (client->in_shutdown)
+r = NULL;
+else
+r = idr_find(&client->resource_idr, handle);
+if (r && r->release == release)
+idr_remove(&client->resource_idr, handle);
+spin_unlock_irq(&client->lock);
-if (&r->link == &client->resource_list)
+if (!(r && r->release == release))
return -EINVAL;
if (resource)
@@ -350,203 +448,239 @@ release_client_resource(struct client *client, u32 handle,
else
r->release(client, r);
+client_put(client);
return 0;
}
-static void
-release_transaction(struct client *client, struct client_resource *resource)
+static void release_transaction(struct client *client,
+struct client_resource *resource)
{
-struct response *response =
-container_of(resource, struct response, resource);
+struct outbound_transaction_resource *r = container_of(resource,
+struct outbound_transaction_resource, resource);
-fw_cancel_transaction(client->device->card, &response->transaction);
+fw_cancel_transaction(client->device->card, &r->transaction);
}
-static void
-complete_transaction(struct fw_card *card, int rcode,
+static void complete_transaction(struct fw_card *card, int rcode,
void *payload, size_t length, void *data)
{
-struct response *response = data;
-struct client *client = response->client;
+struct outbound_transaction_event *e = data;
+struct fw_cdev_event_response *rsp = &e->response;
+struct client *client = e->client;
unsigned long flags;
-struct fw_cdev_event_response *r = &response->response;
-if (length < r->length)
-r->length = length;
+if (length < rsp->length)
+rsp->length = length;
if (rcode == RCODE_COMPLETE)
-memcpy(r->data, payload, r->length);
+memcpy(rsp->data, payload, rsp->length);
spin_lock_irqsave(&client->lock, flags);
-list_del(&response->resource.link);
+/*
+ * 1. If called while in shutdown, the idr tree must be left untouched.
+ * The idr handle will be removed and the client reference will be
+ * dropped later.
+ * 2. If the call chain was release_client_resource ->
+ * release_transaction -> complete_transaction (instead of a normal
+ * conclusion of the transaction), i.e. if this resource was already
+ * unregistered from the idr, the client reference will be dropped
+ * by release_client_resource and we must not drop it here.
+ */
+if (!client->in_shutdown &&
+idr_find(&client->resource_idr, e->r.resource.handle)) {
+idr_remove(&client->resource_idr, e->r.resource.handle);
+/* Drop the idr's reference */
+client_put(client);
+}
spin_unlock_irqrestore(&client->lock, flags);
-r->type = FW_CDEV_EVENT_RESPONSE;
-r->rcode = rcode;
+rsp->type = FW_CDEV_EVENT_RESPONSE;
+rsp->rcode = rcode;
/*
-* In the case that sizeof(*r) doesn't align with the position of the
+* In the case that sizeof(*rsp) doesn't align with the position of the
* data, and the read is short, preserve an extra copy of the data
* to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
* for short reads and some apps depended on it, this is both safe
* and prudent for compatibility.
*/
-if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
-queue_event(client, &response->event, r, sizeof(*r),
-r->data, r->length);
+if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
+queue_event(client, &e->event, rsp, sizeof(*rsp),
+rsp->data, rsp->length);
else
-queue_event(client, &response->event, r, sizeof(*r) + r->length,
+queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
NULL, 0);
+/* Drop the transaction callback's reference */
+client_put(client);
}
-static int ioctl_send_request(struct client *client, void *buffer)
+static int init_request(struct client *client,
+struct fw_cdev_send_request *request,
+int destination_id, int speed)
{
-struct fw_device *device = client->device;
-struct fw_cdev_send_request *request = buffer;
-struct response *response;
+struct outbound_transaction_event *e;
+int ret;
-/* What is the biggest size we'll accept, really? */
-if (request->length > 4096)
-return -EINVAL;
+if (request->tcode != TCODE_STREAM_DATA &&
+(request->length > 4096 || request->length > 512 << speed))
+return -EIO;
-response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
-if (response == NULL)
+e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
+if (e == NULL)
return -ENOMEM;
-response->client = client;
-response->response.length = request->length;
-response->response.closure = request->closure;
+e->client = client;
+e->response.length = request->length;
+e->response.closure = request->closure;
if (request->data &&
-copy_from_user(response->response.data,
+copy_from_user(e->response.data,
u64_to_uptr(request->data), request->length)) {
-kfree(response);
-return -EFAULT;
+ret = -EFAULT;
+goto failed;
}
-response->resource.release = release_transaction;
-add_client_resource(client, &response->resource);
+e->r.resource.release = release_transaction;
+ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
+if (ret < 0)
+goto failed;
-fw_send_request(device->card, &response->transaction,
-request->tcode & 0x1f,
-device->node->node_id,
-request->generation,
-device->max_speed,
-request->offset,
-response->response.data, request->length,
-complete_transaction, response);
-if (request->data)
-return sizeof(request) + request->length;
-else
-return sizeof(request);
+/* Get a reference for the transaction callback */
+client_get(client);
+fw_send_request(client->device->card, &e->r.transaction,
+request->tcode, destination_id, request->generation,
+speed, request->offset, e->response.data,
+request->length, complete_transaction, e);
+return 0;
+failed:
+kfree(e);
+return ret;
}
-struct address_handler {
-struct fw_address_handler handler;
-__u64 closure;
-struct client *client;
-struct client_resource resource;
-};
+static int ioctl_send_request(struct client *client, void *buffer)
+{
+struct fw_cdev_send_request *request = buffer;
-struct request {
-struct fw_request *request;
-void *data;
-size_t length;
-struct client_resource resource;
-};
+switch (request->tcode) {
+case TCODE_WRITE_QUADLET_REQUEST:
+case TCODE_WRITE_BLOCK_REQUEST:
+case TCODE_READ_QUADLET_REQUEST:
+case TCODE_READ_BLOCK_REQUEST:
+case TCODE_LOCK_MASK_SWAP:
+case TCODE_LOCK_COMPARE_SWAP:
+case TCODE_LOCK_FETCH_ADD:
+case TCODE_LOCK_LITTLE_ADD:
+case TCODE_LOCK_BOUNDED_ADD:
+case TCODE_LOCK_WRAP_ADD:
+case TCODE_LOCK_VENDOR_DEPENDENT:
+break;
+default:
+return -EINVAL;
+}
-struct request_event {
-struct event event;
-struct fw_cdev_event_request request;
-};
+return init_request(client, request, client->device->node_id,
+client->device->max_speed);
+}
-static void
-release_request(struct client *client, struct client_resource *resource)
+static void release_request(struct client *client,
+struct client_resource *resource)
{
-struct request *request =
-container_of(resource, struct request, resource);
+struct inbound_transaction_resource *r = container_of(resource,
+struct inbound_transaction_resource, resource);
-fw_send_response(client->device->card, request->request,
+fw_send_response(client->device->card, r->request,
RCODE_CONFLICT_ERROR);
-kfree(request);
+kfree(r);
}
-static void
-handle_request(struct fw_card *card, struct fw_request *r,
+static void handle_request(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
int generation, int speed,
unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
-struct address_handler *handler = callback_data;
-struct request *request;
-struct request_event *e;
-struct client *client = handler->client;
+struct address_handler_resource *handler = callback_data;
+struct inbound_transaction_resource *r;
+struct inbound_transaction_event *e;
+int ret;
-request = kmalloc(sizeof(*request), GFP_ATOMIC);
+r = kmalloc(sizeof(*r), GFP_ATOMIC);
e = kmalloc(sizeof(*e), GFP_ATOMIC);
-if (request == NULL || e == NULL) {
-kfree(request);
-kfree(e);
-fw_send_response(card, r, RCODE_CONFLICT_ERROR);
-return;
-}
+if (r == NULL || e == NULL)
+goto failed;
-request->request = r;
-request->data = payload;
-request->length = length;
+r->request = request;
+r->data = payload;
+r->length = length;
-request->resource.release = release_request;
-add_client_resource(client, &request->resource);
+r->resource.release = release_request;
+ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
+if (ret < 0)
+goto failed;
e->request.type = FW_CDEV_EVENT_REQUEST;
e->request.tcode = tcode;
e->request.offset = offset;
e->request.length = length;
-e->request.handle = request->resource.handle;
+e->request.handle = r->resource.handle;
e->request.closure = handler->closure;
-queue_event(client, &e->event,
+queue_event(handler->client, &e->event,
&e->request, sizeof(e->request), payload, length);
+return;
+failed:
+kfree(r);
+kfree(e);
+fw_send_response(card, request, RCODE_CONFLICT_ERROR);
}
-static void
-release_address_handler(struct client *client,
+static void release_address_handler(struct client *client,
struct client_resource *resource)
{
-struct address_handler *handler =
-container_of(resource, struct address_handler, resource);
+struct address_handler_resource *r =
+container_of(resource, struct address_handler_resource, resource);
-fw_core_remove_address_handler(&handler->handler);
-kfree(handler);
+fw_core_remove_address_handler(&r->handler);
+kfree(r);
}
static int ioctl_allocate(struct client *client, void *buffer)
{
struct fw_cdev_allocate *request = buffer;
-struct address_handler *handler;
+struct address_handler_resource *r;
struct fw_address_region region;
+int ret;
-handler = kmalloc(sizeof(*handler), GFP_KERNEL);
-if (handler == NULL)
+r = kmalloc(sizeof(*r), GFP_KERNEL);
+if (r == NULL)
return -ENOMEM;
region.start = request->offset;
region.end = request->offset + request->length;
-handler->handler.length = request->length;
-handler->handler.address_callback = handle_request;
-handler->handler.callback_data = handler;
-handler->closure = request->closure;
-handler->client = client;
+r->handler.length = request->length;
+r->handler.address_callback = handle_request;
+r->handler.callback_data = r;
+r->closure = request->closure;
+r->client = client;
-if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
-kfree(handler);
-return -EBUSY;
+ret = fw_core_add_address_handler(&r->handler, &region);
+if (ret < 0) {
+kfree(r);
+return ret;
}
-handler->resource.release = release_address_handler;
-add_client_resource(client, &handler->resource);
-request->handle = handler->resource.handle;
+r->resource.release = release_address_handler;
+ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+if (ret < 0) {
+release_address_handler(client, &r->resource);
+return ret;
+}
+request->handle = r->resource.handle;
return 0;
}
@@ -555,18 +689,22 @@ static int ioctl_deallocate(struct client *client, void *buffer)
{
struct fw_cdev_deallocate *request = buffer;
-return release_client_resource(client, request->handle, NULL);
+return release_client_resource(client, request->handle,
+release_address_handler, NULL);
}
static int ioctl_send_response(struct client *client, void *buffer)
{
struct fw_cdev_send_response *request = buffer;
struct client_resource *resource;
-struct request *r;
+struct inbound_transaction_resource *r;
-if (release_client_resource(client, request->handle, &resource) < 0)
+if (release_client_resource(client, request->handle,
+release_request, &resource) < 0)
return -EINVAL;
-r = container_of(resource, struct request, resource);
+r = container_of(resource, struct inbound_transaction_resource,
+resource);
if (request->length < r->length)
r->length = request->length;
if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
@@ -588,85 +726,92 @@ static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
return fw_core_initiate_bus_reset(client->device->card, short_reset);
}
-struct descriptor {
-struct fw_descriptor d;
-struct client_resource resource;
-u32 data[0];
-};
static void release_descriptor(struct client *client,
struct client_resource *resource)
{
-struct descriptor *descriptor =
-container_of(resource, struct descriptor, resource);
+struct descriptor_resource *r =
+container_of(resource, struct descriptor_resource, resource);
-fw_core_remove_descriptor(&descriptor->d);
-kfree(descriptor);
+fw_core_remove_descriptor(&r->descriptor);
+kfree(r);
}
static int ioctl_add_descriptor(struct client *client, void *buffer)
{
struct fw_cdev_add_descriptor *request = buffer;
-struct descriptor *descriptor;
-int retval;
+struct fw_card *card = client->device->card;
+struct descriptor_resource *r;
+int ret;
+/* Access policy: Allow this ioctl only on local nodes' device files. */
+spin_lock_irq(&card->lock);
+ret = client->device->node_id != card->local_node->node_id;
+spin_unlock_irq(&card->lock);
+if (ret)
+return -ENOSYS;
if (request->length > 256)
return -EINVAL;
-descriptor =
-kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
-if (descriptor == NULL)
+r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
+if (r == NULL)
return -ENOMEM;
-if (copy_from_user(descriptor->data,
+if (copy_from_user(r->data,
u64_to_uptr(request->data), request->length * 4)) {
-kfree(descriptor);
-return -EFAULT;
+ret = -EFAULT;
+goto failed;
}
-descriptor->d.length = request->length;
-descriptor->d.immediate = request->immediate;
-descriptor->d.key = request->key;
-descriptor->d.data = descriptor->data;
+r->descriptor.length = request->length;
+r->descriptor.immediate = request->immediate;
+r->descriptor.key = request->key;
+r->descriptor.data = r->data;
-retval = fw_core_add_descriptor(&descriptor->d);
-if (retval < 0) {
-kfree(descriptor);
-return retval;
-}
+ret = fw_core_add_descriptor(&r->descriptor);
+if (ret < 0)
+goto failed;
-descriptor->resource.release = release_descriptor;
-add_client_resource(client, &descriptor->resource);
-request->handle = descriptor->resource.handle;
+r->resource.release = release_descriptor;
+ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+if (ret < 0) {
+fw_core_remove_descriptor(&r->descriptor);
+goto failed;
+}
+request->handle = r->resource.handle;
return 0;
+failed:
+kfree(r);
+return ret;
}
static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
struct fw_cdev_remove_descriptor *request = buffer;
-return release_client_resource(client, request->handle, NULL);
+return release_client_resource(client, request->handle,
+release_descriptor, NULL);
}
-static void
-iso_callback(struct fw_iso_context *context, u32 cycle,
+static void iso_callback(struct fw_iso_context *context, u32 cycle,
size_t header_length, void *header, void *data)
{
struct client *client = data;
-struct iso_interrupt *irq;
+struct iso_interrupt_event *e;
-irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
-if (irq == NULL)
+e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
+if (e == NULL)
return;
-irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
-irq->interrupt.closure = client->iso_closure;
-irq->interrupt.cycle = cycle;
-irq->interrupt.header_length = header_length;
-memcpy(irq->interrupt.header, header, header_length);
-queue_event(client, &irq->event, &irq->interrupt,
-sizeof(irq->interrupt) + header_length, NULL, 0);
+e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
+e->interrupt.closure = client->iso_closure;
+e->interrupt.cycle = cycle;
+e->interrupt.header_length = header_length;
+memcpy(e->interrupt.header, header, header_length);
+queue_event(client, &e->event, &e->interrupt,
+sizeof(e->interrupt) + header_length, NULL, 0);
}
static int ioctl_create_iso_context(struct client *client, void *buffer)
@@ -871,6 +1016,261 @@ static int ioctl_get_cycle_timer(struct client *client, void *buffer)
return 0;
}
static void iso_resource_work(struct work_struct *work)
{
struct iso_resource_event *e;
struct iso_resource *r =
container_of(work, struct iso_resource, work.work);
struct client *client = r->client;
int generation, channel, bandwidth, todo;
bool skip, free, success;
spin_lock_irq(&client->lock);
generation = client->device->generation;
todo = r->todo;
/* Allow 1000ms grace period for other reallocations. */
if (todo == ISO_RES_ALLOC &&
time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
client_get(client);
skip = true;
} else {
/* We could be called twice within the same generation. */
skip = todo == ISO_RES_REALLOC &&
r->generation == generation;
}
free = todo == ISO_RES_DEALLOC ||
todo == ISO_RES_ALLOC_ONCE ||
todo == ISO_RES_DEALLOC_ONCE;
r->generation = generation;
spin_unlock_irq(&client->lock);
if (skip)
goto out;
bandwidth = r->bandwidth;
fw_iso_resource_manage(client->device->card, generation,
r->channels, &channel, &bandwidth,
todo == ISO_RES_ALLOC ||
todo == ISO_RES_REALLOC ||
todo == ISO_RES_ALLOC_ONCE);
/*
* Is this generation outdated already? As long as this resource sticks
* in the idr, it will be scheduled again for a newer generation or at
* shutdown.
*/
if (channel == -EAGAIN &&
(todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
goto out;
success = channel >= 0 || bandwidth > 0;
spin_lock_irq(&client->lock);
/*
* Transit from allocation to reallocation, except if the client
* requested deallocation in the meantime.
*/
if (r->todo == ISO_RES_ALLOC)
r->todo = ISO_RES_REALLOC;
/*
* Allocation or reallocation failure? Pull this resource out of the
* idr and prepare for deletion, unless the client is shutting down.
*/
if (r->todo == ISO_RES_REALLOC && !success &&
!client->in_shutdown &&
idr_find(&client->resource_idr, r->resource.handle)) {
idr_remove(&client->resource_idr, r->resource.handle);
client_put(client);
free = true;
}
spin_unlock_irq(&client->lock);
if (todo == ISO_RES_ALLOC && channel >= 0)
r->channels = 1ULL << channel;
if (todo == ISO_RES_REALLOC && success)
goto out;
if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
e = r->e_alloc;
r->e_alloc = NULL;
} else {
e = r->e_dealloc;
r->e_dealloc = NULL;
}
e->resource.handle = r->resource.handle;
e->resource.channel = channel;
e->resource.bandwidth = bandwidth;
queue_event(client, &e->event,
&e->resource, sizeof(e->resource), NULL, 0);
if (free) {
cancel_delayed_work(&r->work);
kfree(r->e_alloc);
kfree(r->e_dealloc);
kfree(r);
}
out:
client_put(client);
}
static void schedule_iso_resource(struct iso_resource *r)
{
client_get(r->client);
if (!schedule_delayed_work(&r->work, 0))
client_put(r->client);
}
static void release_iso_resource(struct client *client,
struct client_resource *resource)
{
struct iso_resource *r =
container_of(resource, struct iso_resource, resource);
spin_lock_irq(&client->lock);
r->todo = ISO_RES_DEALLOC;
schedule_iso_resource(r);
spin_unlock_irq(&client->lock);
}
static int init_iso_resource(struct client *client,
struct fw_cdev_allocate_iso_resource *request, int todo)
{
struct iso_resource_event *e1, *e2;
struct iso_resource *r;
int ret;
if ((request->channels == 0 && request->bandwidth == 0) ||
request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
request->bandwidth < 0)
return -EINVAL;
r = kmalloc(sizeof(*r), GFP_KERNEL);
e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
if (r == NULL || e1 == NULL || e2 == NULL) {
ret = -ENOMEM;
goto fail;
}
INIT_DELAYED_WORK(&r->work, iso_resource_work);
r->client = client;
r->todo = todo;
r->generation = -1;
r->channels = request->channels;
r->bandwidth = request->bandwidth;
r->e_alloc = e1;
r->e_dealloc = e2;
e1->resource.closure = request->closure;
e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
e2->resource.closure = request->closure;
e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
if (todo == ISO_RES_ALLOC) {
r->resource.release = release_iso_resource;
ret = add_client_resource(client, &r->resource, GFP_KERNEL);
if (ret < 0)
goto fail;
} else {
r->resource.release = NULL;
r->resource.handle = -1;
schedule_iso_resource(r);
}
request->handle = r->resource.handle;
return 0;
fail:
kfree(r);
kfree(e1);
kfree(e2);
return ret;
}
static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
{
struct fw_cdev_allocate_iso_resource *request = buffer;
return init_iso_resource(client, request, ISO_RES_ALLOC);
}
static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
{
struct fw_cdev_deallocate *request = buffer;
return release_client_resource(client, request->handle,
release_iso_resource, NULL);
}
static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
{
struct fw_cdev_allocate_iso_resource *request = buffer;
return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
}
static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
{
struct fw_cdev_allocate_iso_resource *request = buffer;
return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
}
/*
* Returns a speed code: Maximum speed to or from this device,
* limited by the device's link speed, the local node's link speed,
* and all PHY port speeds between the two links.
*/
static int ioctl_get_speed(struct client *client, void *buffer)
{
return client->device->max_speed;
}
static int ioctl_send_broadcast_request(struct client *client, void *buffer)
{
struct fw_cdev_send_request *request = buffer;
switch (request->tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
break;
default:
return -EINVAL;
}
/* Security policy: Only allow accesses to Units Space. */
if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
return -EACCES;
return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
}
static int ioctl_send_stream_packet(struct client *client, void *buffer)
{
struct fw_cdev_send_stream_packet *p = buffer;
struct fw_cdev_send_request request;
int dest;
if (p->speed > client->device->card->link_speed ||
p->length > 1024 << p->speed)
return -EIO;
if (p->tag > 3 || p->channel > 63 || p->sy > 15)
return -EINVAL;
dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
request.tcode = TCODE_STREAM_DATA;
request.length = p->length;
request.closure = p->closure;
request.data = p->data;
request.generation = p->generation;
return init_request(client, &request, dest, p->speed);
}
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
ioctl_get_info,
ioctl_send_request,
@@ -885,13 +1285,20 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
ioctl_start_iso,
ioctl_stop_iso,
ioctl_get_cycle_timer,
+ioctl_allocate_iso_resource,
+ioctl_deallocate_iso_resource,
+ioctl_allocate_iso_resource_once,
+ioctl_deallocate_iso_resource_once,
+ioctl_get_speed,
+ioctl_send_broadcast_request,
+ioctl_send_stream_packet,
};
-static int
-dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
+static int dispatch_ioctl(struct client *client,
+unsigned int cmd, void __user *arg)
{
char buffer[256];
-int retval;
+int ret;
if (_IOC_TYPE(cmd) != '#' ||
_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
@@ -903,9 +1310,9 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
return -EFAULT;
}
-retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
-if (retval < 0)
-return retval;
+ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+if (ret < 0)
+return ret;
if (_IOC_DIR(cmd) & _IOC_READ) {
if (_IOC_SIZE(cmd) > sizeof(buffer) ||
@@ -913,12 +1320,11 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
return -EFAULT;
}
-return retval;
+return ret;
}
static long static long fw_device_op_ioctl(struct file *file,
fw_device_op_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{ {
struct client *client = file->private_data; struct client *client = file->private_data;
...@@ -929,9 +1335,8 @@ fw_device_op_ioctl(struct file *file, ...@@ -929,9 +1335,8 @@ fw_device_op_ioctl(struct file *file,
} }
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
static long static long fw_device_op_compat_ioctl(struct file *file,
fw_device_op_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{ {
struct client *client = file->private_data; struct client *client = file->private_data;
...@@ -947,7 +1352,7 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -947,7 +1352,7 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
struct client *client = file->private_data; struct client *client = file->private_data;
enum dma_data_direction direction; enum dma_data_direction direction;
unsigned long size; unsigned long size;
int page_count, retval; int page_count, ret;
if (fw_device_is_shutdown(client->device)) if (fw_device_is_shutdown(client->device))
return -ENODEV; return -ENODEV;
...@@ -973,48 +1378,57 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -973,48 +1378,57 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
else else
direction = DMA_FROM_DEVICE; direction = DMA_FROM_DEVICE;
retval = fw_iso_buffer_init(&client->buffer, client->device->card, ret = fw_iso_buffer_init(&client->buffer, client->device->card,
page_count, direction); page_count, direction);
if (retval < 0) if (ret < 0)
return retval; return ret;
retval = fw_iso_buffer_map(&client->buffer, vma); ret = fw_iso_buffer_map(&client->buffer, vma);
if (retval < 0) if (ret < 0)
fw_iso_buffer_destroy(&client->buffer, client->device->card); fw_iso_buffer_destroy(&client->buffer, client->device->card);
return retval; return ret;
}
static int shutdown_resource(int id, void *p, void *data)
{
struct client_resource *r = p;
struct client *client = data;
r->release(client, r);
client_put(client);
return 0;
} }
static int fw_device_op_release(struct inode *inode, struct file *file) static int fw_device_op_release(struct inode *inode, struct file *file)
{ {
struct client *client = file->private_data; struct client *client = file->private_data;
struct event *e, *next_e; struct event *e, *next_e;
struct client_resource *r, *next_r;
unsigned long flags;
if (client->buffer.pages) mutex_lock(&client->device->client_list_mutex);
fw_iso_buffer_destroy(&client->buffer, client->device->card); list_del(&client->link);
mutex_unlock(&client->device->client_list_mutex);
if (client->iso_context) if (client->iso_context)
fw_iso_context_destroy(client->iso_context); fw_iso_context_destroy(client->iso_context);
list_for_each_entry_safe(r, next_r, &client->resource_list, link) if (client->buffer.pages)
r->release(client, r); fw_iso_buffer_destroy(&client->buffer, client->device->card);
/* /* Freeze client->resource_idr and client->event_list */
* FIXME: We should wait for the async tasklets to stop spin_lock_irq(&client->lock);
* running before freeing the memory. client->in_shutdown = true;
*/ spin_unlock_irq(&client->lock);
idr_for_each(&client->resource_idr, shutdown_resource, client);
idr_remove_all(&client->resource_idr);
idr_destroy(&client->resource_idr);
list_for_each_entry_safe(e, next_e, &client->event_list, link) list_for_each_entry_safe(e, next_e, &client->event_list, link)
kfree(e); kfree(e);
spin_lock_irqsave(&client->device->card->lock, flags); client_put(client);
list_del(&client->link);
spin_unlock_irqrestore(&client->device->card->lock, flags);
fw_device_put(client->device);
kfree(client);
return 0; return 0;
} }
......
...@@ -18,22 +18,26 @@ ...@@ -18,22 +18,26 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/ */
#include <linux/module.h> #include <linux/ctype.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/device.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/idr.h> #include <linux/idr.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/string.h> #include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h> #include <linux/rwsem.h>
#include <linux/semaphore.h> #include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <asm/system.h> #include <asm/system.h>
#include <linux/ctype.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h" #include "fw-device.h"
#include "fw-topology.h"
#include "fw-transaction.h"
void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
{ {
...@@ -132,8 +136,7 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) ...@@ -132,8 +136,7 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
vendor, model, specifier_id, version); vendor, model, specifier_id, version);
} }
static int static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
{ {
struct fw_unit *unit = fw_unit(dev); struct fw_unit *unit = fw_unit(dev);
char modalias[64]; char modalias[64];
...@@ -152,27 +155,6 @@ struct bus_type fw_bus_type = { ...@@ -152,27 +155,6 @@ struct bus_type fw_bus_type = {
}; };
EXPORT_SYMBOL(fw_bus_type); EXPORT_SYMBOL(fw_bus_type);
static void fw_device_release(struct device *dev)
{
struct fw_device *device = fw_device(dev);
struct fw_card *card = device->card;
unsigned long flags;
/*
* Take the card lock so we don't set this to NULL while a
* FW_NODE_UPDATED callback is being handled or while the
* bus manager work looks at this node.
*/
spin_lock_irqsave(&card->lock, flags);
device->node->data = NULL;
spin_unlock_irqrestore(&card->lock, flags);
fw_node_put(device->node);
kfree(device->config_rom);
kfree(device);
fw_card_put(card);
}
int fw_device_enable_phys_dma(struct fw_device *device) int fw_device_enable_phys_dma(struct fw_device *device)
{ {
int generation = device->generation; int generation = device->generation;
...@@ -191,8 +173,8 @@ struct config_rom_attribute { ...@@ -191,8 +173,8 @@ struct config_rom_attribute {
u32 key; u32 key;
}; };
static ssize_t static ssize_t show_immediate(struct device *dev,
show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) struct device_attribute *dattr, char *buf)
{ {
struct config_rom_attribute *attr = struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr); container_of(dattr, struct config_rom_attribute, attr);
...@@ -223,8 +205,8 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) ...@@ -223,8 +205,8 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
#define IMMEDIATE_ATTR(name, key) \ #define IMMEDIATE_ATTR(name, key) \
{ __ATTR(name, S_IRUGO, show_immediate, NULL), key } { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
static ssize_t static ssize_t show_text_leaf(struct device *dev,
show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf) struct device_attribute *dattr, char *buf)
{ {
struct config_rom_attribute *attr = struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr); container_of(dattr, struct config_rom_attribute, attr);
...@@ -293,10 +275,9 @@ static struct config_rom_attribute config_rom_attributes[] = { ...@@ -293,10 +275,9 @@ static struct config_rom_attribute config_rom_attributes[] = {
TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION), TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
}; };
static void static void init_fw_attribute_group(struct device *dev,
init_fw_attribute_group(struct device *dev, struct device_attribute *attrs,
struct device_attribute *attrs, struct fw_attribute_group *group)
struct fw_attribute_group *group)
{ {
struct device_attribute *attr; struct device_attribute *attr;
int i, j; int i, j;
...@@ -319,9 +300,8 @@ init_fw_attribute_group(struct device *dev, ...@@ -319,9 +300,8 @@ init_fw_attribute_group(struct device *dev,
dev->groups = group->groups; dev->groups = group->groups;
} }
static ssize_t static ssize_t modalias_show(struct device *dev,
modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
struct device_attribute *attr, char *buf)
{ {
struct fw_unit *unit = fw_unit(dev); struct fw_unit *unit = fw_unit(dev);
int length; int length;
...@@ -332,9 +312,8 @@ modalias_show(struct device *dev, ...@@ -332,9 +312,8 @@ modalias_show(struct device *dev,
return length + 1; return length + 1;
} }
static ssize_t static ssize_t rom_index_show(struct device *dev,
rom_index_show(struct device *dev, struct device_attribute *attr, char *buf)
struct device_attribute *attr, char *buf)
{ {
struct fw_device *device = fw_device(dev->parent); struct fw_device *device = fw_device(dev->parent);
struct fw_unit *unit = fw_unit(dev); struct fw_unit *unit = fw_unit(dev);
...@@ -349,8 +328,8 @@ static struct device_attribute fw_unit_attributes[] = { ...@@ -349,8 +328,8 @@ static struct device_attribute fw_unit_attributes[] = {
__ATTR_NULL, __ATTR_NULL,
}; };
static ssize_t static ssize_t config_rom_show(struct device *dev,
config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
{ {
struct fw_device *device = fw_device(dev); struct fw_device *device = fw_device(dev);
size_t length; size_t length;
...@@ -363,8 +342,8 @@ config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) ...@@ -363,8 +342,8 @@ config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
return length; return length;
} }
static ssize_t static ssize_t guid_show(struct device *dev,
guid_show(struct device *dev, struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
{ {
struct fw_device *device = fw_device(dev); struct fw_device *device = fw_device(dev);
int ret; int ret;
...@@ -383,8 +362,8 @@ static struct device_attribute fw_device_attributes[] = { ...@@ -383,8 +362,8 @@ static struct device_attribute fw_device_attributes[] = {
__ATTR_NULL, __ATTR_NULL,
}; };
static int static int read_rom(struct fw_device *device,
read_rom(struct fw_device *device, int generation, int index, u32 *data) int generation, int index, u32 *data)
{ {
int rcode; int rcode;
...@@ -539,7 +518,7 @@ static int read_bus_info_block(struct fw_device *device, int generation) ...@@ -539,7 +518,7 @@ static int read_bus_info_block(struct fw_device *device, int generation)
kfree(old_rom); kfree(old_rom);
ret = 0; ret = 0;
device->cmc = rom[2] & 1 << 30; device->cmc = rom[2] >> 30 & 1;
out: out:
kfree(rom); kfree(rom);
...@@ -679,11 +658,53 @@ static void fw_device_shutdown(struct work_struct *work) ...@@ -679,11 +658,53 @@ static void fw_device_shutdown(struct work_struct *work)
fw_device_put(device); fw_device_put(device);
} }
static void fw_device_release(struct device *dev)
{
struct fw_device *device = fw_device(dev);
struct fw_card *card = device->card;
unsigned long flags;
/*
* Take the card lock so we don't set this to NULL while a
* FW_NODE_UPDATED callback is being handled or while the
* bus manager work looks at this node.
*/
spin_lock_irqsave(&card->lock, flags);
device->node->data = NULL;
spin_unlock_irqrestore(&card->lock, flags);
fw_node_put(device->node);
kfree(device->config_rom);
kfree(device);
fw_card_put(card);
}
static struct device_type fw_device_type = { static struct device_type fw_device_type = {
.release = fw_device_release, .release = fw_device_release,
}; };
static void fw_device_update(struct work_struct *work); static int update_unit(struct device *dev, void *data)
{
struct fw_unit *unit = fw_unit(dev);
struct fw_driver *driver = (struct fw_driver *)dev->driver;
if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
down(&dev->sem);
driver->update(unit);
up(&dev->sem);
}
return 0;
}
static void fw_device_update(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
fw_device_cdev_update(device);
device_for_each_child(&device->device, NULL, update_unit);
}
/* /*
* If a device was pending for deletion because its node went away but its * If a device was pending for deletion because its node went away but its
...@@ -735,12 +756,50 @@ static int lookup_existing_device(struct device *dev, void *data) ...@@ -735,12 +756,50 @@ static int lookup_existing_device(struct device *dev, void *data)
return match; return match;
} }
enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
{
struct fw_card *card = device->card;
__be32 data;
int rcode;
if (!card->broadcast_channel_allocated)
return;
if (device->bc_implemented == BC_UNKNOWN) {
rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
device->node_id, generation, device->max_speed,
CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
&data, 4);
switch (rcode) {
case RCODE_COMPLETE:
if (data & cpu_to_be32(1 << 31)) {
device->bc_implemented = BC_IMPLEMENTED;
break;
}
/* else fall through to case address error */
case RCODE_ADDRESS_ERROR:
device->bc_implemented = BC_UNIMPLEMENTED;
}
}
if (device->bc_implemented == BC_IMPLEMENTED) {
data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
BROADCAST_CHANNEL_VALID);
fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
device->node_id, generation, device->max_speed,
CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
&data, 4);
}
}
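
For context, the BROADCAST_CHANNEL register that this function probes and then writes has, per IEEE 1394a, an "implemented" indicator in its most significant bit (the bit tested above), a valid flag, and the broadcast channel number 31 in its low bits. A hedged sketch of the layout assumed here (the BROADCAST_CHANNEL_* macros come from fw-transaction.h; the values below are my reading, not a quote):

/*
 * Assumed BROADCAST_CHANNEL register layout (sketch):
 *   bit 31      reads as 1 when the register is implemented (checked above)
 *   bit 30      "valid" flag, set once the bus manager has allocated channel 31
 *   bits 5..0   broadcast channel number, 31
 * so the write above amounts to (1 << 31) | (1 << 30) | 31.
 */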
static void fw_device_init(struct work_struct *work) static void fw_device_init(struct work_struct *work)
{ {
struct fw_device *device = struct fw_device *device =
container_of(work, struct fw_device, work.work); container_of(work, struct fw_device, work.work);
struct device *revived_dev; struct device *revived_dev;
int minor, err; int minor, ret;
/* /*
* All failure paths here set node->data to NULL, so that we * All failure paths here set node->data to NULL, so that we
...@@ -776,12 +835,12 @@ static void fw_device_init(struct work_struct *work) ...@@ -776,12 +835,12 @@ static void fw_device_init(struct work_struct *work)
fw_device_get(device); fw_device_get(device);
down_write(&fw_device_rwsem); down_write(&fw_device_rwsem);
err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
idr_get_new(&fw_device_idr, device, &minor) : idr_get_new(&fw_device_idr, device, &minor) :
-ENOMEM; -ENOMEM;
up_write(&fw_device_rwsem); up_write(&fw_device_rwsem);
if (err < 0) if (ret < 0)
goto error; goto error;
device->device.bus = &fw_bus_type; device->device.bus = &fw_bus_type;
...@@ -828,6 +887,8 @@ static void fw_device_init(struct work_struct *work) ...@@ -828,6 +887,8 @@ static void fw_device_init(struct work_struct *work)
device->config_rom[3], device->config_rom[4], device->config_rom[3], device->config_rom[4],
1 << device->max_speed); 1 << device->max_speed);
device->config_rom_retries = 0; device->config_rom_retries = 0;
fw_device_set_broadcast_channel(device, device->generation);
} }
/* /*
...@@ -851,29 +912,6 @@ static void fw_device_init(struct work_struct *work) ...@@ -851,29 +912,6 @@ static void fw_device_init(struct work_struct *work)
put_device(&device->device); /* our reference */ put_device(&device->device); /* our reference */
} }
static int update_unit(struct device *dev, void *data)
{
struct fw_unit *unit = fw_unit(dev);
struct fw_driver *driver = (struct fw_driver *)dev->driver;
if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
down(&dev->sem);
driver->update(unit);
up(&dev->sem);
}
return 0;
}
static void fw_device_update(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
fw_device_cdev_update(device);
device_for_each_child(&device->device, NULL, update_unit);
}
enum { enum {
REREAD_BIB_ERROR, REREAD_BIB_ERROR,
REREAD_BIB_GONE, REREAD_BIB_GONE,
...@@ -894,7 +932,7 @@ static int reread_bus_info_block(struct fw_device *device, int generation) ...@@ -894,7 +932,7 @@ static int reread_bus_info_block(struct fw_device *device, int generation)
if (i == 0 && q == 0) if (i == 0 && q == 0)
return REREAD_BIB_GONE; return REREAD_BIB_GONE;
if (i > device->config_rom_length || q != device->config_rom[i]) if (q != device->config_rom[i])
return REREAD_BIB_CHANGED; return REREAD_BIB_CHANGED;
} }
...@@ -1004,6 +1042,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) ...@@ -1004,6 +1042,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
device->node = fw_node_get(node); device->node = fw_node_get(node);
device->node_id = node->node_id; device->node_id = node->node_id;
device->generation = card->generation; device->generation = card->generation;
mutex_init(&device->client_list_mutex);
INIT_LIST_HEAD(&device->client_list); INIT_LIST_HEAD(&device->client_list);
/* /*
......
...@@ -19,10 +19,17 @@ ...@@ -19,10 +19,17 @@
#ifndef __fw_device_h #ifndef __fw_device_h
#define __fw_device_h #define __fw_device_h
#include <linux/device.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/idr.h> #include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h> #include <linux/rwsem.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <asm/atomic.h> #include <asm/atomic.h>
enum fw_device_state { enum fw_device_state {
...@@ -38,6 +45,9 @@ struct fw_attribute_group { ...@@ -38,6 +45,9 @@ struct fw_attribute_group {
struct attribute *attrs[11]; struct attribute *attrs[11];
}; };
struct fw_node;
struct fw_card;
/* /*
* Note, fw_device.generation always has to be read before fw_device.node_id. * Note, fw_device.generation always has to be read before fw_device.node_id.
* Use SMP memory barriers to ensure this. Otherwise requests will be sent * Use SMP memory barriers to ensure this. Otherwise requests will be sent
...@@ -61,13 +71,18 @@ struct fw_device { ...@@ -61,13 +71,18 @@ struct fw_device {
int node_id; int node_id;
int generation; int generation;
unsigned max_speed; unsigned max_speed;
bool cmc;
struct fw_card *card; struct fw_card *card;
struct device device; struct device device;
struct mutex client_list_mutex;
struct list_head client_list; struct list_head client_list;
u32 *config_rom; u32 *config_rom;
size_t config_rom_length; size_t config_rom_length;
int config_rom_retries; int config_rom_retries;
unsigned cmc:1;
unsigned bc_implemented:2;
struct delayed_work work; struct delayed_work work;
struct fw_attribute_group attribute_group; struct fw_attribute_group attribute_group;
}; };
...@@ -96,6 +111,7 @@ static inline void fw_device_put(struct fw_device *device) ...@@ -96,6 +111,7 @@ static inline void fw_device_put(struct fw_device *device)
struct fw_device *fw_device_get_by_devt(dev_t devt); struct fw_device *fw_device_get_by_devt(dev_t devt);
int fw_device_enable_phys_dma(struct fw_device *device); int fw_device_enable_phys_dma(struct fw_device *device);
void fw_device_set_broadcast_channel(struct fw_device *device, int generation);
void fw_device_cdev_update(struct fw_device *device); void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device); void fw_device_cdev_remove(struct fw_device *device);
...@@ -176,8 +192,7 @@ struct fw_driver { ...@@ -176,8 +192,7 @@ struct fw_driver {
const struct fw_device_id *id_table; const struct fw_device_id *id_table;
}; };
static inline struct fw_driver * static inline struct fw_driver *fw_driver(struct device_driver *drv)
fw_driver(struct device_driver *drv)
{ {
return container_of(drv, struct fw_driver, driver); return container_of(drv, struct fw_driver, driver);
} }
......
/* /*
* Isochronous IO functionality * Isochronous I/O functionality:
* - Isochronous DMA context management
* - Isochronous bus resource management (channels, bandwidth), client side
* *
* Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
* *
...@@ -18,21 +20,25 @@ ...@@ -18,21 +20,25 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/ */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/vmalloc.h> #include <linux/errno.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "fw-transaction.h"
#include "fw-topology.h" #include "fw-topology.h"
#include "fw-device.h" #include "fw-transaction.h"
int /*
fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, * Isochronous DMA context management
int page_count, enum dma_data_direction direction) */
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
int page_count, enum dma_data_direction direction)
{ {
int i, j, retval = -ENOMEM; int i, j;
dma_addr_t address; dma_addr_t address;
buffer->page_count = page_count; buffer->page_count = page_count;
...@@ -69,19 +75,21 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, ...@@ -69,19 +75,21 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
kfree(buffer->pages); kfree(buffer->pages);
out: out:
buffer->pages = NULL; buffer->pages = NULL;
return retval;
return -ENOMEM;
} }
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{ {
unsigned long uaddr; unsigned long uaddr;
int i, retval; int i, err;
uaddr = vma->vm_start; uaddr = vma->vm_start;
for (i = 0; i < buffer->page_count; i++) { for (i = 0; i < buffer->page_count; i++) {
retval = vm_insert_page(vma, uaddr, buffer->pages[i]); err = vm_insert_page(vma, uaddr, buffer->pages[i]);
if (retval) if (err)
return retval; return err;
uaddr += PAGE_SIZE; uaddr += PAGE_SIZE;
} }
...@@ -105,14 +113,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, ...@@ -105,14 +113,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
buffer->pages = NULL; buffer->pages = NULL;
} }
struct fw_iso_context * struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
fw_iso_context_create(struct fw_card *card, int type, int type, int channel, int speed, size_t header_size,
int channel, int speed, size_t header_size, fw_iso_callback_t callback, void *callback_data)
fw_iso_callback_t callback, void *callback_data)
{ {
struct fw_iso_context *ctx; struct fw_iso_context *ctx;
ctx = card->driver->allocate_iso_context(card, type, header_size); ctx = card->driver->allocate_iso_context(card,
type, channel, header_size);
if (IS_ERR(ctx)) if (IS_ERR(ctx))
return ctx; return ctx;
...@@ -134,25 +142,186 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx) ...@@ -134,25 +142,186 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
card->driver->free_iso_context(ctx); card->driver->free_iso_context(ctx);
} }
int int fw_iso_context_start(struct fw_iso_context *ctx,
fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags) int cycle, int sync, int tags)
{ {
return ctx->card->driver->start_iso(ctx, cycle, sync, tags); return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
} }
int int fw_iso_context_queue(struct fw_iso_context *ctx,
fw_iso_context_queue(struct fw_iso_context *ctx, struct fw_iso_packet *packet,
struct fw_iso_packet *packet, struct fw_iso_buffer *buffer,
struct fw_iso_buffer *buffer, unsigned long payload)
unsigned long payload)
{ {
struct fw_card *card = ctx->card; struct fw_card *card = ctx->card;
return card->driver->queue_iso(ctx, packet, buffer, payload); return card->driver->queue_iso(ctx, packet, buffer, payload);
} }
int int fw_iso_context_stop(struct fw_iso_context *ctx)
fw_iso_context_stop(struct fw_iso_context *ctx)
{ {
return ctx->card->driver->stop_iso(ctx); return ctx->card->driver->stop_iso(ctx);
} }
/*
* Isochronous bus resource management (channels, bandwidth), client side
*/
static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
int bandwidth, bool allocate)
{
__be32 data[2];
int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
/*
* On a 1394a IRM with low contention, try < 1 is enough.
* On a 1394-1995 IRM, we need at least try < 2.
* Let's just do try < 5.
*/
for (try = 0; try < 5; try++) {
new = allocate ? old - bandwidth : old + bandwidth;
if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
break;
data[0] = cpu_to_be32(old);
data[1] = cpu_to_be32(new);
switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
data, sizeof(data))) {
case RCODE_GENERATION:
/* A generation change frees all bandwidth. */
return allocate ? -EAGAIN : bandwidth;
case RCODE_COMPLETE:
if (be32_to_cpup(data) == old)
return bandwidth;
old = be32_to_cpup(data);
/* Fall through. */
}
}
return -EIO;
}
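
The retry loop above depends on the compare_swap lock semantics of IEEE 1212: the request carries the value the caller expects to find and the value it wants to store, the IRM answers with the register's prior contents, and the store only took effect if that answer matches the expectation. A rough model of the register-side behaviour (illustration only, not kernel code); manage_channel() below builds on the same model:

/* Model of an IRM register under compare_swap (illustration only). */
static u32 compare_swap_model(u32 *reg, u32 expected_old, u32 new_value)
{
	u32 prior = *reg;

	if (prior == expected_old)
		*reg = new_value;
	return prior;	/* the caller succeeded iff prior == expected_old */
}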
static int manage_channel(struct fw_card *card, int irm_id, int generation,
u32 channels_mask, u64 offset, bool allocate)
{
__be32 data[2], c, all, old;
int i, retry = 5;
old = all = allocate ? cpu_to_be32(~0) : 0;
for (i = 0; i < 32; i++) {
if (!(channels_mask & 1 << i))
continue;
c = cpu_to_be32(1 << (31 - i));
if ((old & c) != (all & c))
continue;
data[0] = old;
data[1] = old ^ c;
switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
offset, data, sizeof(data))) {
case RCODE_GENERATION:
/* A generation change frees all channels. */
return allocate ? -EAGAIN : i;
case RCODE_COMPLETE:
if (data[0] == old)
return i;
old = data[0];
/* Is the IRM 1394a-2000 compliant? */
if ((data[0] & c) == (data[1] & c))
continue;
/* 1394-1995 IRM, fall through to retry. */
default:
if (retry--)
i--;
}
}
return -EIO;
}
static void deallocate_channel(struct fw_card *card, int irm_id,
int generation, int channel)
{
u32 mask;
u64 offset;
mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
manage_channel(card, irm_id, generation, mask, offset, false);
}
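
deallocate_channel() picks the right half of the 64-channel space: channels 0..31 live in CHANNELS_AVAILABLE_HI and 32..63 in CHANNELS_AVAILABLE_LO, while manage_channel() translates mask bit i into register bit 31 - i. A worked example of that mapping:

/*
 * Example mapping (illustration):
 *   channel  3 -> CHANNELS_AVAILABLE_HI, mask bit 3, register bit 28
 *   channel 35 -> CHANNELS_AVAILABLE_LO, mask bit 3, register bit 28
 */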
/**
* fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
*
* In parameters: card, generation, channels_mask, bandwidth, allocate
* Out parameters: channel, bandwidth
* This function blocks (sleeps) during communication with the IRM.
*
* Allocates or deallocates at most one channel out of channels_mask.
* channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
* (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
* channel 0 and LSB for channel 63.)
* Allocates or deallocates as many bandwidth allocation units as specified.
*
* Returns channel < 0 if no channel was allocated or deallocated.
* Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
*
* If generation is stale, deallocations succeed but allocations fail with
* channel = -EAGAIN.
*
* If channel allocation fails, no bandwidth will be allocated either.
* If bandwidth allocation fails, no channel will be allocated either.
* But deallocations of channel and bandwidth are tried independently
* of each other's success.
*/
void fw_iso_resource_manage(struct fw_card *card, int generation,
u64 channels_mask, int *channel, int *bandwidth,
bool allocate)
{
u32 channels_hi = channels_mask; /* channels 31...0 */
u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
int irm_id, ret, c = -EINVAL;
spin_lock_irq(&card->lock);
irm_id = card->irm_node->node_id;
spin_unlock_irq(&card->lock);
if (channels_hi)
c = manage_channel(card, irm_id, generation, channels_hi,
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
if (channels_lo && c < 0) {
c = manage_channel(card, irm_id, generation, channels_lo,
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
if (c >= 0)
c += 32;
}
*channel = c;
if (allocate && channels_mask != 0 && c < 0)
*bandwidth = 0;
if (*bandwidth == 0)
return;
ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
if (ret < 0)
*bandwidth = 0;
if (allocate && ret < 0 && c >= 0) {
deallocate_channel(card, irm_id, generation, c);
*channel = ret;
}
}
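
A hedged usage sketch for fw_iso_resource_manage(): ask for any one of the 64 channels plus some bandwidth within the current bus generation. The names and values below (the bandwidth figure in particular) are illustrative only.

/* Hypothetical caller; "card" and "generation" are assumed to be in scope. */
int channel, bandwidth = 2000;	/* example amount, in bandwidth allocation units */

fw_iso_resource_manage(card, generation, ~0ULL /* any channel 0..63 */,
		       &channel, &bandwidth, true);
if (channel >= 0 && bandwidth > 0) {
	/* got both; remember them so they can be released on the same terms */
} else if (channel == -EAGAIN) {
	/* generation was stale; retry after the next bus reset */
}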
...@@ -205,6 +205,7 @@ struct fw_ohci { ...@@ -205,6 +205,7 @@ struct fw_ohci {
u32 it_context_mask; u32 it_context_mask;
struct iso_context *it_context_list; struct iso_context *it_context_list;
u64 ir_context_channels;
u32 ir_context_mask; u32 ir_context_mask;
struct iso_context *ir_context_list; struct iso_context *ir_context_list;
}; };
...@@ -441,9 +442,8 @@ static inline void flush_writes(const struct fw_ohci *ohci) ...@@ -441,9 +442,8 @@ static inline void flush_writes(const struct fw_ohci *ohci)
reg_read(ohci, OHCI1394_Version); reg_read(ohci, OHCI1394_Version);
} }
static int static int ohci_update_phy_reg(struct fw_card *card, int addr,
ohci_update_phy_reg(struct fw_card *card, int addr, int clear_bits, int set_bits)
int clear_bits, int set_bits)
{ {
struct fw_ohci *ohci = fw_ohci(card); struct fw_ohci *ohci = fw_ohci(card);
u32 val, old; u32 val, old;
...@@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned long data) ...@@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned long data)
} }
} }
static int static int ar_context_init(struct ar_context *ctx,
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs) struct fw_ohci *ohci, u32 regs)
{ {
struct ar_buffer ab; struct ar_buffer ab;
...@@ -690,8 +690,7 @@ static void ar_context_run(struct ar_context *ctx) ...@@ -690,8 +690,7 @@ static void ar_context_run(struct ar_context *ctx)
flush_writes(ctx->ohci); flush_writes(ctx->ohci);
} }
static struct descriptor * static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
find_branch_descriptor(struct descriptor *d, int z)
{ {
int b, key; int b, key;
...@@ -751,8 +750,7 @@ static void context_tasklet(unsigned long data) ...@@ -751,8 +750,7 @@ static void context_tasklet(unsigned long data)
* Allocate a new buffer and add it to the list of free buffers for this * Allocate a new buffer and add it to the list of free buffers for this
* context. Must be called with ohci->lock held. * context. Must be called with ohci->lock held.
*/ */
static int static int context_add_buffer(struct context *ctx)
context_add_buffer(struct context *ctx)
{ {
struct descriptor_buffer *desc; struct descriptor_buffer *desc;
dma_addr_t uninitialized_var(bus_addr); dma_addr_t uninitialized_var(bus_addr);
...@@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx) ...@@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx)
return 0; return 0;
} }
static int static int context_init(struct context *ctx, struct fw_ohci *ohci,
context_init(struct context *ctx, struct fw_ohci *ohci, u32 regs, descriptor_callback_t callback)
u32 regs, descriptor_callback_t callback)
{ {
ctx->ohci = ohci; ctx->ohci = ohci;
ctx->regs = regs; ctx->regs = regs;
...@@ -814,8 +811,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci, ...@@ -814,8 +811,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci,
return 0; return 0;
} }
static void static void context_release(struct context *ctx)
context_release(struct context *ctx)
{ {
struct fw_card *card = &ctx->ohci->card; struct fw_card *card = &ctx->ohci->card;
struct descriptor_buffer *desc, *tmp; struct descriptor_buffer *desc, *tmp;
...@@ -827,8 +823,8 @@ context_release(struct context *ctx) ...@@ -827,8 +823,8 @@ context_release(struct context *ctx)
} }
/* Must be called with ohci->lock held */ /* Must be called with ohci->lock held */
static struct descriptor * static struct descriptor *context_get_descriptors(struct context *ctx,
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus) int z, dma_addr_t *d_bus)
{ {
struct descriptor *d = NULL; struct descriptor *d = NULL;
struct descriptor_buffer *desc = ctx->buffer_tail; struct descriptor_buffer *desc = ctx->buffer_tail;
...@@ -912,8 +908,8 @@ struct driver_data { ...@@ -912,8 +908,8 @@ struct driver_data {
* Must always be called with the ohci->lock held to ensure proper * Must always be called with the ohci->lock held to ensure proper
* generation handling and locking around packet queue manipulation. * generation handling and locking around packet queue manipulation.
*/ */
static int static int at_context_queue_packet(struct context *ctx,
at_context_queue_packet(struct context *ctx, struct fw_packet *packet) struct fw_packet *packet)
{ {
struct fw_ohci *ohci = ctx->ohci; struct fw_ohci *ohci = ctx->ohci;
dma_addr_t d_bus, uninitialized_var(payload_bus); dma_addr_t d_bus, uninitialized_var(payload_bus);
...@@ -940,7 +936,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet) ...@@ -940,7 +936,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
*/ */
header = (__le32 *) &d[1]; header = (__le32 *) &d[1];
if (packet->header_length > 8) { switch (packet->header_length) {
case 16:
case 12:
header[0] = cpu_to_le32((packet->header[0] & 0xffff) | header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
(packet->speed << 16)); (packet->speed << 16));
header[1] = cpu_to_le32((packet->header[1] & 0xffff) | header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
...@@ -954,12 +952,27 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet) ...@@ -954,12 +952,27 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
header[3] = (__force __le32) packet->header[3]; header[3] = (__force __le32) packet->header[3];
d[0].req_count = cpu_to_le16(packet->header_length); d[0].req_count = cpu_to_le16(packet->header_length);
} else { break;
case 8:
header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
(packet->speed << 16)); (packet->speed << 16));
header[1] = cpu_to_le32(packet->header[0]); header[1] = cpu_to_le32(packet->header[0]);
header[2] = cpu_to_le32(packet->header[1]); header[2] = cpu_to_le32(packet->header[1]);
d[0].req_count = cpu_to_le16(12); d[0].req_count = cpu_to_le16(12);
break;
case 4:
header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
(packet->speed << 16));
header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
d[0].req_count = cpu_to_le16(8);
break;
default:
/* BUG(); */
packet->ack = RCODE_SEND_ERROR;
return -1;
} }
driver_data = (struct driver_data *) &d[3]; driver_data = (struct driver_data *) &d[3];
...@@ -1095,8 +1108,8 @@ static int handle_at_packet(struct context *context, ...@@ -1095,8 +1108,8 @@ static int handle_at_packet(struct context *context,
#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
static void static void handle_local_rom(struct fw_ohci *ohci,
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) struct fw_packet *packet, u32 csr)
{ {
struct fw_packet response; struct fw_packet response;
int tcode, length, i; int tcode, length, i;
...@@ -1122,8 +1135,8 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) ...@@ -1122,8 +1135,8 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
fw_core_handle_response(&ohci->card, &response); fw_core_handle_response(&ohci->card, &response);
} }
static void static void handle_local_lock(struct fw_ohci *ohci,
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) struct fw_packet *packet, u32 csr)
{ {
struct fw_packet response; struct fw_packet response;
int tcode, length, ext_tcode, sel; int tcode, length, ext_tcode, sel;
...@@ -1164,8 +1177,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) ...@@ -1164,8 +1177,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
fw_core_handle_response(&ohci->card, &response); fw_core_handle_response(&ohci->card, &response);
} }
static void static void handle_local_request(struct context *ctx, struct fw_packet *packet)
handle_local_request(struct context *ctx, struct fw_packet *packet)
{ {
u64 offset; u64 offset;
u32 csr; u32 csr;
...@@ -1205,11 +1217,10 @@ handle_local_request(struct context *ctx, struct fw_packet *packet) ...@@ -1205,11 +1217,10 @@ handle_local_request(struct context *ctx, struct fw_packet *packet)
} }
} }
static void static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
at_context_transmit(struct context *ctx, struct fw_packet *packet)
{ {
unsigned long flags; unsigned long flags;
int retval; int ret;
spin_lock_irqsave(&ctx->ohci->lock, flags); spin_lock_irqsave(&ctx->ohci->lock, flags);
...@@ -1220,10 +1231,10 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet) ...@@ -1220,10 +1231,10 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet)
return; return;
} }
retval = at_context_queue_packet(ctx, packet); ret = at_context_queue_packet(ctx, packet);
spin_unlock_irqrestore(&ctx->ohci->lock, flags); spin_unlock_irqrestore(&ctx->ohci->lock, flags);
if (retval < 0) if (ret < 0)
packet->callback(packet, &ctx->ohci->card, packet->ack); packet->callback(packet, &ctx->ohci->card, packet->ack);
} }
...@@ -1590,12 +1601,12 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length) ...@@ -1590,12 +1601,12 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
return 0; return 0;
} }
static int static int ohci_set_config_rom(struct fw_card *card,
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) u32 *config_rom, size_t length)
{ {
struct fw_ohci *ohci; struct fw_ohci *ohci;
unsigned long flags; unsigned long flags;
int retval = -EBUSY; int ret = -EBUSY;
__be32 *next_config_rom; __be32 *next_config_rom;
dma_addr_t uninitialized_var(next_config_rom_bus); dma_addr_t uninitialized_var(next_config_rom_bus);
...@@ -1649,7 +1660,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) ...@@ -1649,7 +1660,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
reg_write(ohci, OHCI1394_ConfigROMmap, reg_write(ohci, OHCI1394_ConfigROMmap,
ohci->next_config_rom_bus); ohci->next_config_rom_bus);
retval = 0; ret = 0;
} }
spin_unlock_irqrestore(&ohci->lock, flags); spin_unlock_irqrestore(&ohci->lock, flags);
...@@ -1661,13 +1672,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) ...@@ -1661,13 +1672,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
* controller could need to access it before the bus reset * controller could need to access it before the bus reset
* takes effect. * takes effect.
*/ */
if (retval == 0) if (ret == 0)
fw_core_initiate_bus_reset(&ohci->card, 1); fw_core_initiate_bus_reset(&ohci->card, 1);
else else
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
next_config_rom, next_config_rom_bus); next_config_rom, next_config_rom_bus);
return retval; return ret;
} }
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
...@@ -1689,7 +1700,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) ...@@ -1689,7 +1700,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
struct fw_ohci *ohci = fw_ohci(card); struct fw_ohci *ohci = fw_ohci(card);
struct context *ctx = &ohci->at_request_ctx; struct context *ctx = &ohci->at_request_ctx;
struct driver_data *driver_data = packet->driver_data; struct driver_data *driver_data = packet->driver_data;
int retval = -ENOENT; int ret = -ENOENT;
tasklet_disable(&ctx->tasklet); tasklet_disable(&ctx->tasklet);
...@@ -1704,23 +1715,22 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) ...@@ -1704,23 +1715,22 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
driver_data->packet = NULL; driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED; packet->ack = RCODE_CANCELLED;
packet->callback(packet, &ohci->card, packet->ack); packet->callback(packet, &ohci->card, packet->ack);
retval = 0; ret = 0;
out: out:
tasklet_enable(&ctx->tasklet); tasklet_enable(&ctx->tasklet);
return retval; return ret;
} }
static int static int ohci_enable_phys_dma(struct fw_card *card,
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) int node_id, int generation)
{ {
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
return 0; return 0;
#else #else
struct fw_ohci *ohci = fw_ohci(card); struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags; unsigned long flags;
int n, retval = 0; int n, ret = 0;
/* /*
* FIXME: Make sure this bitmask is cleared when we clear the busReset * FIXME: Make sure this bitmask is cleared when we clear the busReset
...@@ -1730,7 +1740,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) ...@@ -1730,7 +1740,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
spin_lock_irqsave(&ohci->lock, flags); spin_lock_irqsave(&ohci->lock, flags);
if (ohci->generation != generation) { if (ohci->generation != generation) {
retval = -ESTALE; ret = -ESTALE;
goto out; goto out;
} }
...@@ -1748,12 +1758,12 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) ...@@ -1748,12 +1758,12 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
flush_writes(ohci); flush_writes(ohci);
out: out:
spin_unlock_irqrestore(&ohci->lock, flags); spin_unlock_irqrestore(&ohci->lock, flags);
return retval;
return ret;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
} }
static u64 static u64 ohci_get_bus_time(struct fw_card *card)
ohci_get_bus_time(struct fw_card *card)
{ {
struct fw_ohci *ohci = fw_ohci(card); struct fw_ohci *ohci = fw_ohci(card);
u32 cycle_time; u32 cycle_time;
...@@ -1765,6 +1775,28 @@ ohci_get_bus_time(struct fw_card *card) ...@@ -1765,6 +1775,28 @@ ohci_get_bus_time(struct fw_card *card)
return bus_time; return bus_time;
} }
static void copy_iso_headers(struct iso_context *ctx, void *p)
{
int i = ctx->header_length;
if (i + ctx->base.header_size > PAGE_SIZE)
return;
/*
* The iso header is byteswapped to little endian by
* the controller, but the remaining header quadlets
* are big endian. We want to present all the headers
* as big endian, so we have to swap the first quadlet.
*/
if (ctx->base.header_size > 0)
*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
if (ctx->base.header_size > 4)
*(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
if (ctx->base.header_size > 8)
memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
ctx->header_length += ctx->base.header_size;
}
static int handle_ir_dualbuffer_packet(struct context *context, static int handle_ir_dualbuffer_packet(struct context *context,
struct descriptor *d, struct descriptor *d,
struct descriptor *last) struct descriptor *last)
...@@ -1775,7 +1807,6 @@ static int handle_ir_dualbuffer_packet(struct context *context, ...@@ -1775,7 +1807,6 @@ static int handle_ir_dualbuffer_packet(struct context *context,
__le32 *ir_header; __le32 *ir_header;
size_t header_length; size_t header_length;
void *p, *end; void *p, *end;
int i;
if (db->first_res_count != 0 && db->second_res_count != 0) { if (db->first_res_count != 0 && db->second_res_count != 0) {
if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
...@@ -1788,25 +1819,14 @@ static int handle_ir_dualbuffer_packet(struct context *context, ...@@ -1788,25 +1819,14 @@ static int handle_ir_dualbuffer_packet(struct context *context,
header_length = le16_to_cpu(db->first_req_count) - header_length = le16_to_cpu(db->first_req_count) -
le16_to_cpu(db->first_res_count); le16_to_cpu(db->first_res_count);
i = ctx->header_length;
p = db + 1; p = db + 1;
end = p + header_length; end = p + header_length;
while (p < end && i + ctx->base.header_size <= PAGE_SIZE) { while (p < end) {
/* copy_iso_headers(ctx, p);
* The iso header is byteswapped to little endian by
* the controller, but the remaining header quadlets
* are big endian. We want to present all the headers
* as big endian, so we have to swap the first
* quadlet.
*/
*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
i += ctx->base.header_size;
ctx->excess_bytes += ctx->excess_bytes +=
(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
p += ctx->base.header_size + 4; p += max(ctx->base.header_size, (size_t)8);
} }
ctx->header_length = i;
ctx->excess_bytes -= le16_to_cpu(db->second_req_count) - ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
le16_to_cpu(db->second_res_count); le16_to_cpu(db->second_res_count);
...@@ -1832,7 +1852,6 @@ static int handle_ir_packet_per_buffer(struct context *context, ...@@ -1832,7 +1852,6 @@ static int handle_ir_packet_per_buffer(struct context *context,
struct descriptor *pd; struct descriptor *pd;
__le32 *ir_header; __le32 *ir_header;
void *p; void *p;
int i;
for (pd = d; pd <= last; pd++) { for (pd = d; pd <= last; pd++) {
if (pd->transfer_status) if (pd->transfer_status)
...@@ -1842,21 +1861,8 @@ static int handle_ir_packet_per_buffer(struct context *context, ...@@ -1842,21 +1861,8 @@ static int handle_ir_packet_per_buffer(struct context *context,
/* Descriptor(s) not done yet, stop iteration */ /* Descriptor(s) not done yet, stop iteration */
return 0; return 0;
i = ctx->header_length; p = last + 1;
p = last + 1; copy_iso_headers(ctx, p);
if (ctx->base.header_size > 0 &&
i + ctx->base.header_size <= PAGE_SIZE) {
/*
* The iso header is byteswapped to little endian by
* the controller, but the remaining header quadlets
* are big endian. We want to present all the headers
* as big endian, so we have to swap the first quadlet.
*/
*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
ctx->header_length += ctx->base.header_size;
}
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
ir_header = (__le32 *) p; ir_header = (__le32 *) p;
...@@ -1888,21 +1894,24 @@ static int handle_it_packet(struct context *context, ...@@ -1888,21 +1894,24 @@ static int handle_it_packet(struct context *context,
return 1; return 1;
} }
static struct fw_iso_context * static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) int type, int channel, size_t header_size)
{ {
struct fw_ohci *ohci = fw_ohci(card); struct fw_ohci *ohci = fw_ohci(card);
struct iso_context *ctx, *list; struct iso_context *ctx, *list;
descriptor_callback_t callback; descriptor_callback_t callback;
u64 *channels, dont_care = ~0ULL;
u32 *mask, regs; u32 *mask, regs;
unsigned long flags; unsigned long flags;
int index, retval = -ENOMEM; int index, ret = -ENOMEM;
if (type == FW_ISO_CONTEXT_TRANSMIT) { if (type == FW_ISO_CONTEXT_TRANSMIT) {
channels = &dont_care;
mask = &ohci->it_context_mask; mask = &ohci->it_context_mask;
list = ohci->it_context_list; list = ohci->it_context_list;
callback = handle_it_packet; callback = handle_it_packet;
} else { } else {
channels = &ohci->ir_context_channels;
mask = &ohci->ir_context_mask; mask = &ohci->ir_context_mask;
list = ohci->ir_context_list; list = ohci->ir_context_list;
if (ohci->use_dualbuffer) if (ohci->use_dualbuffer)
...@@ -1912,9 +1921,11 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) ...@@ -1912,9 +1921,11 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
} }
spin_lock_irqsave(&ohci->lock, flags); spin_lock_irqsave(&ohci->lock, flags);
index = ffs(*mask) - 1; index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
if (index >= 0) if (index >= 0) {
*channels &= ~(1ULL << channel);
*mask &= ~(1 << index); *mask &= ~(1 << index);
}
spin_unlock_irqrestore(&ohci->lock, flags); spin_unlock_irqrestore(&ohci->lock, flags);
if (index < 0) if (index < 0)
...@@ -1932,8 +1943,8 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) ...@@ -1932,8 +1943,8 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
if (ctx->header == NULL) if (ctx->header == NULL)
goto out; goto out;
retval = context_init(&ctx->context, ohci, regs, callback); ret = context_init(&ctx->context, ohci, regs, callback);
if (retval < 0) if (ret < 0)
goto out_with_header; goto out_with_header;
return &ctx->base; return &ctx->base;
...@@ -1945,7 +1956,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) ...@@ -1945,7 +1956,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
*mask |= 1 << index; *mask |= 1 << index;
spin_unlock_irqrestore(&ohci->lock, flags); spin_unlock_irqrestore(&ohci->lock, flags);
return ERR_PTR(retval); return ERR_PTR(ret);
} }
static int ohci_start_iso(struct fw_iso_context *base, static int ohci_start_iso(struct fw_iso_context *base,
...@@ -2024,16 +2035,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base) ...@@ -2024,16 +2035,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
} else { } else {
index = ctx - ohci->ir_context_list; index = ctx - ohci->ir_context_list;
ohci->ir_context_mask |= 1 << index; ohci->ir_context_mask |= 1 << index;
ohci->ir_context_channels |= 1ULL << base->channel;
} }
spin_unlock_irqrestore(&ohci->lock, flags); spin_unlock_irqrestore(&ohci->lock, flags);
} }
static int static int ohci_queue_iso_transmit(struct fw_iso_context *base,
ohci_queue_iso_transmit(struct fw_iso_context *base, struct fw_iso_packet *packet,
struct fw_iso_packet *packet, struct fw_iso_buffer *buffer,
struct fw_iso_buffer *buffer, unsigned long payload)
unsigned long payload)
{ {
struct iso_context *ctx = container_of(base, struct iso_context, base); struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d, *last, *pd; struct descriptor *d, *last, *pd;
...@@ -2128,11 +2139,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base, ...@@ -2128,11 +2139,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
return 0; return 0;
} }
static int static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, struct fw_iso_packet *packet,
struct fw_iso_packet *packet, struct fw_iso_buffer *buffer,
struct fw_iso_buffer *buffer, unsigned long payload)
unsigned long payload)
{ {
struct iso_context *ctx = container_of(base, struct iso_context, base); struct iso_context *ctx = container_of(base, struct iso_context, base);
struct db_descriptor *db = NULL; struct db_descriptor *db = NULL;
...@@ -2151,11 +2161,11 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, ...@@ -2151,11 +2161,11 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
z = 2; z = 2;
/* /*
* The OHCI controller puts the status word in the header * The OHCI controller puts the isochronous header and trailer in the
* buffer too, so we need 4 extra bytes per packet. * buffer, so we need at least 8 bytes.
*/ */
packet_count = p->header_length / ctx->base.header_size; packet_count = p->header_length / ctx->base.header_size;
header_size = packet_count * (ctx->base.header_size + 4); header_size = packet_count * max(ctx->base.header_size, (size_t)8);
/* Get header size in number of descriptors. */ /* Get header size in number of descriptors. */
header_z = DIV_ROUND_UP(header_size, sizeof(*d)); header_z = DIV_ROUND_UP(header_size, sizeof(*d));
...@@ -2173,7 +2183,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, ...@@ -2173,7 +2183,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
db = (struct db_descriptor *) d; db = (struct db_descriptor *) d;
db->control = cpu_to_le16(DESCRIPTOR_STATUS | db->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_BRANCH_ALWAYS); DESCRIPTOR_BRANCH_ALWAYS);
db->first_size = cpu_to_le16(ctx->base.header_size + 4); db->first_size =
cpu_to_le16(max(ctx->base.header_size, (size_t)8));
if (p->skip && rest == p->payload_length) { if (p->skip && rest == p->payload_length) {
db->control |= cpu_to_le16(DESCRIPTOR_WAIT); db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
db->first_req_count = db->first_size; db->first_req_count = db->first_size;
...@@ -2208,11 +2219,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, ...@@ -2208,11 +2219,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
return 0; return 0;
} }
static int static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, struct fw_iso_packet *packet,
struct fw_iso_packet *packet, struct fw_iso_buffer *buffer,
struct fw_iso_buffer *buffer, unsigned long payload)
unsigned long payload)
{ {
struct iso_context *ctx = container_of(base, struct iso_context, base); struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d = NULL, *pd = NULL; struct descriptor *d = NULL, *pd = NULL;
...@@ -2223,11 +2233,11 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, ...@@ -2223,11 +2233,11 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
int page, offset, packet_count, header_size, payload_per_buffer; int page, offset, packet_count, header_size, payload_per_buffer;
/* /*
* The OHCI controller puts the status word in the * The OHCI controller puts the isochronous header and trailer in the
* buffer too, so we need 4 extra bytes per packet. * buffer, so we need at least 8 bytes.
*/ */
packet_count = p->header_length / ctx->base.header_size; packet_count = p->header_length / ctx->base.header_size;
header_size = ctx->base.header_size + 4; header_size = max(ctx->base.header_size, (size_t)8);
/* Get header size in number of descriptors. */ /* Get header size in number of descriptors. */
header_z = DIV_ROUND_UP(header_size, sizeof(*d)); header_z = DIV_ROUND_UP(header_size, sizeof(*d));
...@@ -2286,29 +2296,27 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, ...@@ -2286,29 +2296,27 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
return 0; return 0;
} }
static int static int ohci_queue_iso(struct fw_iso_context *base,
ohci_queue_iso(struct fw_iso_context *base, struct fw_iso_packet *packet,
struct fw_iso_packet *packet, struct fw_iso_buffer *buffer,
struct fw_iso_buffer *buffer, unsigned long payload)
unsigned long payload)
{ {
struct iso_context *ctx = container_of(base, struct iso_context, base); struct iso_context *ctx = container_of(base, struct iso_context, base);
unsigned long flags; unsigned long flags;
int retval; int ret;
spin_lock_irqsave(&ctx->context.ohci->lock, flags); spin_lock_irqsave(&ctx->context.ohci->lock, flags);
if (base->type == FW_ISO_CONTEXT_TRANSMIT) if (base->type == FW_ISO_CONTEXT_TRANSMIT)
retval = ohci_queue_iso_transmit(base, packet, buffer, payload); ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
else if (ctx->context.ohci->use_dualbuffer) else if (ctx->context.ohci->use_dualbuffer)
retval = ohci_queue_iso_receive_dualbuffer(base, packet, ret = ohci_queue_iso_receive_dualbuffer(base, packet,
buffer, payload); buffer, payload);
else else
retval = ohci_queue_iso_receive_packet_per_buffer(base, packet, ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
buffer, buffer, payload);
payload);
spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
return retval; return ret;
} }
static const struct fw_card_driver ohci_driver = { static const struct fw_card_driver ohci_driver = {
...@@ -2357,8 +2365,8 @@ static void ohci_pmac_off(struct pci_dev *dev) ...@@ -2357,8 +2365,8 @@ static void ohci_pmac_off(struct pci_dev *dev)
#define ohci_pmac_off(dev) #define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */ #endif /* CONFIG_PPC_PMAC */
static int __devinit static int __devinit pci_probe(struct pci_dev *dev,
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) const struct pci_device_id *ent)
{ {
struct fw_ohci *ohci; struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version; u32 bus_options, max_receive, link_speed, version;
...@@ -2440,6 +2448,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) ...@@ -2440,6 +2448,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
ohci->it_context_list = kzalloc(size, GFP_KERNEL); ohci->it_context_list = kzalloc(size, GFP_KERNEL);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
...@@ -2467,11 +2476,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) ...@@ -2467,11 +2476,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
reg_read(ohci, OHCI1394_GUIDLo); reg_read(ohci, OHCI1394_GUIDLo);
err = fw_card_add(&ohci->card, max_receive, link_speed, guid); err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
if (err < 0) if (err)
goto fail_self_id; goto fail_self_id;
fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
dev_name(&dev->dev), version >> 16, version & 0xff); dev_name(&dev->dev), version >> 16, version & 0xff);
return 0; return 0;
fail_self_id: fail_self_id:
......
...@@ -392,20 +392,18 @@ static const struct { ...@@ -392,20 +392,18 @@ static const struct {
} }
}; };
static void static void free_orb(struct kref *kref)
free_orb(struct kref *kref)
{ {
struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
kfree(orb); kfree(orb);
} }
static void static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
sbp2_status_write(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source,
int tcode, int destination, int source, int generation, int speed,
int generation, int speed, unsigned long long offset,
unsigned long long offset, void *payload, size_t length, void *callback_data)
void *payload, size_t length, void *callback_data)
{ {
struct sbp2_logical_unit *lu = callback_data; struct sbp2_logical_unit *lu = callback_data;
struct sbp2_orb *orb; struct sbp2_orb *orb;
...@@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request, ...@@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
fw_send_response(card, request, RCODE_COMPLETE); fw_send_response(card, request, RCODE_COMPLETE);
} }
static void static void complete_transaction(struct fw_card *card, int rcode,
complete_transaction(struct fw_card *card, int rcode, void *payload, size_t length, void *data)
void *payload, size_t length, void *data)
{ {
struct sbp2_orb *orb = data; struct sbp2_orb *orb = data;
unsigned long flags; unsigned long flags;
...@@ -482,9 +479,8 @@ complete_transaction(struct fw_card *card, int rcode, ...@@ -482,9 +479,8 @@ complete_transaction(struct fw_card *card, int rcode,
kref_put(&orb->kref, free_orb); kref_put(&orb->kref, free_orb);
} }
static void static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, int node_id, int generation, u64 offset)
int node_id, int generation, u64 offset)
{ {
struct fw_device *device = fw_device(lu->tgt->unit->device.parent); struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
unsigned long flags; unsigned long flags;
...@@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) ...@@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
return retval; return retval;
} }
static void static void complete_management_orb(struct sbp2_orb *base_orb,
complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) struct sbp2_status *status)
{ {
struct sbp2_management_orb *orb = struct sbp2_management_orb *orb =
container_of(base_orb, struct sbp2_management_orb, base); container_of(base_orb, struct sbp2_management_orb, base);
...@@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) ...@@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
complete(&orb->done); complete(&orb->done);
} }
static int static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, int generation, int function,
int generation, int function, int lun_or_login_id, int lun_or_login_id, void *response)
void *response)
{ {
struct fw_device *device = fw_device(lu->tgt->unit->device.parent); struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
struct sbp2_management_orb *orb; struct sbp2_management_orb *orb;
...@@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu) ...@@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
&d, sizeof(d)); &d, sizeof(d));
} }
static void static void complete_agent_reset_write_no_wait(struct fw_card *card,
complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, int rcode, void *payload, size_t length, void *data)
void *payload, size_t length, void *data)
{ {
kfree(data); kfree(data);
} }
...@@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struct device *card_device, ...@@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struct device *card_device,
sizeof(orb->page_table), DMA_TO_DEVICE); sizeof(orb->page_table), DMA_TO_DEVICE);
} }
static unsigned int static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
{ {
int sam_status; int sam_status;
...@@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) ...@@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
} }
} }
static void static void complete_command_orb(struct sbp2_orb *base_orb,
complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) struct sbp2_status *status)
{ {
struct sbp2_command_orb *orb = struct sbp2_command_orb *orb =
container_of(base_orb, struct sbp2_command_orb, base); container_of(base_orb, struct sbp2_command_orb, base);
...@@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) ...@@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
orb->done(orb->cmd); orb->done(orb->cmd);
} }
static int static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device, struct fw_device *device, struct sbp2_logical_unit *lu)
struct sbp2_logical_unit *lu)
{ {
struct scatterlist *sg = scsi_sglist(orb->cmd); struct scatterlist *sg = scsi_sglist(orb->cmd);
int i, n; int i, n;
...@@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd) ...@@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
* This is the concatenation of target port identifier and logical unit * This is the concatenation of target port identifier and logical unit
* identifier as per SAM-2...SAM-4 annex A. * identifier as per SAM-2...SAM-4 annex A.
*/ */
static ssize_t static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, struct device_attribute *attr, char *buf)
char *buf)
{ {
struct scsi_device *sdev = to_scsi_device(dev); struct scsi_device *sdev = to_scsi_device(dev);
struct sbp2_logical_unit *lu; struct sbp2_logical_unit *lu;
......
...@@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struct fw_card * card, ...@@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struct fw_card * card,
struct fw_node * node, struct fw_node * node,
struct fw_node * parent); struct fw_node * parent);
static void static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
for_each_fw_node(struct fw_card *card, struct fw_node *root, fw_node_callback_t callback)
fw_node_callback_t callback)
{ {
struct list_head list; struct list_head list;
struct fw_node *node, *next, *child, *parent; struct fw_node *node, *next, *child, *parent;
...@@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root, ...@@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root,
fw_node_put(node); fw_node_put(node);
} }
static void static void report_lost_node(struct fw_card *card,
report_lost_node(struct fw_card *card, struct fw_node *node, struct fw_node *parent)
struct fw_node *node, struct fw_node *parent)
{ {
fw_node_event(card, node, FW_NODE_DESTROYED); fw_node_event(card, node, FW_NODE_DESTROYED);
fw_node_put(node); fw_node_put(node);
...@@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card, ...@@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card,
card->bm_retries = 0; card->bm_retries = 0;
} }
static void static void report_found_node(struct fw_card *card,
report_found_node(struct fw_card *card, struct fw_node *node, struct fw_node *parent)
struct fw_node *node, struct fw_node *parent)
{ {
int b_path = (node->phy_speed == SCODE_BETA); int b_path = (node->phy_speed == SCODE_BETA);
...@@ -415,8 +412,7 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) ...@@ -415,8 +412,7 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
* found, lost or updated. Update the nodes in the card topology tree * found, lost or updated. Update the nodes in the card topology tree
* as we go. * as we go.
*/ */
static void static void update_tree(struct fw_card *card, struct fw_node *root)
update_tree(struct fw_card *card, struct fw_node *root)
{ {
struct list_head list0, list1; struct list_head list0, list1;
struct fw_node *node0, *node1, *next1; struct fw_node *node0, *node1, *next1;
...@@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct fw_node *root) ...@@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct fw_node *root)
} }
} }
static void static void update_topology_map(struct fw_card *card,
update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) u32 *self_ids, int self_id_count)
{ {
int node_count; int node_count;
...@@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) ...@@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
fw_compute_block_crc(card->topology_map); fw_compute_block_crc(card->topology_map);
} }
void void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
fw_core_handle_bus_reset(struct fw_card *card, int self_id_count, u32 *self_ids)
int node_id, int generation,
int self_id_count, u32 * self_ids)
{ {
struct fw_node *local_node; struct fw_node *local_node;
unsigned long flags; unsigned long flags;
...@@ -532,6 +526,7 @@ fw_core_handle_bus_reset(struct fw_card *card, ...@@ -532,6 +526,7 @@ fw_core_handle_bus_reset(struct fw_card *card,
spin_lock_irqsave(&card->lock, flags); spin_lock_irqsave(&card->lock, flags);
card->broadcast_channel_allocated = false;
card->node_id = node_id; card->node_id = node_id;
/* /*
* Update node_id before generation to prevent anybody from using * Update node_id before generation to prevent anybody from using
......
...@@ -19,6 +19,11 @@ ...@@ -19,6 +19,11 @@
#ifndef __fw_topology_h #ifndef __fw_topology_h
#define __fw_topology_h #define __fw_topology_h
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/atomic.h>
enum { enum {
FW_NODE_CREATED, FW_NODE_CREATED,
FW_NODE_UPDATED, FW_NODE_UPDATED,
...@@ -51,26 +56,22 @@ struct fw_node { ...@@ -51,26 +56,22 @@ struct fw_node {
struct fw_node *ports[0]; struct fw_node *ports[0];
}; };
static inline struct fw_node * static inline struct fw_node *fw_node_get(struct fw_node *node)
fw_node_get(struct fw_node *node)
{ {
atomic_inc(&node->ref_count); atomic_inc(&node->ref_count);
return node; return node;
} }
static inline void static inline void fw_node_put(struct fw_node *node)
fw_node_put(struct fw_node *node)
{ {
if (atomic_dec_and_test(&node->ref_count)) if (atomic_dec_and_test(&node->ref_count))
kfree(node); kfree(node);
} }
void struct fw_card;
fw_destroy_nodes(struct fw_card *card); void fw_destroy_nodes(struct fw_card *card);
int
fw_compute_block_crc(u32 *block);
int fw_compute_block_crc(u32 *block);
#endif /* __fw_topology_h */ #endif /* __fw_topology_h */
...@@ -64,10 +64,8 @@ ...@@ -64,10 +64,8 @@
#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) #define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id) ((id) << 30) #define PHY_IDENTIFIER(id) ((id) << 30)
static int static int close_transaction(struct fw_transaction *transaction,
close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode)
struct fw_card *card, int rcode,
u32 *payload, size_t length)
{ {
struct fw_transaction *t; struct fw_transaction *t;
unsigned long flags; unsigned long flags;
...@@ -83,7 +81,7 @@ close_transaction(struct fw_transaction *transaction, ...@@ -83,7 +81,7 @@ close_transaction(struct fw_transaction *transaction,
spin_unlock_irqrestore(&card->lock, flags); spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) { if (&t->link != &card->transaction_list) {
t->callback(card, rcode, payload, length, t->callback_data); t->callback(card, rcode, NULL, 0, t->callback_data);
return 0; return 0;
} }
...@@ -94,9 +92,8 @@ close_transaction(struct fw_transaction *transaction, ...@@ -94,9 +92,8 @@ close_transaction(struct fw_transaction *transaction,
* Only valid for transactions that are potentially pending (ie have * Only valid for transactions that are potentially pending (ie have
* been sent). * been sent).
*/ */
int int fw_cancel_transaction(struct fw_card *card,
fw_cancel_transaction(struct fw_card *card, struct fw_transaction *transaction)
struct fw_transaction *transaction)
{ {
/* /*
* Cancel the packet transmission if it's still queued. That * Cancel the packet transmission if it's still queued. That
...@@ -112,20 +109,19 @@ fw_cancel_transaction(struct fw_card *card, ...@@ -112,20 +109,19 @@ fw_cancel_transaction(struct fw_card *card,
* if the transaction is still pending and remove it in that case. * if the transaction is still pending and remove it in that case.
*/ */
return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0); return close_transaction(transaction, card, RCODE_CANCELLED);
} }
EXPORT_SYMBOL(fw_cancel_transaction); EXPORT_SYMBOL(fw_cancel_transaction);
static void static void transmit_complete_callback(struct fw_packet *packet,
transmit_complete_callback(struct fw_packet *packet, struct fw_card *card, int status)
struct fw_card *card, int status)
{ {
struct fw_transaction *t = struct fw_transaction *t =
container_of(packet, struct fw_transaction, packet); container_of(packet, struct fw_transaction, packet);
switch (status) { switch (status) {
case ACK_COMPLETE: case ACK_COMPLETE:
close_transaction(t, card, RCODE_COMPLETE, NULL, 0); close_transaction(t, card, RCODE_COMPLETE);
break; break;
case ACK_PENDING: case ACK_PENDING:
t->timestamp = packet->timestamp; t->timestamp = packet->timestamp;
...@@ -133,31 +129,42 @@ transmit_complete_callback(struct fw_packet *packet, ...@@ -133,31 +129,42 @@ transmit_complete_callback(struct fw_packet *packet,
case ACK_BUSY_X: case ACK_BUSY_X:
case ACK_BUSY_A: case ACK_BUSY_A:
case ACK_BUSY_B: case ACK_BUSY_B:
close_transaction(t, card, RCODE_BUSY, NULL, 0); close_transaction(t, card, RCODE_BUSY);
break; break;
case ACK_DATA_ERROR: case ACK_DATA_ERROR:
close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0); close_transaction(t, card, RCODE_DATA_ERROR);
break; break;
case ACK_TYPE_ERROR: case ACK_TYPE_ERROR:
close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0); close_transaction(t, card, RCODE_TYPE_ERROR);
break; break;
default: default:
/* /*
* In this case the ack is really a juju specific * In this case the ack is really a juju specific
* rcode, so just forward that to the callback. * rcode, so just forward that to the callback.
*/ */
close_transaction(t, card, status, NULL, 0); close_transaction(t, card, status);
break; break;
} }
} }
static void static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
int destination_id, int source_id, int generation, int speed, int destination_id, int source_id, int generation, int speed,
unsigned long long offset, void *payload, size_t length) unsigned long long offset, void *payload, size_t length)
{ {
int ext_tcode; int ext_tcode;
if (tcode == TCODE_STREAM_DATA) {
packet->header[0] =
HEADER_DATA_LENGTH(length) |
destination_id |
HEADER_TCODE(TCODE_STREAM_DATA);
packet->header_length = 4;
packet->payload = payload;
packet->payload_length = length;
goto common;
}
if (tcode > 0x10) { if (tcode > 0x10) {
ext_tcode = tcode & ~0x10; ext_tcode = tcode & ~0x10;
tcode = TCODE_LOCK_REQUEST; tcode = TCODE_LOCK_REQUEST;
...@@ -204,7 +211,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, ...@@ -204,7 +211,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
packet->payload_length = 0; packet->payload_length = 0;
break; break;
} }
common:
packet->speed = speed; packet->speed = speed;
packet->generation = generation; packet->generation = generation;
packet->ack = 0; packet->ack = 0;
...@@ -246,13 +253,14 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, ...@@ -246,13 +253,14 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
* @param callback function to be called when the transaction is completed * @param callback function to be called when the transaction is completed
* @param callback_data pointer to arbitrary data, which will be * @param callback_data pointer to arbitrary data, which will be
* passed to the callback * passed to the callback
*
* In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
* needs to synthesize @destination_id with fw_stream_packet_destination_id().
*/ */
void void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
fw_send_request(struct fw_card *card, struct fw_transaction *t, int destination_id, int generation, int speed,
int tcode, int destination_id, int generation, int speed, unsigned long long offset, void *payload, size_t length,
unsigned long long offset, fw_transaction_callback_t callback, void *callback_data)
void *payload, size_t length,
fw_transaction_callback_t callback, void *callback_data)
{ {
unsigned long flags; unsigned long flags;
int tlabel; int tlabel;
...@@ -322,16 +330,16 @@ static void transaction_callback(struct fw_card *card, int rcode, ...@@ -322,16 +330,16 @@ static void transaction_callback(struct fw_card *card, int rcode,
* Returns the RCODE. * Returns the RCODE.
*/ */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset, int generation, int speed, unsigned long long offset,
void *data, size_t length) void *payload, size_t length)
{ {
struct transaction_callback_data d; struct transaction_callback_data d;
struct fw_transaction t; struct fw_transaction t;
init_completion(&d.done); init_completion(&d.done);
d.payload = data; d.payload = payload;
fw_send_request(card, &t, tcode, destination_id, generation, speed, fw_send_request(card, &t, tcode, destination_id, generation, speed,
offset, data, length, transaction_callback, &d); offset, payload, length, transaction_callback, &d);
wait_for_completion(&d.done); wait_for_completion(&d.done);
return d.rcode; return d.rcode;
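For illustration only (not part of this patch), a caller might use the synchronous helper above to read the first Config ROM quadlet of a remote node; node_id, generation and speed are assumed to be known to the caller, and the quadlet arrives in big-endian byte order (hence the be32_to_cpu()):

	__be32 quadlet;
	int rcode;

	rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
				   node_id, generation, speed,
				   CSR_REGISTER_BASE | CSR_CONFIG_ROM,
				   &quadlet, 4);
	if (rcode == RCODE_COMPLETE)
		fw_notify("Config ROM header: %08x\n", be32_to_cpu(quadlet));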
...@@ -399,9 +407,8 @@ void fw_flush_transactions(struct fw_card *card) ...@@ -399,9 +407,8 @@ void fw_flush_transactions(struct fw_card *card)
} }
} }
static struct fw_address_handler * static struct fw_address_handler *lookup_overlapping_address_handler(
lookup_overlapping_address_handler(struct list_head *list, struct list_head *list, unsigned long long offset, size_t length)
unsigned long long offset, size_t length)
{ {
struct fw_address_handler *handler; struct fw_address_handler *handler;
...@@ -414,9 +421,8 @@ lookup_overlapping_address_handler(struct list_head *list, ...@@ -414,9 +421,8 @@ lookup_overlapping_address_handler(struct list_head *list,
return NULL; return NULL;
} }
static struct fw_address_handler * static struct fw_address_handler *lookup_enclosing_address_handler(
lookup_enclosing_address_handler(struct list_head *list, struct list_head *list, unsigned long long offset, size_t length)
unsigned long long offset, size_t length)
{ {
struct fw_address_handler *handler; struct fw_address_handler *handler;
...@@ -449,36 +455,44 @@ const struct fw_address_region fw_unit_space_region = ...@@ -449,36 +455,44 @@ const struct fw_address_region fw_unit_space_region =
#endif /* 0 */ #endif /* 0 */
/** /**
* Allocate a range of addresses in the node space of the OHCI * fw_core_add_address_handler - register for incoming requests
* controller. When a request is received that falls within the * @handler: callback
* specified address range, the specified callback is invoked. The * @region: region in the IEEE 1212 node space address range
* parameters passed to the callback give the details of the *
* particular request. * region->start, ->end, and handler->length have to be quadlet-aligned.
*
* When a request is received that falls within the specified address range,
* the specified callback is invoked. The parameters passed to the callback
* give the details of the particular request.
* *
* Return value: 0 on success, non-zero otherwise. * Return value: 0 on success, non-zero otherwise.
* The start offset of the handler's address region is determined by * The start offset of the handler's address region is determined by
* fw_core_add_address_handler() and is returned in handler->offset. * fw_core_add_address_handler() and is returned in handler->offset.
* The offset is quadlet-aligned.
*/ */
int int fw_core_add_address_handler(struct fw_address_handler *handler,
fw_core_add_address_handler(struct fw_address_handler *handler, const struct fw_address_region *region)
const struct fw_address_region *region)
{ {
struct fw_address_handler *other; struct fw_address_handler *other;
unsigned long flags; unsigned long flags;
int ret = -EBUSY; int ret = -EBUSY;
if (region->start & 0xffff000000000003ULL ||
region->end & 0xffff000000000003ULL ||
region->start >= region->end ||
handler->length & 3 ||
handler->length == 0)
return -EINVAL;
spin_lock_irqsave(&address_handler_lock, flags); spin_lock_irqsave(&address_handler_lock, flags);
handler->offset = roundup(region->start, 4); handler->offset = region->start;
while (handler->offset + handler->length <= region->end) { while (handler->offset + handler->length <= region->end) {
other = other =
lookup_overlapping_address_handler(&address_handler_list, lookup_overlapping_address_handler(&address_handler_list,
handler->offset, handler->offset,
handler->length); handler->length);
if (other != NULL) { if (other != NULL) {
handler->offset = handler->offset += other->length;
roundup(other->offset + other->length, 4);
} else { } else {
list_add_tail(&handler->link, &address_handler_list); list_add_tail(&handler->link, &address_handler_list);
ret = 0; ret = 0;
...@@ -493,12 +507,7 @@ fw_core_add_address_handler(struct fw_address_handler *handler, ...@@ -493,12 +507,7 @@ fw_core_add_address_handler(struct fw_address_handler *handler,
EXPORT_SYMBOL(fw_core_add_address_handler); EXPORT_SYMBOL(fw_core_add_address_handler);
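For illustration only (not part of this patch), a hypothetical client of the quadlet-aligned registration rules above might look roughly like this; the .address_callback member name is assumed from struct fw_address_handler, the region addresses are made-up example values, and the callback signature mirrors handle_topology_map()/handle_registers() in this file:

	static void handle_example_write(struct fw_card *card,
					 struct fw_request *request,
					 int tcode, int destination, int source,
					 int generation, int speed,
					 unsigned long long offset,
					 void *payload, size_t length,
					 void *callback_data)
	{
		/* accept writes, reject everything else */
		if (tcode == TCODE_WRITE_QUADLET_REQUEST ||
		    tcode == TCODE_WRITE_BLOCK_REQUEST)
			fw_send_response(card, request, RCODE_COMPLETE);
		else
			fw_send_response(card, request, RCODE_TYPE_ERROR);
	}

	/* example CSR range: quadlet-aligned start and end, start < end */
	static const struct fw_address_region example_region =
		{ .start = 0xfffff0000900ULL, .end = 0xfffff0000a00ULL };

	static struct fw_address_handler example_handler = {
		.length           = 0x100,		/* quadlet-aligned, non-zero */
		.address_callback = handle_example_write,	/* assumed member name */
	};

	/* on success, the assigned start address is left in example_handler.offset */
	ret = fw_core_add_address_handler(&example_handler, &example_region);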
/** /**
* Deallocate a range of addresses allocated with fw_allocate. This * fw_core_remove_address_handler - unregister an address handler
* will call the associated callback one last time with a the special
* tcode TCODE_DEALLOCATE, to let the client destroy the registered
* callback data. For convenience, the callback parameters offset and
* length are set to the start and the length respectively for the
* deallocated region, payload is set to NULL.
*/ */
void fw_core_remove_address_handler(struct fw_address_handler *handler) void fw_core_remove_address_handler(struct fw_address_handler *handler)
{ {
...@@ -518,9 +527,8 @@ struct fw_request { ...@@ -518,9 +527,8 @@ struct fw_request {
u32 data[0]; u32 data[0];
}; };
static void static void free_response_callback(struct fw_packet *packet,
free_response_callback(struct fw_packet *packet, struct fw_card *card, int status)
struct fw_card *card, int status)
{ {
struct fw_request *request; struct fw_request *request;
...@@ -528,9 +536,8 @@ free_response_callback(struct fw_packet *packet, ...@@ -528,9 +536,8 @@ free_response_callback(struct fw_packet *packet,
kfree(request); kfree(request);
} }
void void fw_fill_response(struct fw_packet *response, u32 *request_header,
fw_fill_response(struct fw_packet *response, u32 *request_header, int rcode, void *payload, size_t length)
int rcode, void *payload, size_t length)
{ {
int tcode, tlabel, extended_tcode, source, destination; int tcode, tlabel, extended_tcode, source, destination;
...@@ -588,8 +595,7 @@ fw_fill_response(struct fw_packet *response, u32 *request_header, ...@@ -588,8 +595,7 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
} }
EXPORT_SYMBOL(fw_fill_response); EXPORT_SYMBOL(fw_fill_response);
static struct fw_request * static struct fw_request *allocate_request(struct fw_packet *p)
allocate_request(struct fw_packet *p)
{ {
struct fw_request *request; struct fw_request *request;
u32 *data, length; u32 *data, length;
...@@ -649,8 +655,8 @@ allocate_request(struct fw_packet *p) ...@@ -649,8 +655,8 @@ allocate_request(struct fw_packet *p)
return request; return request;
} }
void void fw_send_response(struct fw_card *card,
fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) struct fw_request *request, int rcode)
{ {
/* unified transaction or broadcast transaction: don't respond */ /* unified transaction or broadcast transaction: don't respond */
if (request->ack != ACK_PENDING || if (request->ack != ACK_PENDING ||
...@@ -670,8 +676,7 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) ...@@ -670,8 +676,7 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
} }
EXPORT_SYMBOL(fw_send_response); EXPORT_SYMBOL(fw_send_response);
void void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{ {
struct fw_address_handler *handler; struct fw_address_handler *handler;
struct fw_request *request; struct fw_request *request;
...@@ -719,8 +724,7 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p) ...@@ -719,8 +724,7 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
} }
EXPORT_SYMBOL(fw_core_handle_request); EXPORT_SYMBOL(fw_core_handle_request);
void void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{ {
struct fw_transaction *t; struct fw_transaction *t;
unsigned long flags; unsigned long flags;
...@@ -793,12 +797,10 @@ static const struct fw_address_region topology_map_region = ...@@ -793,12 +797,10 @@ static const struct fw_address_region topology_map_region =
{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
.end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
static void static void handle_topology_map(struct fw_card *card, struct fw_request *request,
handle_topology_map(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation,
int tcode, int destination, int source, int speed, unsigned long long offset,
int generation, int speed, void *payload, size_t length, void *callback_data)
unsigned long long offset,
void *payload, size_t length, void *callback_data)
{ {
int i, start, end; int i, start, end;
__be32 *map; __be32 *map;
...@@ -832,12 +834,10 @@ static const struct fw_address_region registers_region = ...@@ -832,12 +834,10 @@ static const struct fw_address_region registers_region =
{ .start = CSR_REGISTER_BASE, { .start = CSR_REGISTER_BASE,
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
static void static void handle_registers(struct fw_card *card, struct fw_request *request,
handle_registers(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation,
int tcode, int destination, int source, int speed, unsigned long long offset,
int generation, int speed, void *payload, size_t length, void *callback_data)
unsigned long long offset,
void *payload, size_t length, void *callback_data)
{ {
int reg = offset & ~CSR_REGISTER_BASE; int reg = offset & ~CSR_REGISTER_BASE;
unsigned long long bus_time; unsigned long long bus_time;
...@@ -939,11 +939,11 @@ static struct fw_descriptor model_id_descriptor = { ...@@ -939,11 +939,11 @@ static struct fw_descriptor model_id_descriptor = {
static int __init fw_core_init(void) static int __init fw_core_init(void)
{ {
int retval; int ret;
retval = bus_register(&fw_bus_type); ret = bus_register(&fw_bus_type);
if (retval < 0) if (ret < 0)
return retval; return ret;
fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
if (fw_cdev_major < 0) { if (fw_cdev_major < 0) {
...@@ -951,19 +951,10 @@ static int __init fw_core_init(void) ...@@ -951,19 +951,10 @@ static int __init fw_core_init(void)
return fw_cdev_major; return fw_cdev_major;
} }
retval = fw_core_add_address_handler(&topology_map, fw_core_add_address_handler(&topology_map, &topology_map_region);
&topology_map_region); fw_core_add_address_handler(&registers, &registers_region);
BUG_ON(retval < 0); fw_core_add_descriptor(&vendor_id_descriptor);
fw_core_add_descriptor(&model_id_descriptor);
retval = fw_core_add_address_handler(&registers,
&registers_region);
BUG_ON(retval < 0);
/* Add the vendor textual descriptor. */
retval = fw_core_add_descriptor(&vendor_id_descriptor);
BUG_ON(retval < 0);
retval = fw_core_add_descriptor(&model_id_descriptor);
BUG_ON(retval < 0);
return 0; return 0;
} }
......
...@@ -82,14 +82,14 @@ ...@@ -82,14 +82,14 @@
#define CSR_SPEED_MAP 0x2000 #define CSR_SPEED_MAP 0x2000
#define CSR_SPEED_MAP_END 0x3000 #define CSR_SPEED_MAP_END 0x3000
#define BANDWIDTH_AVAILABLE_INITIAL 4915
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID (1 << 30) #define BROADCAST_CHANNEL_VALID (1 << 30)
#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) #define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) #define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
static inline void static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
{ {
u32 *dst = _dst; u32 *dst = _dst;
__be32 *src = _src; __be32 *src = _src;
...@@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_src, size_t size) ...@@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
dst[i] = be32_to_cpu(src[i]); dst[i] = be32_to_cpu(src[i]);
} }
static inline void static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
{ {
fw_memcpy_from_be32(_dst, _src, size); fw_memcpy_from_be32(_dst, _src, size);
} }
...@@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet, ...@@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
struct fw_card *card, int status); struct fw_card *card, int status);
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
void *data, void *data, size_t length,
size_t length,
void *callback_data); void *callback_data);
/* /*
...@@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(struct fw_card *card, ...@@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(struct fw_card *card,
void *data, size_t length, void *data, size_t length,
void *callback_data); void *callback_data);
typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
int node_id, int generation,
u32 *self_ids,
int self_id_count,
void *callback_data);
struct fw_packet { struct fw_packet {
int speed; int speed;
int generation; int generation;
...@@ -187,12 +179,6 @@ struct fw_transaction { ...@@ -187,12 +179,6 @@ struct fw_transaction {
void *callback_data; void *callback_data;
}; };
static inline struct fw_packet *
fw_packet(struct list_head *l)
{
return list_entry(l, struct fw_packet, link);
}
struct fw_address_handler { struct fw_address_handler {
u64 offset; u64 offset;
size_t length; size_t length;
...@@ -201,7 +187,6 @@ struct fw_address_handler { ...@@ -201,7 +187,6 @@ struct fw_address_handler {
struct list_head link; struct list_head link;
}; };
struct fw_address_region { struct fw_address_region {
u64 start; u64 start;
u64 end; u64 end;
...@@ -255,6 +240,7 @@ struct fw_card { ...@@ -255,6 +240,7 @@ struct fw_card {
int bm_retries; int bm_retries;
int bm_generation; int bm_generation;
bool broadcast_channel_allocated;
u32 broadcast_channel; u32 broadcast_channel;
u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
}; };
...@@ -315,10 +301,8 @@ struct fw_iso_packet { ...@@ -315,10 +301,8 @@ struct fw_iso_packet {
struct fw_iso_context; struct fw_iso_context;
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
u32 cycle, u32 cycle, size_t header_length,
size_t header_length, void *header, void *data);
void *header,
void *data);
/* /*
* An iso buffer is just a set of pages mapped for DMA in the * An iso buffer is just a set of pages mapped for DMA in the
...@@ -344,36 +328,25 @@ struct fw_iso_context { ...@@ -344,36 +328,25 @@ struct fw_iso_context {
void *callback_data; void *callback_data;
}; };
int int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
fw_iso_buffer_init(struct fw_iso_buffer *buffer, int page_count, enum dma_data_direction direction);
struct fw_card *card, int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
int page_count, void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
enum dma_data_direction direction);
int struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); int type, int channel, int speed, size_t header_size,
void fw_iso_callback_t callback, void *callback_data);
fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_context * struct fw_iso_buffer *buffer,
fw_iso_context_create(struct fw_card *card, int type, unsigned long payload);
int channel, int speed, size_t header_size, int fw_iso_context_start(struct fw_iso_context *ctx,
fw_iso_callback_t callback, void *callback_data); int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
void void fw_iso_context_destroy(struct fw_iso_context *ctx);
fw_iso_context_destroy(struct fw_iso_context *ctx);
void fw_iso_resource_manage(struct fw_card *card, int generation,
int u64 channels_mask, int *channel, int *bandwidth, bool allocate);
fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload);
int
fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int
fw_iso_context_stop(struct fw_iso_context *ctx);
struct fw_card_driver { struct fw_card_driver {
/* /*
...@@ -415,7 +388,7 @@ struct fw_card_driver { ...@@ -415,7 +388,7 @@ struct fw_card_driver {
struct fw_iso_context * struct fw_iso_context *
(*allocate_iso_context)(struct fw_card *card, (*allocate_iso_context)(struct fw_card *card,
int type, size_t header_size); int type, int channel, size_t header_size);
void (*free_iso_context)(struct fw_iso_context *ctx); void (*free_iso_context)(struct fw_iso_context *ctx);
int (*start_iso)(struct fw_iso_context *ctx, int (*start_iso)(struct fw_iso_context *ctx,
...@@ -429,54 +402,45 @@ struct fw_card_driver { ...@@ -429,54 +402,45 @@ struct fw_card_driver {
int (*stop_iso)(struct fw_iso_context *ctx); int (*stop_iso)(struct fw_iso_context *ctx);
}; };
int int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
void void fw_send_request(struct fw_card *card, struct fw_transaction *t,
fw_send_request(struct fw_card *card, struct fw_transaction *t,
int tcode, int destination_id, int generation, int speed, int tcode, int destination_id, int generation, int speed,
unsigned long long offset, void *data, size_t length, unsigned long long offset, void *payload, size_t length,
fw_transaction_callback_t callback, void *callback_data); fw_transaction_callback_t callback, void *callback_data);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
void *data, size_t length);
int fw_cancel_transaction(struct fw_card *card, int fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction); struct fw_transaction *transaction);
void fw_flush_transactions(struct fw_card *card); void fw_flush_transactions(struct fw_card *card);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
void *payload, size_t length);
void fw_send_phy_config(struct fw_card *card, void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count); int node_id, int generation, int gap_count);
static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
{
return tag << 14 | channel << 8 | sy;
}
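For illustration only (not part of this patch): as the fw_send_request() documentation added earlier notes, the caller synthesizes the destination id of an asynchronous stream packet with this helper, roughly as follows; card, the transaction, generation, payload and the completion callback are assumed to be provided by the caller:

	/* channel 13, tag 1, sy 0; the offset argument is unused for TCODE_STREAM_DATA */
	fw_send_request(card, &t, TCODE_STREAM_DATA,
			fw_stream_packet_destination_id(1, 13, 0),
			generation, SCODE_100, 0, payload, length,
			stream_complete_callback, NULL);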
/* /*
* Called by the topology code to inform the device code of node * Called by the topology code to inform the device code of node
* activity; found, lost, or updated nodes. * activity; found, lost, or updated nodes.
*/ */
void void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
fw_node_event(struct fw_card *card, struct fw_node *node, int event);
/* API used by card level drivers */ /* API used by card level drivers */
void void fw_card_initialize(struct fw_card *card,
fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, const struct fw_card_driver *driver, struct device *device);
struct device *device); int fw_card_add(struct fw_card *card,
int u32 max_receive, u32 link_speed, u64 guid);
fw_card_add(struct fw_card *card, void fw_core_remove_card(struct fw_card *card);
u32 max_receive, u32 link_speed, u64 guid); void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
int generation, int self_id_count, u32 *self_ids);
void void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
fw_core_remove_card(struct fw_card *card); void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
void extern int fw_irm_set_broadcast_channel_register(struct device *dev,
fw_core_handle_bus_reset(struct fw_card *card, void *data);
int node_id, int generation,
int self_id_count, u32 *self_ids);
void
fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void
fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
#endif /* __fw_transaction_h */ #endif /* __fw_transaction_h */
...@@ -68,22 +68,22 @@ static struct hpsb_highlevel csr_highlevel = { ...@@ -68,22 +68,22 @@ static struct hpsb_highlevel csr_highlevel = {
.host_reset = host_reset, .host_reset = host_reset,
}; };
const static struct hpsb_address_ops map_ops = { static const struct hpsb_address_ops map_ops = {
.read = read_maps, .read = read_maps,
}; };
const static struct hpsb_address_ops fcp_ops = { static const struct hpsb_address_ops fcp_ops = {
.write = write_fcp, .write = write_fcp,
}; };
const static struct hpsb_address_ops reg_ops = { static const struct hpsb_address_ops reg_ops = {
.read = read_regs, .read = read_regs,
.write = write_regs, .write = write_regs,
.lock = lock_regs, .lock = lock_regs,
.lock64 = lock64_regs, .lock64 = lock64_regs,
}; };
const static struct hpsb_address_ops config_rom_ops = { static const struct hpsb_address_ops config_rom_ops = {
.read = read_config_rom, .read = read_config_rom,
}; };
......
...@@ -2171,7 +2171,7 @@ static const struct file_operations dv1394_fops= ...@@ -2171,7 +2171,7 @@ static const struct file_operations dv1394_fops=
* Export information about protocols/devices supported by this driver. * Export information about protocols/devices supported by this driver.
*/ */
#ifdef MODULE #ifdef MODULE
static struct ieee1394_device_id dv1394_id_table[] = { static const struct ieee1394_device_id dv1394_id_table[] = {
{ {
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
......
...@@ -181,7 +181,7 @@ static void ether1394_remove_host(struct hpsb_host *host); ...@@ -181,7 +181,7 @@ static void ether1394_remove_host(struct hpsb_host *host);
static void ether1394_host_reset(struct hpsb_host *host); static void ether1394_host_reset(struct hpsb_host *host);
/* Function for incoming 1394 packets */ /* Function for incoming 1394 packets */
const static struct hpsb_address_ops addr_ops = { static const struct hpsb_address_ops addr_ops = {
.write = ether1394_write, .write = ether1394_write,
}; };
...@@ -438,7 +438,7 @@ static int eth1394_update(struct unit_directory *ud) ...@@ -438,7 +438,7 @@ static int eth1394_update(struct unit_directory *ud)
return eth1394_new_node(hi, ud); return eth1394_new_node(hi, ud);
} }
static struct ieee1394_device_id eth1394_id_table[] = { static const struct ieee1394_device_id eth1394_id_table[] = {
{ {
.match_flags = (IEEE1394_MATCH_SPECIFIER_ID | .match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION), IEEE1394_MATCH_VERSION),
......
...@@ -478,7 +478,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host, ...@@ -478,7 +478,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
return retval; return retval;
} }
const static struct hpsb_address_ops dummy_ops; static const struct hpsb_address_ops dummy_ops;
/* dummy address spaces as lower and upper bounds of the host's a.s. list */ /* dummy address spaces as lower and upper bounds of the host's a.s. list */
static void init_hpsb_highlevel(struct hpsb_host *host) static void init_hpsb_highlevel(struct hpsb_host *host)
......
...@@ -484,7 +484,7 @@ static struct device_attribute *const fw_host_attrs[] = { ...@@ -484,7 +484,7 @@ static struct device_attribute *const fw_host_attrs[] = {
static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf) static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
{ {
struct hpsb_protocol_driver *driver; struct hpsb_protocol_driver *driver;
struct ieee1394_device_id *id; const struct ieee1394_device_id *id;
int length = 0; int length = 0;
char *scratch = buf; char *scratch = buf;
...@@ -658,7 +658,7 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv) ...@@ -658,7 +658,7 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
{ {
struct hpsb_protocol_driver *driver; struct hpsb_protocol_driver *driver;
struct unit_directory *ud; struct unit_directory *ud;
struct ieee1394_device_id *id; const struct ieee1394_device_id *id;
/* We only match unit directories */ /* We only match unit directories */
if (dev->platform_data != &nodemgr_ud_platform_data) if (dev->platform_data != &nodemgr_ud_platform_data)
......
...@@ -125,7 +125,7 @@ struct hpsb_protocol_driver { ...@@ -125,7 +125,7 @@ struct hpsb_protocol_driver {
* probe function below can implement further protocol * probe function below can implement further protocol
* dependent or vendor dependent checking. * dependent or vendor dependent checking.
*/ */
struct ieee1394_device_id *id_table; const struct ieee1394_device_id *id_table;
/* /*
* The update function is called when the node has just * The update function is called when the node has just
......
...@@ -90,7 +90,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, ...@@ -90,7 +90,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
u16 flags); u16 flags);
const static struct hpsb_address_ops arm_ops = { static const struct hpsb_address_ops arm_ops = {
.read = arm_read, .read = arm_read,
.write = arm_write, .write = arm_write,
.lock = arm_lock, .lock = arm_lock,
...@@ -369,6 +369,7 @@ static const char __user *raw1394_compat_write(const char __user *buf) ...@@ -369,6 +369,7 @@ static const char __user *raw1394_compat_write(const char __user *buf)
{ {
struct compat_raw1394_req __user *cr = (typeof(cr)) buf; struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
struct raw1394_request __user *r; struct raw1394_request __user *r;
r = compat_alloc_user_space(sizeof(struct raw1394_request)); r = compat_alloc_user_space(sizeof(struct raw1394_request));
#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x)) #define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
...@@ -378,7 +379,8 @@ static const char __user *raw1394_compat_write(const char __user *buf) ...@@ -378,7 +379,8 @@ static const char __user *raw1394_compat_write(const char __user *buf)
C(tag) || C(tag) ||
C(sendb) || C(sendb) ||
C(recvb)) C(recvb))
return ERR_PTR(-EFAULT); return (__force const char __user *)ERR_PTR(-EFAULT);
return (const char __user *)r; return (const char __user *)r;
} }
#undef C #undef C
...@@ -389,6 +391,7 @@ static int ...@@ -389,6 +391,7 @@ static int
raw1394_compat_read(const char __user *buf, struct raw1394_request *r) raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
{ {
struct compat_raw1394_req __user *cr = (typeof(cr)) buf; struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) || if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
P(type) || P(type) ||
P(error) || P(error) ||
...@@ -400,6 +403,7 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r) ...@@ -400,6 +403,7 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
P(sendb) || P(sendb) ||
P(recvb)) P(recvb))
return -EFAULT; return -EFAULT;
return sizeof(struct compat_raw1394_req); return sizeof(struct compat_raw1394_req);
} }
#undef P #undef P
...@@ -2249,8 +2253,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer, ...@@ -2249,8 +2253,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
sizeof(struct compat_raw1394_req) != sizeof(struct compat_raw1394_req) !=
sizeof(struct raw1394_request)) { sizeof(struct raw1394_request)) {
buffer = raw1394_compat_write(buffer); buffer = raw1394_compat_write(buffer);
if (IS_ERR(buffer)) if (IS_ERR((__force void *)buffer))
return PTR_ERR(buffer); return PTR_ERR((__force void *)buffer);
} else } else
#endif #endif
if (count != sizeof(struct raw1394_request)) { if (count != sizeof(struct raw1394_request)) {
...@@ -2978,7 +2982,7 @@ static int raw1394_release(struct inode *inode, struct file *file) ...@@ -2978,7 +2982,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
* Export information about protocols/devices supported by this driver. * Export information about protocols/devices supported by this driver.
*/ */
#ifdef MODULE #ifdef MODULE
static struct ieee1394_device_id raw1394_id_table[] = { static const struct ieee1394_device_id raw1394_id_table[] = {
{ {
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
......
...@@ -265,7 +265,7 @@ static struct hpsb_highlevel sbp2_highlevel = { ...@@ -265,7 +265,7 @@ static struct hpsb_highlevel sbp2_highlevel = {
.host_reset = sbp2_host_reset, .host_reset = sbp2_host_reset,
}; };
const static struct hpsb_address_ops sbp2_ops = { static const struct hpsb_address_ops sbp2_ops = {
.write = sbp2_handle_status_write .write = sbp2_handle_status_write
}; };
...@@ -275,7 +275,7 @@ static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *, ...@@ -275,7 +275,7 @@ static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *,
static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64, static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
size_t, u16); size_t, u16);
const static struct hpsb_address_ops sbp2_physdma_ops = { static const struct hpsb_address_ops sbp2_physdma_ops = {
.read = sbp2_handle_physdma_read, .read = sbp2_handle_physdma_read,
.write = sbp2_handle_physdma_write, .write = sbp2_handle_physdma_write,
}; };
...@@ -285,7 +285,7 @@ const static struct hpsb_address_ops sbp2_physdma_ops = { ...@@ -285,7 +285,7 @@ const static struct hpsb_address_ops sbp2_physdma_ops = {
/* /*
* Interface to driver core and IEEE 1394 core * Interface to driver core and IEEE 1394 core
*/ */
static struct ieee1394_device_id sbp2_id_table[] = { static const struct ieee1394_device_id sbp2_id_table[] = {
{ {
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
...@@ -1413,8 +1413,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu, ...@@ -1413,8 +1413,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
"(firmware_revision 0x%06x, vendor_id 0x%06x," "(firmware_revision 0x%06x, vendor_id 0x%06x,"
" model_id 0x%06x)", " model_id 0x%06x)",
NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
workarounds, firmware_revision, workarounds, firmware_revision, ud->vendor_id,
ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
model); model);
/* We would need one SCSI host template for each target to adjust /* We would need one SCSI host template for each target to adjust
......
...@@ -1294,7 +1294,7 @@ static const struct file_operations video1394_fops= ...@@ -1294,7 +1294,7 @@ static const struct file_operations video1394_fops=
* Export information about protocols/devices supported by this driver. * Export information about protocols/devices supported by this driver.
*/ */
#ifdef MODULE #ifdef MODULE
static struct ieee1394_device_id video1394_id_table[] = { static const struct ieee1394_device_id video1394_id_table[] = {
{ {
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
......
...@@ -115,7 +115,7 @@ static const char *debug_fcp_ctype(unsigned int ctype) ...@@ -115,7 +115,7 @@ static const char *debug_fcp_ctype(unsigned int ctype)
} }
static const char *debug_fcp_opcode(unsigned int opcode, static const char *debug_fcp_opcode(unsigned int opcode,
const u8 *data, size_t length) const u8 *data, int length)
{ {
switch (opcode) { switch (opcode) {
case AVC_OPCODE_VENDOR: break; case AVC_OPCODE_VENDOR: break;
...@@ -135,13 +135,14 @@ static const char *debug_fcp_opcode(unsigned int opcode, ...@@ -135,13 +135,14 @@ static const char *debug_fcp_opcode(unsigned int opcode,
case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC"; case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC";
case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl"; case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl";
case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK"; case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK";
case SFE_VENDOR_OPCODE_TUNE_QPSK2: return "TuneQPSK2";
case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA"; case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA";
case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host"; case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host";
} }
return "Vendor"; return "Vendor";
} }
static void debug_fcp(const u8 *data, size_t length) static void debug_fcp(const u8 *data, int length)
{ {
unsigned int subunit_type, subunit_id, op; unsigned int subunit_type, subunit_id, op;
const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> "; const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> ";
...@@ -266,7 +267,10 @@ static void avc_tuner_tuneqpsk(struct firedtv *fdtv, ...@@ -266,7 +267,10 @@ static void avc_tuner_tuneqpsk(struct firedtv *fdtv,
c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK; if (fdtv->type == FIREDTV_DVB_S2)
c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK2;
else
c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
c->operand[4] = (params->frequency >> 24) & 0xff; c->operand[4] = (params->frequency >> 24) & 0xff;
c->operand[5] = (params->frequency >> 16) & 0xff; c->operand[5] = (params->frequency >> 16) & 0xff;
......
...@@ -25,10 +25,12 @@ ...@@ -25,10 +25,12 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/firewire-constants.h> #include <linux/firewire-constants.h>
#define FW_CDEV_EVENT_BUS_RESET 0x00 #define FW_CDEV_EVENT_BUS_RESET 0x00
#define FW_CDEV_EVENT_RESPONSE 0x01 #define FW_CDEV_EVENT_RESPONSE 0x01
#define FW_CDEV_EVENT_REQUEST 0x02 #define FW_CDEV_EVENT_REQUEST 0x02
#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 #define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
/** /**
* struct fw_cdev_event_common - Common part of all fw_cdev_event_ types * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
...@@ -136,7 +138,24 @@ struct fw_cdev_event_request { ...@@ -136,7 +138,24 @@ struct fw_cdev_event_request {
* This event is sent when the controller has completed an &fw_cdev_iso_packet * This event is sent when the controller has completed an &fw_cdev_iso_packet
* with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers
* stripped of all packets up until and including the interrupt packet are * stripped of all packets up until and including the interrupt packet are
* returned in the @header field. * returned in the @header field. The amount of header data per packet is as
* specified at iso context creation by &fw_cdev_create_iso_context.header_size.
*
* In version 1 of this ABI, header data consisted of the 1394 isochronous
* packet header, followed by quadlets from the packet payload if
* &fw_cdev_create_iso_context.header_size > 4.
*
* In version 2 of this ABI, header data consist of the 1394 isochronous
* packet header, followed by a timestamp quadlet if
* &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the
* packet payload if &fw_cdev_create_iso_context.header_size > 8.
*
* Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
*
* Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel,
* 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp:
* 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte
* order.
*/ */
struct fw_cdev_event_iso_interrupt { struct fw_cdev_event_iso_interrupt {
__u64 closure; __u64 closure;
...@@ -146,6 +165,35 @@ struct fw_cdev_event_iso_interrupt { ...@@ -146,6 +165,35 @@ struct fw_cdev_event_iso_interrupt {
__u32 header[0]; __u32 header[0];
}; };
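For illustration, a minimal userspace sketch of decoding one packet's header data as delivered under ABI version 2 with a header_size of 8; the function name decode_iso_header and the assumption of exactly two header quadlets per packet are illustrative, not part of the ABI:

#include <endian.h>
#include <stdio.h>
#include <linux/firewire-cdev.h>

/* Header data per packet under ABI v2 with header_size == 8: one 1394 iso
 * packet header quadlet followed by one timestamp quadlet, both big endian. */
static void decode_iso_header(const __u32 *h)
{
	__u32 iso_hdr = be32toh(h[0]);
	__u32 tstamp  = be32toh(h[1]);

	unsigned int len     = iso_hdr >> 16;         /* 16 bits length */
	unsigned int tag     = (iso_hdr >> 14) & 0x3; /* 2 bits tag */
	unsigned int channel = (iso_hdr >> 8) & 0x3f; /* 6 bits channel */
	unsigned int tcode   = (iso_hdr >> 4) & 0xf;  /* 4 bits tcode */
	unsigned int sy      = iso_hdr & 0xf;         /* 4 bits sy */
	unsigned int cyc_sec = (tstamp >> 13) & 0x7;  /* 3 bits cycleSeconds */
	unsigned int cyc_cnt = tstamp & 0x1fff;       /* 13 bits cycleCount */

	printf("len %u tag %u ch %u tcode %u sy %u cycle %u.%u\n",
	       len, tag, channel, tcode, sy, cyc_sec, cyc_cnt);
}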
/**
* struct fw_cdev_event_iso_resource - Iso resources were allocated or freed
* @closure: See &fw_cdev_event_common;
* set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl
* @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
* %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
* @handle: Reference by which an allocated resource can be deallocated
* @channel: Isochronous channel which was (de)allocated, if any
* @bandwidth: Bandwidth allocation units which were (de)allocated, if any
*
* An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous
* resource was allocated at the IRM. The client has to check @channel and
* @bandwidth for whether the allocation actually succeeded.
*
* An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous
* resource was deallocated at the IRM. It is also sent when automatic
* reallocation after a bus reset failed.
*
* @channel is <0 if no channel was (de)allocated or if reallocation failed.
* @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed.
*/
struct fw_cdev_event_iso_resource {
__u64 closure;
__u32 type;
__u32 handle;
__s32 channel;
__s32 bandwidth;
};
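As a sketch of the success check described above (the helper name is illustrative), a client that requested both a channel and bandwidth might evaluate an allocation event like this:

#include <stdio.h>
#include <linux/firewire-cdev.h>

/* After an FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event, report what the IRM
 * actually granted: channel < 0 means no channel, bandwidth == 0 means no
 * bandwidth allocation units. */
static void report_iso_resource(const struct fw_cdev_event_iso_resource *e)
{
	if (e->channel >= 0)
		printf("allocated channel %d\n", e->channel);
	if (e->bandwidth > 0)
		printf("allocated %d bandwidth units\n", e->bandwidth);
	if (e->channel < 0 && e->bandwidth == 0)
		printf("allocation failed\n");
}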
/** /**
* union fw_cdev_event - Convenience union of fw_cdev_event_ types * union fw_cdev_event - Convenience union of fw_cdev_event_ types
* @common: Valid for all types * @common: Valid for all types
...@@ -153,6 +201,9 @@ struct fw_cdev_event_iso_interrupt { ...@@ -153,6 +201,9 @@ struct fw_cdev_event_iso_interrupt {
* @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
* @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
* @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
* @iso_resource: Valid if @common.type ==
* %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
* %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
* *
* Convenience union for userspace use. Events could be read(2) into an * Convenience union for userspace use. Events could be read(2) into an
* appropriately aligned char buffer and then cast to this union for further * appropriately aligned char buffer and then cast to this union for further
...@@ -163,33 +214,47 @@ struct fw_cdev_event_iso_interrupt { ...@@ -163,33 +214,47 @@ struct fw_cdev_event_iso_interrupt {
* not fit will be discarded so that the next read(2) will return a new event. * not fit will be discarded so that the next read(2) will return a new event.
*/ */
union fw_cdev_event { union fw_cdev_event {
struct fw_cdev_event_common common; struct fw_cdev_event_common common;
struct fw_cdev_event_bus_reset bus_reset; struct fw_cdev_event_bus_reset bus_reset;
struct fw_cdev_event_response response; struct fw_cdev_event_response response;
struct fw_cdev_event_request request; struct fw_cdev_event_request request;
struct fw_cdev_event_iso_interrupt iso_interrupt; struct fw_cdev_event_iso_interrupt iso_interrupt;
struct fw_cdev_event_iso_resource iso_resource;
}; };
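A hedged sketch of the read-and-cast pattern mentioned above, assuming an fd opened on a /dev/fw* character device; the buffer size and the handful of event types handled are illustrative:

#include <stdio.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

/* Read one event and dispatch on its type.  The union member guarantees the
 * alignment required to cast the read buffer to union fw_cdev_event. */
static int handle_one_event(int fd)
{
	union {
		union fw_cdev_event event;
		char buffer[4096];	/* room for iso interrupt header data */
	} u;
	ssize_t len = read(fd, &u, sizeof(u));

	if (len < 0)
		return -1;

	switch (u.event.common.type) {
	case FW_CDEV_EVENT_BUS_RESET:
		printf("bus reset\n");
		break;
	case FW_CDEV_EVENT_RESPONSE:
		printf("response, rcode %u\n", u.event.response.rcode);
		break;
	case FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED:
	case FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED:
		printf("iso resource: channel %d, bandwidth %d\n",
		       u.event.iso_resource.channel,
		       u.event.iso_resource.bandwidth);
		break;
	default:
		break;
	}
	return 0;
}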
#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) /* available since kernel version 2.6.22 */
#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) #define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info)
#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) #define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request)
#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) #define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate)
#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) #define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate)
#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) #define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response)
#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) #define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) #define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor)
#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor)
#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context)
#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso)
#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) /* available since kernel version 2.6.24 */
#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) #define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
/* FW_CDEV_VERSION History /* available since kernel version 2.6.30 */
* #define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource)
* 1 Feb 18, 2007: Initial version. #define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate)
#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource)
#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource)
#define FW_CDEV_IOC_GET_SPEED _IO('#', 0x11) /* returns speed code */
#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request)
#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet)
/*
* FW_CDEV_VERSION History
* 1 (2.6.22) - initial version
* 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
* &fw_cdev_create_iso_context.header_size is 8 or more
*/ */
#define FW_CDEV_VERSION 1 #define FW_CDEV_VERSION 2
/** /**
* struct fw_cdev_get_info - General purpose information ioctl * struct fw_cdev_get_info - General purpose information ioctl
...@@ -201,7 +266,7 @@ union fw_cdev_event { ...@@ -201,7 +266,7 @@ union fw_cdev_event {
* case, @rom_length is updated with the actual length of the * case, @rom_length is updated with the actual length of the
* configuration ROM. * configuration ROM.
* @rom: If non-zero, address of a buffer to be filled by a copy of the * @rom: If non-zero, address of a buffer to be filled by a copy of the
* local node's configuration ROM * device's configuration ROM
* @bus_reset: If non-zero, address of a buffer to be filled by a * @bus_reset: If non-zero, address of a buffer to be filled by a
* &struct fw_cdev_event_bus_reset with the current state * &struct fw_cdev_event_bus_reset with the current state
* of the bus. This does not cause a bus reset to happen. * of the bus. This does not cause a bus reset to happen.
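A sketch of using %FW_CDEV_IOC_GET_INFO to announce the client's ABI version and fetch a copy of the configuration ROM; the buffer size and helper name are illustrative, and treating @version as an input/output handshake is an assumption based on the version history above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Tell the kernel which ABI version this client implements and read back the
 * kernel's version together with the configuration ROM. */
static int get_device_info(int fd)
{
	__u32 rom[256];
	struct fw_cdev_get_info info;

	memset(&info, 0, sizeof(info));
	info.version    = FW_CDEV_VERSION;		/* version implemented by client */
	info.rom        = (__u64)(uintptr_t)rom;	/* buffer for the ROM copy */
	info.rom_length = sizeof(rom);

	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
		return -1;

	printf("kernel ABI version %u, config ROM length %u bytes\n",
	       info.version, info.rom_length);
	return 0;
}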
...@@ -229,7 +294,7 @@ struct fw_cdev_get_info { ...@@ -229,7 +294,7 @@ struct fw_cdev_get_info {
* Send a request to the device. This ioctl implements all outgoing requests. * Send a request to the device. This ioctl implements all outgoing requests.
* Both quadlet and block request specify the payload as a pointer to the data * Both quadlet and block request specify the payload as a pointer to the data
* in the @data field. Once the transaction completes, the kernel writes an * in the @data field. Once the transaction completes, the kernel writes an
* &fw_cdev_event_request event back. The @closure field is passed back to * &fw_cdev_event_response event back. The @closure field is passed back to
* user space in the response event. * user space in the response event.
*/ */
struct fw_cdev_send_request { struct fw_cdev_send_request {
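A sketch of an outgoing quadlet read built on this structure; the CSR offset and closure values are illustrative, and @generation is assumed to come from the most recent &fw_cdev_event_bus_reset:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>
#include <linux/firewire-constants.h>

/* Issue a quadlet read request; the result arrives later as an
 * fw_cdev_event_response carrying the same closure value. */
static int read_quadlet(int fd, __u64 offset, __u32 generation)
{
	struct fw_cdev_send_request request;

	memset(&request, 0, sizeof(request));
	request.tcode      = TCODE_READ_QUADLET_REQUEST;
	request.length     = 4;
	request.offset     = offset;		/* 48-bit CSR address */
	request.closure    = 0xc0ffee;		/* echoed in the response event */
	request.generation = generation;	/* from the last bus reset event */
	/* request.data stays 0: a read request carries no payload */

	return ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &request);
}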
...@@ -284,9 +349,9 @@ struct fw_cdev_allocate { ...@@ -284,9 +349,9 @@ struct fw_cdev_allocate {
}; };
/** /**
* struct fw_cdev_deallocate - Free an address range allocation * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource
* @handle: Handle to the address range, as returned by the kernel when the * @handle: Handle to the address range or iso resource, as returned by the
* range was allocated * kernel when the range or resource was allocated
*/ */
struct fw_cdev_deallocate { struct fw_cdev_deallocate {
__u32 handle; __u32 handle;
...@@ -329,6 +394,9 @@ struct fw_cdev_initiate_bus_reset { ...@@ -329,6 +394,9 @@ struct fw_cdev_initiate_bus_reset {
* If successful, the kernel adds the descriptor and writes back a handle to the * If successful, the kernel adds the descriptor and writes back a handle to the
* kernel-side object to be used for later removal of the descriptor block and * kernel-side object to be used for later removal of the descriptor block and
* immediate key. * immediate key.
*
* This ioctl affects the configuration ROMs of all local nodes.
* The ioctl only succeeds on device files which represent a local node.
*/ */
struct fw_cdev_add_descriptor { struct fw_cdev_add_descriptor {
__u32 immediate; __u32 immediate;
...@@ -344,7 +412,7 @@ struct fw_cdev_add_descriptor { ...@@ -344,7 +412,7 @@ struct fw_cdev_add_descriptor {
* descriptor was added * descriptor was added
* *
* Remove a descriptor block and accompanying immediate key from the local * Remove a descriptor block and accompanying immediate key from the local
* node's configuration ROM. * nodes' configuration ROMs.
*/ */
struct fw_cdev_remove_descriptor { struct fw_cdev_remove_descriptor {
__u32 handle; __u32 handle;
...@@ -370,6 +438,9 @@ struct fw_cdev_remove_descriptor { ...@@ -370,6 +438,9 @@ struct fw_cdev_remove_descriptor {
* *
* If a context was successfully created, the kernel writes back a handle to the * If a context was successfully created, the kernel writes back a handle to the
* context, which must be passed in for subsequent operations on that context. * context, which must be passed in for subsequent operations on that context.
*
* Note that the effect of a @header_size > 4 depends on
* &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
*/ */
struct fw_cdev_create_iso_context { struct fw_cdev_create_iso_context {
__u32 type; __u32 type;
...@@ -473,10 +544,91 @@ struct fw_cdev_stop_iso { ...@@ -473,10 +544,91 @@ struct fw_cdev_stop_iso {
* The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
* and also the system clock. This allows to express the receive time of an * and also the system clock. This allows to express the receive time of an
* isochronous packet as a system time with microsecond accuracy. * isochronous packet as a system time with microsecond accuracy.
*
* @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
* 12 bits cycleOffset, in host byte order.
*/ */
struct fw_cdev_get_cycle_timer { struct fw_cdev_get_cycle_timer {
__u64 local_time; __u64 local_time;
__u32 cycle_timer; __u32 cycle_timer;
}; };
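The bit split documented above maps onto simple shifts and masks; a small sketch (the helper name is illustrative):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Read the cycle timer and split it into cycleSeconds / cycleCount /
 * cycleOffset (7 + 13 + 12 bits, host byte order). */
static int print_cycle_time(int fd)
{
	struct fw_cdev_get_cycle_timer ct;

	if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER, &ct) < 0)
		return -1;

	printf("local_time %llu, cycle %u.%u.%u\n",
	       (unsigned long long)ct.local_time,
	       (unsigned int)(ct.cycle_timer >> 25),		/* cycleSeconds */
	       (unsigned int)((ct.cycle_timer >> 12) & 0x1fff),	/* cycleCount */
	       (unsigned int)(ct.cycle_timer & 0xfff));		/* cycleOffset */
	return 0;
}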
/**
* struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
 * @closure: Passed back to userspace in corresponding iso resource events
* @channels: Isochronous channels of which one is to be (de)allocated
* @bandwidth: Isochronous bandwidth units to be (de)allocated
* @handle: Handle to the allocation, written by the kernel (only valid in
* case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls)
*
* The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an
* isochronous channel and/or of isochronous bandwidth at the isochronous
* resource manager (IRM). Only one of the channels specified in @channels is
* allocated. An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after
* communication with the IRM, indicating success or failure in the event data.
* The kernel will automatically reallocate the resources after bus resets.
* Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event
* will be sent. The kernel will also automatically deallocate the resources
* when the file descriptor is closed.
*
* The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate
* deallocation of resources which were allocated as described above.
* An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
*
* The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation
* without automatic re- or deallocation.
* An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation,
* indicating success or failure in its data.
*
* The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like
* %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed
* instead of allocated.
* An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
*
 * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
* for the lifetime of the fd or handle.
* In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
* for the duration of a bus generation.
*
* @channels is a host-endian bitfield with the least significant bit
* representing channel 0 and the most significant bit representing channel 63:
* 1ULL << c for each channel c that is a candidate for (de)allocation.
*
* @bandwidth is expressed in bandwidth allocation units, i.e. the time to send
* one quadlet of data (payload or header data) at speed S1600.
*/
struct fw_cdev_allocate_iso_resource {
__u64 closure;
__u64 channels;
__u32 bandwidth;
__u32 handle;
};
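A sketch of requesting one channel out of a small candidate set together with some bandwidth; the candidate mask and the bandwidth figure are illustrative values, not recommendations:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Ask the IRM for one of channels 0..3 plus bandwidth.  The outcome arrives
 * later as an FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event, and the kernel
 * keeps re-allocating after bus resets until deallocation or close(fd). */
static int request_iso_resources(int fd, __u32 *handle)
{
	struct fw_cdev_allocate_iso_resource resource;

	memset(&resource, 0, sizeof(resource));
	resource.channels  = 0xf;	/* candidates: 1ULL << c for c = 0..3 */
	resource.bandwidth = 512;	/* illustrative number of allocation units */

	if (ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &resource) < 0)
		return -1;

	*handle = resource.handle;	/* for FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE */
	return 0;
}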
/**
* struct fw_cdev_send_stream_packet - send an asynchronous stream packet
* @length: Length of outgoing payload, in bytes
* @tag: Data format tag
* @channel: Isochronous channel to transmit to
* @sy: Synchronization code
* @closure: Passed back to userspace in the response event
* @data: Userspace pointer to payload
* @generation: The bus generation where packet is valid
* @speed: Speed to transmit at
*
* The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet
* to every device which is listening to the specified channel. The kernel
* writes an &fw_cdev_event_response event which indicates success or failure of
* the transmission.
*/
struct fw_cdev_send_stream_packet {
__u32 length;
__u32 tag;
__u32 channel;
__u32 sy;
__u64 closure;
__u64 data;
__u32 generation;
__u32 speed;
};
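Finally, a sketch of transmitting one small asynchronous stream packet on a previously allocated channel; tag, sy, closure and the S400 speed are illustrative, and @generation is again assumed to come from the latest bus reset event:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>
#include <linux/firewire-constants.h>

/* Send one asynchronous stream packet; completion is reported through an
 * fw_cdev_event_response carrying the given closure. */
static int send_stream(int fd, int channel, __u32 generation,
		       const void *payload, __u32 length)
{
	struct fw_cdev_send_stream_packet packet;

	memset(&packet, 0, sizeof(packet));
	packet.length     = length;
	packet.tag        = 0;
	packet.channel    = channel;
	packet.sy         = 0;
	packet.data       = (__u64)(uintptr_t)payload;
	packet.generation = generation;
	packet.speed      = SCODE_400;

	return ioctl(fd, FW_CDEV_IOC_SEND_STREAM_PACKET, &packet);
}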
#endif /* _LINUX_FIREWIRE_CDEV_H */ #endif /* _LINUX_FIREWIRE_CDEV_H */