Commit daf4b805 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6:
  firewire: reorder struct fw_card for better cache efficiency
  firewire: fix resetting of bus manager retry counter
  firewire: improve refcounting of fw_card
  firewire: typo in comment
  firewire: fix small memory leak at module removal
  firewire: fw-sbp2: remove unnecessary locking
  ieee1394: dv1394: interrupt enabling/disabling broken on big-endian
  ieee1394: dv1394: annotate frame input/output structs as little endian
  ieee1394: eth1394: trivial sparse annotations
  ieee1394: mark bus_info_data as a __be32 array
  ieee1394: replace CSR_SET_BUS_INFO_GENERATION macro
  ieee1394: pcilynx: trivial endian annotation
  ieee1394: ignore nonzero Bus_Info_Block.max_rom, fetch config ROM in quadlets
  ieee1394: consolidate uses of IEEE1394_BUSID_MAGIC
  ieee1394: ohci1394: flush MMIO writes before delay in initialization
  ieee1394: ohci1394: pass error codes from request_irq through
  ieee1394: ohci1394: don't leave interrupts enabled during suspend/resume
  ieee1394: mark all hpsb_address_ops instances as const
  ieee1394: replace a GFP_ATOMIC by GFP_KERNEL allocation
parents a0c9f240 c8a12d45
......@@ -75,7 +75,7 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length)
* controller, block reads to the config rom accesses the host
* memory, but quadlet read access the hardware bus info block
* registers. That's just crack, but it means we should make
* sure the contents of bus info block in host memory mathces
* sure the contents of bus info block in host memory matches
* the version stored in the OHCI registers.
*/
......@@ -189,6 +189,17 @@ static const char gap_count_table[] = {
63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
void
fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
int scheduled;
fw_card_get(card);
scheduled = schedule_delayed_work(&card->work, delay);
if (!scheduled)
fw_card_put(card);
}
static void
fw_card_bm_work(struct work_struct *work)
{
......@@ -206,7 +217,7 @@ fw_card_bm_work(struct work_struct *work)
if (local_node == NULL) {
spin_unlock_irqrestore(&card->lock, flags);
return;
goto out_put_card;
}
fw_node_get(local_node);
fw_node_get(root_node);
......@@ -280,7 +291,7 @@ fw_card_bm_work(struct work_struct *work)
* this task 100ms from now.
*/
spin_unlock_irqrestore(&card->lock, flags);
schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10));
goto out;
}
......@@ -355,6 +366,8 @@ fw_card_bm_work(struct work_struct *work)
fw_device_put(root_device);
fw_node_put(root_node);
fw_node_put(local_node);
out_put_card:
fw_card_put(card);
}
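
The hunks above carry the "improve refcounting of fw_card" change: the card takes an extra reference before its bus-management work is (re)queued, drops it immediately if the work was already pending, and drops it again when the work function finishes. A minimal sketch of the same idiom on a hypothetical refcounted object (struct my_dev and the my_dev_* names are illustrative, not part of the patch):

    /*
     * Sketch only: hold a reference while delayed work is pending, as
     * fw_schedule_bm_work()/fw_card_bm_work() do above.
     */
    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_dev {
            struct kref kref;
            struct delayed_work work;
    };

    static void my_dev_release(struct kref *kref)
    {
            kfree(container_of(kref, struct my_dev, kref));
    }

    static void my_dev_put(struct my_dev *d)
    {
            kref_put(&d->kref, my_dev_release);
    }

    static void my_dev_schedule_work(struct my_dev *d, unsigned long delay)
    {
            kref_get(&d->kref);                    /* reference travels with the pending work */
            if (!schedule_delayed_work(&d->work, delay))
                    my_dev_put(d);                 /* already queued: drop the extra reference */
    }

    static void my_dev_work(struct work_struct *work)
    {
            struct my_dev *d = container_of(work, struct my_dev, work.work);

            /* ... the actual work ... */

            my_dev_put(d);                         /* matches the get taken at schedule time */
    }
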
static void
......@@ -510,7 +523,6 @@ fw_core_remove_card(struct fw_card *card)
fw_card_put(card);
wait_for_completion(&card->done);
cancel_delayed_work_sync(&card->work);
WARN_ON(!list_empty(&card->transaction_list));
del_timer_sync(&card->flush_timer);
}
......
......@@ -617,7 +617,7 @@ static int shutdown_unit(struct device *device, void *data)
*/
DECLARE_RWSEM(fw_device_rwsem);
static DEFINE_IDR(fw_device_idr);
DEFINE_IDR(fw_device_idr);
int fw_cdev_major;
struct fw_device *fw_device_get_by_devt(dev_t devt)
......@@ -689,7 +689,7 @@ static void fw_device_init(struct work_struct *work)
fw_notify("giving up on config rom for node id %x\n",
device->node_id);
if (device->node == device->card->root_node)
schedule_delayed_work(&device->card->work, 0);
fw_schedule_bm_work(device->card, 0);
fw_device_release(&device->device);
}
return;
......@@ -758,7 +758,7 @@ static void fw_device_init(struct work_struct *work)
* pretty harmless.
*/
if (device->node == device->card->root_node)
schedule_delayed_work(&device->card->work, 0);
fw_schedule_bm_work(device->card, 0);
return;
......@@ -892,7 +892,7 @@ static void fw_device_refresh(struct work_struct *work)
fw_device_shutdown(work);
out:
if (node_id == card->root_node->node_id)
schedule_delayed_work(&card->work, 0);
fw_schedule_bm_work(card, 0);
}
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
......
......@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/rwsem.h>
#include <asm/atomic.h>
......@@ -99,6 +100,7 @@ void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
extern struct rw_semaphore fw_device_rwsem;
extern struct idr fw_device_idr;
extern int fw_cdev_major;
/*
......
......@@ -670,17 +670,6 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
&d, sizeof(d), complete_agent_reset_write_no_wait, t);
}
static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation)
{
struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card;
unsigned long flags;
/* serialize with comparisons of lu->generation and card->generation */
spin_lock_irqsave(&card->lock, flags);
lu->generation = generation;
spin_unlock_irqrestore(&card->lock, flags);
}
static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
{
/*
......@@ -884,7 +873,7 @@ static void sbp2_login(struct work_struct *work)
goto out;
generation = device->generation;
smp_rmb(); /* node_id must not be older than generation */
smp_rmb(); /* node IDs must not be older than generation */
node_id = device->node_id;
local_node_id = device->card->node_id;
......@@ -908,7 +897,8 @@ static void sbp2_login(struct work_struct *work)
tgt->node_id = node_id;
tgt->address_high = local_node_id << 16;
sbp2_set_generation(lu, generation);
smp_wmb(); /* node IDs must not be older than generation */
lu->generation = generation;
lu->command_block_agent_address =
((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
......@@ -1201,7 +1191,7 @@ static void sbp2_reconnect(struct work_struct *work)
goto out;
generation = device->generation;
smp_rmb(); /* node_id must not be older than generation */
smp_rmb(); /* node IDs must not be older than generation */
node_id = device->node_id;
local_node_id = device->card->node_id;
......@@ -1228,7 +1218,8 @@ static void sbp2_reconnect(struct work_struct *work)
tgt->node_id = node_id;
tgt->address_high = local_node_id << 16;
sbp2_set_generation(lu, generation);
smp_wmb(); /* node IDs must not be older than generation */
lu->generation = generation;
fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
tgt->bus_id, lu->lun, lu->retries);
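
The two sbp2 hunks above belong to "fw-sbp2: remove unnecessary locking": instead of taking card->lock around the generation update, the driver now relies on ordering alone, writing the node IDs before the generation (smp_wmb) and reading the generation before the node IDs (smp_rmb), mirroring how the core publishes card->generation. A stripped-down sketch of that publish/consume pairing (struct ctx and the function names are illustrative, not the driver's; the barriers are the kernel's smp_wmb()/smp_rmb()):

    /* Sketch only: the generation acts as the "published" flag for the node IDs. */
    struct ctx {
            int node_id;
            int generation;
    };

    static void publish(struct ctx *c, int node_id, int generation)
    {
            c->node_id = node_id;
            smp_wmb();              /* node IDs must not be older than generation */
            c->generation = generation;
    }

    static void consume(struct ctx *c, int *node_id, int *generation)
    {
            *generation = c->generation;
            smp_rmb();              /* ... so read the generation first, node IDs second */
            *node_id = c->node_id;
    }
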
......
......@@ -355,6 +355,9 @@ report_lost_node(struct fw_card *card,
{
fw_node_event(card, node, FW_NODE_DESTROYED);
fw_node_put(node);
/* Topology has changed - reset bus manager retry counter */
card->bm_retries = 0;
}
static void
......@@ -374,6 +377,9 @@ report_found_node(struct fw_card *card,
}
fw_node_event(card, node, FW_NODE_CREATED);
/* Topology has changed - reset bus manager retry counter */
card->bm_retries = 0;
}
void fw_destroy_nodes(struct fw_card *card)
......@@ -514,14 +520,6 @@ fw_core_handle_bus_reset(struct fw_card *card,
spin_lock_irqsave(&card->lock, flags);
/*
* If the new topology has a different self_id_count the topology
* changed, either nodes were added or removed. In that case we
* reset the IRM reset counter.
*/
if (card->self_id_count != self_id_count)
card->bm_retries = 0;
card->node_id = node_id;
/*
* Update node_id before generation to prevent anybody from using
......@@ -530,7 +528,7 @@ fw_core_handle_bus_reset(struct fw_card *card,
smp_wmb();
card->generation = generation;
card->reset_jiffies = jiffies;
schedule_delayed_work(&card->work, 0);
fw_schedule_bm_work(card, 0);
local_node = build_tree(card, self_ids, self_id_count);
......
......@@ -19,6 +19,7 @@
*/
#include <linux/completion.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
......@@ -971,6 +972,7 @@ static void __exit fw_core_cleanup(void)
{
unregister_chrdev(fw_cdev_major, "firewire");
bus_unregister(&fw_bus_type);
idr_destroy(&fw_device_idr);
}
module_init(fw_core_init);
......
......@@ -237,14 +237,6 @@ struct fw_card {
int link_speed;
int config_rom_generation;
/*
* We need to store up to 4 self ID for a maximum of 63
* devices plus 3 words for the topology map header.
*/
int self_id_count;
u32 topology_map[252 + 3];
u32 broadcast_channel;
spinlock_t lock; /* Take this lock when handling the lists in
* this struct. */
struct fw_node *local_node;
......@@ -262,6 +254,9 @@ struct fw_card {
struct delayed_work work;
int bm_retries;
int bm_generation;
u32 broadcast_channel;
u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
};
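
Besides moving the hot fields together, the "reorder struct fw_card for better cache efficiency" change re-expresses the topology_map bound in terms of the CSR register offsets instead of the hand-written "252 + 3" comment. A rough cross-check, assuming the usual CSR core layout for the offset values (they are not quoted from this patch):

    /* The topology map occupies 0x1000..0x13ff of the CSR core register space. */
    #define CSR_TOPOLOGY_MAP        0x1000
    #define CSR_TOPOLOGY_MAP_END    0x1400

    /* (0x1400 - 0x1000) / 4 = 256 quadlets, versus the old 252 + 3 = 255 entries. */
    u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
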
static inline struct fw_card *fw_card_get(struct fw_card *card)
......@@ -278,6 +273,8 @@ static inline void fw_card_put(struct fw_card *card)
kref_put(&card->kref, fw_card_release);
}
extern void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
/*
* The iso packet format allows for an immediate header/payload part
* stored in 'header' immediately after the packet info plus an
......
......@@ -68,22 +68,22 @@ static struct hpsb_highlevel csr_highlevel = {
.host_reset = host_reset,
};
static struct hpsb_address_ops map_ops = {
const static struct hpsb_address_ops map_ops = {
.read = read_maps,
};
static struct hpsb_address_ops fcp_ops = {
const static struct hpsb_address_ops fcp_ops = {
.write = write_fcp,
};
static struct hpsb_address_ops reg_ops = {
const static struct hpsb_address_ops reg_ops = {
.read = read_regs,
.write = write_regs,
.lock = lock_regs,
.lock64 = lock64_regs,
};
static struct hpsb_address_ops config_rom_ops = {
const static struct hpsb_address_ops config_rom_ops = {
.read = read_config_rom,
};
......@@ -217,7 +217,7 @@ static void add_host(struct hpsb_host *host)
host->csr.generation = 2;
bus_info[1] = __constant_cpu_to_be32(0x31333934);
bus_info[1] = IEEE1394_BUSID_MAGIC;
bus_info[2] = cpu_to_be32((hpsb_disable_irm ? 0 : 1 << CSR_IRMC_SHIFT) |
(1 << CSR_CMC_SHIFT) |
(1 << CSR_ISC_SHIFT) |
......@@ -250,7 +250,7 @@ static void remove_host(struct hpsb_host *host)
{
quadlet_t bus_info[CSR_BUS_INFO_SIZE];
bus_info[1] = __constant_cpu_to_be32(0x31333934);
bus_info[1] = IEEE1394_BUSID_MAGIC;
bus_info[2] = cpu_to_be32((0 << CSR_IRMC_SHIFT) |
(0 << CSR_CMC_SHIFT) |
(0 << CSR_ISC_SHIFT) |
......
......@@ -50,11 +50,11 @@
#define CSR_MAX_ROM_SHIFT 8
#define CSR_GENERATION_SHIFT 4
#define CSR_SET_BUS_INFO_GENERATION(csr, gen) \
((csr)->bus_info_data[2] = \
cpu_to_be32((be32_to_cpu((csr)->bus_info_data[2]) & \
~(0xf << CSR_GENERATION_SHIFT)) | \
(gen) << CSR_GENERATION_SHIFT))
static inline void csr_set_bus_info_generation(struct csr1212_csr *csr, u8 gen)
{
csr->bus_info_data[2] &= ~cpu_to_be32(0xf << CSR_GENERATION_SHIFT);
csr->bus_info_data[2] |= cpu_to_be32((u32)gen << CSR_GENERATION_SHIFT);
}
struct csr_control {
spinlock_t lock;
......
......@@ -1077,15 +1077,10 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
int i;
int ret;
/* IEEE 1212 says that the entire bus info block should be readable in
* a single transaction regardless of the max_rom value.
* Unfortunately, many IEEE 1394 devices do not abide by that, so the
* bus info block will be read 1 quadlet at a time. The rest of the
* ConfigROM will be read according to the max_rom field. */
for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
csr->private);
&csr->cache_head->data[bytes_to_quads(i)],
csr->private);
if (ret != CSR1212_SUCCESS)
return ret;
......@@ -1104,8 +1099,8 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
* a time. */
for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
csr->private);
&csr->cache_head->data[bytes_to_quads(i)],
csr->private);
if (ret != CSR1212_SUCCESS)
return ret;
}
......@@ -1289,7 +1284,7 @@ csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
if (csr->ops->bus_read(csr,
CSR1212_REGISTER_SPACE_BASE + kv->offset,
sizeof(u32), &q, csr->private))
&q, csr->private))
return -EIO;
kv->value.leaf.len = be32_to_cpu(q) >> 16;
......@@ -1372,17 +1367,8 @@ csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
cr->offset_end) & ~(csr->max_rom - 1);
if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
csr->private)) {
if (csr->max_rom == 4)
/* We've got problems! */
return -EIO;
/* Apperently the max_rom value was a lie, set it to
* do quadlet reads and try again. */
csr->max_rom = 4;
continue;
}
if (csr->ops->bus_read(csr, addr, cache_ptr, csr->private))
return -EIO;
cr->offset_end += csr->max_rom - (cr->offset_end &
(csr->max_rom - 1));
......@@ -1433,7 +1419,6 @@ csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
int csr1212_parse_csr(struct csr1212_csr *csr)
{
static const int mr_map[] = { 4, 64, 1024, 0 };
struct csr1212_dentry *dentry;
int ret;
......@@ -1443,15 +1428,13 @@ int csr1212_parse_csr(struct csr1212_csr *csr)
if (ret != CSR1212_SUCCESS)
return ret;
if (!csr->ops->get_max_rom) {
csr->max_rom = mr_map[0]; /* default value */
} else {
int i = csr->ops->get_max_rom(csr->bus_info_data,
csr->private);
if (i & ~0x3)
return -EINVAL;
csr->max_rom = mr_map[i];
}
/*
* There has been a buggy firmware with bus_info_block.max_rom > 0
* spotted which actually only supported quadlet read requests to the
* config ROM. Therefore read everything quadlet by quadlet regardless
* of what the bus info block says.
*/
csr->max_rom = 4;
csr->cache_head->layout_head = csr->root_kv;
csr->cache_head->layout_tail = csr->root_kv;
......
......@@ -181,7 +181,7 @@ struct csr1212_csr_rom_cache {
struct csr1212_csr {
size_t bus_info_len; /* bus info block length in bytes */
size_t crc_len; /* crc length in bytes */
u32 *bus_info_data; /* bus info data incl bus name and EUI */
__be32 *bus_info_data; /* bus info data incl bus name and EUI */
void *private; /* private, bus specific data */
struct csr1212_bus_ops *ops;
......@@ -200,7 +200,7 @@ struct csr1212_bus_ops {
* entries located in the Units Space. Must return 0 on success
* anything else indicates an error. */
int (*bus_read) (struct csr1212_csr *csr, u64 addr,
u16 length, void *buffer, void *private);
void *buffer, void *private);
/* This function is used by csr1212 to allocate a region in units space
* in the event that Config ROM entries don't all fit in the predefined
......@@ -211,11 +211,6 @@ struct csr1212_bus_ops {
/* This function is used by csr1212 to release a region in units space
* that is no longer needed. */
void (*release_addr) (u64 addr, void *private);
/* This function is used by csr1212 to determine the max read request
* supported by a remote node when reading the ConfigROM space. Must
* return 0, 1, or 2 per IEEE 1212. */
int (*get_max_rom) (u32 *bus_info, void *private);
};
......
......@@ -77,11 +77,11 @@ static inline void fill_cip_header(struct CIP_header *cip,
See the Texas Instruments OHCI 1394 chipset documentation.
*/
struct output_more_immediate { u32 q[8]; };
struct output_more { u32 q[4]; };
struct output_last { u32 q[4]; };
struct input_more { u32 q[4]; };
struct input_last { u32 q[4]; };
struct output_more_immediate { __le32 q[8]; };
struct output_more { __le32 q[4]; };
struct output_last { __le32 q[4]; };
struct input_more { __le32 q[4]; };
struct input_last { __le32 q[4]; };
/* outputs */
......@@ -92,9 +92,9 @@ static inline void fill_output_more_immediate(struct output_more_immediate *omi,
unsigned int payload_size)
{
omi->q[0] = cpu_to_le32(0x02000000 | 8); /* OUTPUT_MORE_IMMEDIATE; 8 is the size of the IT header */
omi->q[1] = 0;
omi->q[2] = 0;
omi->q[3] = 0;
omi->q[1] = cpu_to_le32(0);
omi->q[2] = cpu_to_le32(0);
omi->q[3] = cpu_to_le32(0);
/* IT packet header */
omi->q[4] = cpu_to_le32( (0x0 << 16) /* IEEE1394_SPEED_100 */
......@@ -106,8 +106,8 @@ static inline void fill_output_more_immediate(struct output_more_immediate *omi,
/* reserved field; mimic behavior of my Sony DSR-40 */
omi->q[5] = cpu_to_le32((payload_size << 16) | (0x7F << 8) | 0xA0);
omi->q[6] = 0;
omi->q[7] = 0;
omi->q[6] = cpu_to_le32(0);
omi->q[7] = cpu_to_le32(0);
}
static inline void fill_output_more(struct output_more *om,
......@@ -116,8 +116,8 @@ static inline void fill_output_more(struct output_more *om,
{
om->q[0] = cpu_to_le32(data_size);
om->q[1] = cpu_to_le32(data_phys_addr);
om->q[2] = 0;
om->q[3] = 0;
om->q[2] = cpu_to_le32(0);
om->q[3] = cpu_to_le32(0);
}
static inline void fill_output_last(struct output_last *ol,
......@@ -140,8 +140,8 @@ static inline void fill_output_last(struct output_last *ol,
ol->q[0] = cpu_to_le32(temp);
ol->q[1] = cpu_to_le32(data_phys_addr);
ol->q[2] = 0;
ol->q[3] = 0;
ol->q[2] = cpu_to_le32(0);
ol->q[3] = cpu_to_le32(0);
}
/* inputs */
......@@ -161,8 +161,8 @@ static inline void fill_input_more(struct input_more *im,
im->q[0] = cpu_to_le32(temp);
im->q[1] = cpu_to_le32(data_phys_addr);
im->q[2] = 0; /* branchAddress and Z not use in packet-per-buffer mode */
im->q[3] = 0; /* xferStatus & resCount, resCount must be initialize to data_size */
im->q[2] = cpu_to_le32(0); /* branchAddress and Z not use in packet-per-buffer mode */
im->q[3] = cpu_to_le32(0); /* xferStatus & resCount, resCount must be initialize to data_size */
}
static inline void fill_input_last(struct input_last *il,
......@@ -331,7 +331,7 @@ struct frame {
/* points to status/timestamp field of first DMA packet */
/* (we'll check it later to monitor timestamp accuracy) */
u32 *frame_begin_timestamp;
__le32 *frame_begin_timestamp;
/* the timestamp we assigned to the first packet in the frame */
u32 assigned_timestamp;
......@@ -348,15 +348,15 @@ struct frame {
that can cause interrupts. We'll check these from the
interrupt handler.
*/
u32 *mid_frame_timestamp;
u32 *frame_end_timestamp;
__le32 *mid_frame_timestamp;
__le32 *frame_end_timestamp;
/* branch address field of final packet. This is effectively
the "tail" in the chain of DMA descriptor blocks.
We will fill it with the address of the first DMA descriptor
block in the subsequent frame, once it is ready.
*/
u32 *frame_end_branch;
__le32 *frame_end_branch;
/* the number of descriptors in the first descriptor block
of the frame. Needed to start DMA */
......@@ -365,10 +365,10 @@ struct frame {
struct packet {
u16 timestamp;
__le16 timestamp;
u16 invalid;
u16 iso_header;
u16 data_length;
__le16 data_length;
u32 cip_h1;
u32 cip_h2;
unsigned char data[480];
......
......@@ -265,7 +265,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
/* these flags denote packets that need special attention */
int empty_packet, first_packet, last_packet, mid_packet;
u32 *branch_address, *last_branch_address = NULL;
__le32 *branch_address, *last_branch_address = NULL;
unsigned long data_p;
int first_packet_empty = 0;
u32 cycleTimer, ct_sec, ct_cyc, ct_off;
......@@ -848,7 +848,7 @@ static void receive_packets(struct video_card *video)
dma_addr_t block_dma = 0;
struct packet *data = NULL;
dma_addr_t data_dma = 0;
u32 *last_branch_address = NULL;
__le32 *last_branch_address = NULL;
unsigned long irq_flags;
int want_interrupt = 0;
struct frame *f = NULL;
......@@ -2110,17 +2110,17 @@ static void ir_tasklet_func(unsigned long data)
f = video->frames[next_i / MAX_PACKETS];
next = &(f->descriptor_pool[next_i % MAX_PACKETS]);
next_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
next->u.in.il.q[0] |= 3 << 20; /* enable interrupt */
next->u.in.il.q[2] = 0; /* disable branch */
next->u.in.il.q[0] |= cpu_to_le32(3 << 20); /* enable interrupt */
next->u.in.il.q[2] = cpu_to_le32(0); /* disable branch */
/* link previous to next */
prev_i = (next_i == 0) ? (MAX_PACKETS * video->n_frames - 1) : (next_i - 1);
f = video->frames[prev_i / MAX_PACKETS];
prev = &(f->descriptor_pool[prev_i % MAX_PACKETS]);
if (prev_i % (MAX_PACKETS/2)) {
prev->u.in.il.q[0] &= ~(3 << 20); /* no interrupt */
prev->u.in.il.q[0] &= ~cpu_to_le32(3 << 20); /* no interrupt */
} else {
prev->u.in.il.q[0] |= 3 << 20; /* enable interrupt */
prev->u.in.il.q[0] |= cpu_to_le32(3 << 20); /* enable interrupt */
}
prev->u.in.il.q[2] = cpu_to_le32(next_dma | 1); /* set Z=1 */
wmb();
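
This hunk is the substance of "dv1394: interrupt enabling/disabling broken on big-endian": the descriptor words are stored in bus (little-endian) byte order, so OR-ing a host-order constant such as 3 << 20 into them only hits the intended bits on little-endian machines. A small hedged illustration of the idiom, using the kernel's byte-order helpers:

    /*
     * Sketch only: why the constant itself must be byte-swapped.  On a
     * little-endian host cpu_to_le32() is a no-op and both forms agree;
     * on a big-endian host only the wrapped form touches bits 21:20 of
     * the word the OHCI DMA engine actually reads.
     */
    static void enable_descriptor_irq(__le32 *ctrl)
    {
            /* broken on big-endian: *ctrl |= 3 << 20; */
            *ctrl |= cpu_to_le32(3 << 20);          /* correct on any endianness */
    }
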
......
......@@ -92,7 +92,7 @@ struct partial_datagram {
struct list_head list;
u16 dgl;
u16 dg_size;
u16 ether_type;
__be16 ether_type;
struct sk_buff *skb;
char *pbuf;
struct list_head frag_info;
......@@ -181,7 +181,7 @@ static void ether1394_remove_host(struct hpsb_host *host);
static void ether1394_host_reset(struct hpsb_host *host);
/* Function for incoming 1394 packets */
static struct hpsb_address_ops addr_ops = {
const static struct hpsb_address_ops addr_ops = {
.write = ether1394_write,
};
......@@ -767,7 +767,7 @@ static int ether1394_header_parse(const struct sk_buff *skb,
static int ether1394_header_cache(const struct neighbour *neigh,
struct hh_cache *hh)
{
unsigned short type = hh->hh_type;
__be16 type = hh->hh_type;
struct net_device *dev = neigh->dev;
struct eth1394hdr *eth =
(struct eth1394hdr *)((u8 *)hh->hh_data + 16 - ETH1394_HLEN);
......@@ -795,7 +795,7 @@ static void ether1394_header_cache_update(struct hh_cache *hh,
******************************************/
/* Copied from net/ethernet/eth.c */
static u16 ether1394_type_trans(struct sk_buff *skb, struct net_device *dev)
static __be16 ether1394_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct eth1394hdr *eth;
unsigned char *rawp;
......@@ -829,17 +829,17 @@ static u16 ether1394_type_trans(struct sk_buff *skb, struct net_device *dev)
/* Parse an encapsulated IP1394 header into an ethernet frame packet.
* We also perform ARP translation here, if need be. */
static u16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
static __be16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
nodeid_t srcid, nodeid_t destid,
u16 ether_type)
__be16 ether_type)
{
struct eth1394_priv *priv = netdev_priv(dev);
u64 dest_hw;
unsigned short ret = 0;
__be64 dest_hw;
__be16 ret = 0;
/* Setup our hw addresses. We use these to build the ethernet header. */
if (destid == (LOCAL_BUS | ALL_NODES))
dest_hw = ~0ULL; /* broadcast */
dest_hw = ~cpu_to_be64(0); /* broadcast */
else
dest_hw = cpu_to_be64((u64)priv->host->csr.guid_hi << 32 |
priv->host->csr.guid_lo);
......@@ -873,7 +873,7 @@ static u16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
node = eth1394_find_node_guid(&priv->ip_node_list,
be64_to_cpu(guid));
if (!node)
return 0;
return cpu_to_be16(0);
node_info =
(struct eth1394_node_info *)node->ud->device.driver_data;
......@@ -1063,7 +1063,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
unsigned long flags;
struct eth1394_priv *priv = netdev_priv(dev);
union eth1394_hdr *hdr = (union eth1394_hdr *)buf;
u16 ether_type = 0; /* initialized to clear warning */
__be16 ether_type = cpu_to_be16(0); /* initialized to clear warning */
int hdr_len;
struct unit_directory *ud = priv->ud_list[NODEID_TO_NODE(srcid)];
struct eth1394_node_info *node_info;
......@@ -1259,7 +1259,7 @@ static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
static void ether1394_iso(struct hpsb_iso *iso)
{
quadlet_t *data;
__be32 *data;
char *buf;
struct eth1394_host_info *hi;
struct net_device *dev;
......@@ -1283,7 +1283,7 @@ static void ether1394_iso(struct hpsb_iso *iso)
for (i = 0; i < nready; i++) {
struct hpsb_iso_packet_info *info =
&iso->infos[(iso->first_packet + i) % iso->buf_packets];
data = (quadlet_t *)(iso->data_buf.kvirt + info->offset);
data = (__be32 *)(iso->data_buf.kvirt + info->offset);
/* skip over GASP header */
buf = (char *)data + 8;
......@@ -1614,7 +1614,7 @@ static int ether1394_tx(struct sk_buff *skb, struct net_device *dev)
if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
priv->bc_dgl++;
} else {
__be64 guid = get_unaligned((u64 *)hdr_buf.h_dest);
__be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
node = eth1394_find_node_guid(&priv->ip_node_list,
be64_to_cpu(guid));
......
......@@ -82,7 +82,7 @@ struct eth1394_priv {
struct eth1394hdr {
unsigned char h_dest[ETH1394_ALEN]; /* destination eth1394 addr */
unsigned short h_proto; /* packet type ID field */
__be16 h_proto; /* packet type ID field */
} __attribute__((packed));
static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
......@@ -99,13 +99,13 @@ typedef enum {ETH1394_GASP, ETH1394_WRREQ} eth1394_tx_type;
struct eth1394_uf_hdr {
u16 lf:2;
u16 res:14;
u16 ether_type; /* Ethernet packet type */
__be16 ether_type; /* Ethernet packet type */
} __attribute__((packed));
#elif defined __LITTLE_ENDIAN_BITFIELD
struct eth1394_uf_hdr {
u16 res:14;
u16 lf:2;
u16 ether_type;
__be16 ether_type;
} __attribute__((packed));
#else
#error Unknown bit field type
......@@ -117,7 +117,7 @@ struct eth1394_ff_hdr {
u16 lf:2;
u16 res1:2;
u16 dg_size:12; /* Datagram size */
u16 ether_type; /* Ethernet packet type */
__be16 ether_type; /* Ethernet packet type */
u16 dgl; /* Datagram label */
u16 res2;
} __attribute__((packed));
......@@ -126,7 +126,7 @@ struct eth1394_ff_hdr {
u16 dg_size:12;
u16 res1:2;
u16 lf:2;
u16 ether_type;
__be16 ether_type;
u16 dgl;
u16 res2;
} __attribute__((packed));
......@@ -207,11 +207,11 @@ struct eth1394_arp {
u16 opcode; /* ARP Opcode */
/* Above is exactly the same format as struct arphdr */
u64 s_uniq_id; /* Sender's 64bit EUI */
__be64 s_uniq_id; /* Sender's 64bit EUI */
u8 max_rec; /* Sender's max packet size */
u8 sspd; /* Sender's max speed */
u16 fifo_hi; /* hi 16bits of sender's FIFO addr */
u32 fifo_lo; /* lo 32bits of sender's FIFO addr */
__be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
__be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
u32 sip; /* Sender's IP Address */
u32 tip; /* IP Address of requested hw addr */
};
......
......@@ -320,7 +320,7 @@ void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
*/
u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
struct hpsb_host *host,
struct hpsb_address_ops *ops,
const struct hpsb_address_ops *ops,
u64 size, u64 alignment,
u64 start, u64 end)
{
......@@ -407,7 +407,8 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
* are automatically deallocated together with the hpsb_highlevel @hl.
*/
int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
struct hpsb_address_ops *ops, u64 start, u64 end)
const struct hpsb_address_ops *ops,
u64 start, u64 end)
{
struct hpsb_address_serve *as;
struct list_head *lh;
......@@ -420,7 +421,7 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
return 0;
}
as = kmalloc(sizeof(*as), GFP_ATOMIC);
as = kmalloc(sizeof(*as), GFP_KERNEL);
if (!as)
return 0;
......@@ -477,7 +478,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
return retval;
}
static struct hpsb_address_ops dummy_ops;
const static struct hpsb_address_ops dummy_ops;
/* dummy address spaces as lower and upper bounds of the host's a.s. list */
static void init_hpsb_highlevel(struct hpsb_host *host)
......
......@@ -15,7 +15,7 @@ struct hpsb_host;
struct hpsb_address_serve {
struct list_head host_list; /* per host list */
struct list_head hl_list; /* hpsb_highlevel list */
struct hpsb_address_ops *op;
const struct hpsb_address_ops *op;
struct hpsb_host *host;
u64 start; /* first address handled, quadlet aligned */
u64 end; /* first address behind, quadlet aligned */
......@@ -119,11 +119,12 @@ void hpsb_unregister_highlevel(struct hpsb_highlevel *hl);
u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
struct hpsb_host *host,
struct hpsb_address_ops *ops,
const struct hpsb_address_ops *ops,
u64 size, u64 alignment,
u64 start, u64 end);
int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
struct hpsb_address_ops *ops, u64 start, u64 end);
const struct hpsb_address_ops *ops,
u64 start, u64 end);
int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
u64 start);
......
......@@ -34,18 +34,18 @@ static void delayed_reset_bus(struct work_struct *work)
{
struct hpsb_host *host =
container_of(work, struct hpsb_host, delayed_reset.work);
int generation = host->csr.generation + 1;
u8 generation = host->csr.generation + 1;
/* The generation field rolls over to 2 rather than 0 per IEEE
* 1394a-2000. */
if (generation > 0xf || generation < 2)
generation = 2;
CSR_SET_BUS_INFO_GENERATION(host->csr.rom, generation);
csr_set_bus_info_generation(host->csr.rom, generation);
if (csr1212_generate_csr_image(host->csr.rom) != CSR1212_SUCCESS) {
/* CSR image creation failed.
* Reset generation field and do not issue a bus reset. */
CSR_SET_BUS_INFO_GENERATION(host->csr.rom,
csr_set_bus_info_generation(host->csr.rom,
host->csr.generation);
return;
}
......
......@@ -154,7 +154,7 @@ struct hpsb_host_driver {
* to set the hardware ConfigROM if the hardware supports handling
* reads to the ConfigROM on its own. */
void (*set_hw_config_rom)(struct hpsb_host *host,
quadlet_t *config_rom);
__be32 *config_rom);
/* This function shall implement packet transmission based on
* packet->type. It shall CRC both parts of the packet (unless
......
......@@ -121,6 +121,9 @@ extern const char *hpsb_speedto_str[];
#include <asm/byteorder.h>
/* '1' '3' '9' '4' in ASCII */
#define IEEE1394_BUSID_MAGIC cpu_to_be32(0x31333934)
#ifdef __BIG_ENDIAN_BITFIELD
struct selfid {
......
......@@ -67,7 +67,7 @@ static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
for (i = IEEE1394_SPEED_100; i <= old_speed; i++) {
*speed = i;
error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
&q, sizeof(quadlet_t));
&q, 4);
if (error)
break;
*buffer = q;
......@@ -85,7 +85,7 @@ static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
return error;
}
static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr,
void *buffer, void *__ci)
{
struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
......@@ -93,7 +93,7 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
for (i = 1; ; i++) {
error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
buffer, length);
buffer, 4);
if (!error) {
ci->speed_unverified = 0;
break;
......@@ -104,7 +104,7 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
/* The ieee1394_core guessed the node's speed capability from
* the self ID. Check whether a lower speed works. */
if (ci->speed_unverified && length == sizeof(quadlet_t)) {
if (ci->speed_unverified) {
error = nodemgr_check_speed(ci, addr, buffer);
if (!error)
break;
......@@ -115,20 +115,8 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
return error;
}
#define OUI_FREECOM_TECHNOLOGIES_GMBH 0x0001db
static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
{
/* Freecom FireWire Hard Drive firmware bug */
if (be32_to_cpu(bus_info_data[3]) >> 8 == OUI_FREECOM_TECHNOLOGIES_GMBH)
return 0;
return (be32_to_cpu(bus_info_data[2]) >> 8) & 0x3;
}
static struct csr1212_bus_ops nodemgr_csr_ops = {
.bus_read = nodemgr_bus_read,
.get_max_rom = nodemgr_get_max_rom
};
......
......@@ -31,9 +31,6 @@ struct csr1212_keyval;
struct hpsb_host;
struct ieee1394_device_id;
/* '1' '3' '9' '4' in ASCII */
#define IEEE1394_BUSID_MAGIC __constant_cpu_to_be32(0x31333934)
/* This is the start of a Node entry structure. It should be a stable API
* for which to gather info from the Node Manager about devices attached
* to the bus. */
......
......@@ -2973,7 +2973,7 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
return 0;
}
static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
static void ohci_set_hw_config_rom(struct hpsb_host *host, __be32 *config_rom)
{
struct ti_ohci *ohci = host->hostdata;
......@@ -3199,15 +3199,16 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
/* Now enable LPS, which we need in order to start accessing
* most of the registers. In fact, on some cards (ALI M5251),
* accessing registers in the SClk domain without LPS enabled
* will lock up the machine. Wait 50msec to make sure we have
* full link enabled. */
* will lock up the machine. */
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
/* Disable and clear interrupts */
reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
mdelay(50);
/* Flush MMIO writes and wait to make sure we have full link enabled. */
reg_read(ohci, OHCI1394_Version);
msleep(50);
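
The "flush MMIO writes before delay in initialization" change addresses PCI write posting: the LPS-enable write may still be sitting in a buffer when the delay begins, so the driver reads a register back (OHCI1394_Version) to force the write out to the chip, and it now sleeps with msleep() instead of busy-waiting in mdelay(). The generic shape of that idiom, with hypothetical register names rather than the driver's reg_read()/reg_write() wrappers:

    /*
     * Sketch only: flush a posted MMIO write before a timing-sensitive wait.
     * CONTROL_SET_REG, CONTROL_ENABLE_BIT and VERSION_REG are hypothetical;
     * readl()/writel()/msleep() are the usual kernel primitives.
     */
    static void enable_and_settle(void __iomem *regs)
    {
            writel(CONTROL_ENABLE_BIT, regs + CONTROL_SET_REG);     /* may be posted */
            (void)readl(regs + VERSION_REG);        /* the read forces the write to the chip */
            msleep(50);                             /* the 50 ms now starts after the device
                                                     * has actually seen the enable */
    }
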
/* Determine the number of available IR and IT contexts. */
ohci->nb_iso_rcv_ctx =
......@@ -3233,8 +3234,9 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
* we need to get to that "no event", so enough should be initialized
* by that point.
*/
if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
OHCI1394_DRIVER_NAME, ohci)) {
err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
OHCI1394_DRIVER_NAME, ohci);
if (err) {
PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
goto err;
}
......@@ -3381,6 +3383,7 @@ static int ohci1394_pci_suspend(struct pci_dev *dev, pm_message_t state)
ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
ohci_soft_reset(ohci);
free_irq(dev->irq, ohci);
err = pci_save_state(dev);
if (err) {
PRINT(KERN_ERR, "pci_save_state failed with %d", err);
......@@ -3420,7 +3423,16 @@ static int ohci1394_pci_resume(struct pci_dev *dev)
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
mdelay(50);
reg_read(ohci, OHCI1394_Version);
msleep(50);
err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
OHCI1394_DRIVER_NAME, ohci);
if (err) {
PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
return err;
}
ohci_initialize(ohci);
hpsb_resume_host(ohci->host);
......
......@@ -1463,7 +1463,7 @@ static int __devinit add_card(struct pci_dev *dev,
/* info_length, crc_length and 1394 magic number to check, if it is really a bus info block */
if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
(lynx->bus_info_block[1] == __constant_cpu_to_be32(0x31333934)))
(lynx->bus_info_block[1] == IEEE1394_BUSID_MAGIC))
{
PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
} else {
......
......@@ -52,7 +52,7 @@ struct ti_lynx {
void __iomem *local_rom;
void __iomem *local_ram;
void __iomem *aux_port;
quadlet_t bus_info_block[5];
__be32 bus_info_block[5];
/*
* use local RAM of LOCALRAM_SIZE bytes for PCLs, which allows for
......
......@@ -90,7 +90,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
u16 flags);
static struct hpsb_address_ops arm_ops = {
const static struct hpsb_address_ops arm_ops = {
.read = arm_read,
.write = arm_write,
.lock = arm_lock,
......
......@@ -265,7 +265,7 @@ static struct hpsb_highlevel sbp2_highlevel = {
.host_reset = sbp2_host_reset,
};
static struct hpsb_address_ops sbp2_ops = {
const static struct hpsb_address_ops sbp2_ops = {
.write = sbp2_handle_status_write
};
......@@ -275,7 +275,7 @@ static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *,
static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
size_t, u16);
static struct hpsb_address_ops sbp2_physdma_ops = {
const static struct hpsb_address_ops sbp2_physdma_ops = {
.read = sbp2_handle_physdma_read,
.write = sbp2_handle_physdma_write,
};
......