Commit 78c92a9f authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6:
  firewire: use split transaction timeout only for split transactions
  firewire: ohci: consolidate context status flags
  firewire: ohci: cache the context run bit
  firewire: ohci: flush AT contexts after bus reset - addendum
  firewire: ohci: flush AT contexts after bus reset for OHCI 1.2
  firewire: net: set carrier state at ifup
  firewire: net: add carrier detection
  firewire: net: ratelimit error messages
  firewire: ohci: restart iso DMA contexts on resume from low power mode
  firewire: ohci: restore GUID on resume.
  firewire: ohci: use common buffer for self IDs and AR descriptors
  firewire: ohci: optimize iso context checks in the interrupt handler
  firewire: make PHY packet header format consistent
  firewire: ohci: properly clear posted write errors
  firewire: ohci: flush MMIO writes in the interrupt handler
  firewire: ohci: fix AT context initialization error handling
  firewire: ohci: Asynchronous Reception rewrite
  firewire: core: Update WARN uses
  firewire: nosy: char device is not seekable
parents b65f0d67 410cf2bd
@@ -19,7 +19,7 @@ config FIREWIRE

 config FIREWIRE_OHCI
 	tristate "OHCI-1394 controllers"
-	depends on PCI && FIREWIRE
+	depends on PCI && FIREWIRE && MMU
 	help
 	  Enable this driver if you have a FireWire controller based
 	  on the OHCI specification. For all practical purposes, this
...
@@ -1501,9 +1501,10 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
 	e->client = client;
 	e->p.speed = SCODE_100;
 	e->p.generation = a->generation;
-	e->p.header[0] = a->data[0];
-	e->p.header[1] = a->data[1];
-	e->p.header_length = 8;
+	e->p.header[0] = TCODE_LINK_INTERNAL << 4;
+	e->p.header[1] = a->data[0];
+	e->p.header[2] = a->data[1];
+	e->p.header_length = 12;
 	e->p.callback = outbound_phy_packet_callback;
 	e->phy_packet.closure = a->closure;
 	e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT;
...
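The hunk above mirrors the new in-kernel PHY packet representation: the pseudo tcode 0xe now sits in header[0], and the two PHY quadlets (the data and its bitwise inverse) move to header[1] and header[2]. A minimal standalone sketch of that layout; the quadlet value is only an example, not taken from the commit:

	#include <assert.h>
	#include <stdint.h>

	#define TCODE_LINK_INTERNAL 0xe

	int main(void)
	{
		uint32_t data = 0x00800000;	/* example PHY config quadlet */
		uint32_t header[3];

		header[0] = TCODE_LINK_INTERNAL << 4;	/* pseudo tcode, link-internal */
		header[1] = data;			/* PHY packet payload */
		header[2] = ~data;			/* PHY packets carry the inverse */

		assert(header[2] == ~header[1]);	/* consistency check done on RX */
		return 0;
	}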
@@ -72,6 +72,15 @@
 #define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
 #define PHY_IDENTIFIER(id)		((id) << 30)

+/* returns 0 if the split timeout handler is already running */
+static int try_cancel_split_timeout(struct fw_transaction *t)
+{
+	if (t->is_split_transaction)
+		return del_timer(&t->split_timeout_timer);
+	else
+		return 1;
+}
+
 static int close_transaction(struct fw_transaction *transaction,
 			     struct fw_card *card, int rcode)
 {
@@ -81,7 +90,7 @@ static int close_transaction(struct fw_transaction *transaction,
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(t, &card->transaction_list, link) {
 		if (t == transaction) {
-			if (!del_timer(&t->split_timeout_timer)) {
+			if (!try_cancel_split_timeout(t)) {
 				spin_unlock_irqrestore(&card->lock, flags);
 				goto timed_out;
 			}
@@ -141,16 +150,28 @@ static void split_transaction_timeout_callback(unsigned long data)
 	card->tlabel_mask &= ~(1ULL << t->tlabel);
 	spin_unlock_irqrestore(&card->lock, flags);

-	card->driver->cancel_packet(card, &t->packet);
-
-	/*
-	 * At this point cancel_packet will never call the transaction
-	 * callback, since we just took the transaction out of the list.
-	 * So do it here.
-	 */
 	t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
 }

+static void start_split_transaction_timeout(struct fw_transaction *t,
+					    struct fw_card *card)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
+		spin_unlock_irqrestore(&card->lock, flags);
+		return;
+	}
+
+	t->is_split_transaction = true;
+	mod_timer(&t->split_timeout_timer,
+		  jiffies + card->split_timeout_jiffies);
+
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+
 static void transmit_complete_callback(struct fw_packet *packet,
 				       struct fw_card *card, int status)
 {
@@ -162,7 +183,7 @@ static void transmit_complete_callback(struct fw_packet *packet,
 		close_transaction(t, card, RCODE_COMPLETE);
 		break;
 	case ACK_PENDING:
-		t->timestamp = packet->timestamp;
+		start_split_transaction_timeout(t, card);
 		break;
 	case ACK_BUSY_X:
 	case ACK_BUSY_A:
@@ -250,7 +271,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
 		break;

 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 	}
  common:
 	packet->speed = speed;
@@ -349,11 +370,9 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
 	t->node_id = destination_id;
 	t->tlabel = tlabel;
 	t->card = card;
+	t->is_split_transaction = false;
 	setup_timer(&t->split_timeout_timer,
 		    split_transaction_timeout_callback, (unsigned long)t);
-	/* FIXME: start this timer later, relative to t->timestamp */
-	mod_timer(&t->split_timeout_timer,
-		  jiffies + card->split_timeout_jiffies);
 	t->callback = callback;
 	t->callback_data = callback_data;

@@ -423,7 +442,8 @@ static void transmit_phy_packet_callback(struct fw_packet *packet,
 }

 static struct fw_packet phy_config_packet = {
-	.header_length	= 8,
+	.header_length	= 12,
+	.header[0]	= TCODE_LINK_INTERNAL << 4,
 	.payload_length	= 0,
 	.speed		= SCODE_100,
 	.callback	= transmit_phy_packet_callback,
@@ -451,8 +471,8 @@ void fw_send_phy_config(struct fw_card *card,

 	mutex_lock(&phy_config_mutex);

-	phy_config_packet.header[0] = data;
-	phy_config_packet.header[1] = ~data;
+	phy_config_packet.header[1] = data;
+	phy_config_packet.header[2] = ~data;
 	phy_config_packet.generation = generation;
 	INIT_COMPLETION(phy_config_done);

@@ -638,7 +658,7 @@ int fw_get_response_length(struct fw_request *r)
 	}

 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 		return 0;
 	}
 }
@@ -694,7 +714,7 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
 		break;

 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 	}

 	response->payload_mapped = false;
@@ -925,7 +945,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(t, &card->transaction_list, link) {
 		if (t->node_id == source && t->tlabel == tlabel) {
-			if (!del_timer(&t->split_timeout_timer)) {
+			if (!try_cancel_split_timeout(t)) {
 				spin_unlock_irqrestore(&card->lock, flags);
 				goto timed_out;
 			}
...
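The try_cancel_split_timeout() helper added above leans on del_timer()'s return value: it returns nonzero only if it deactivated a timer that was still pending, so a zero result means the timeout handler already fired and owns the transaction. A userspace model of that "whoever claims the pending flag first wins" pattern; this is an illustration only, not the kernel timer API:

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	struct transaction {
		atomic_bool timer_pending;	/* models the armed split timer */
		bool is_split_transaction;
	};

	static bool model_del_timer(struct transaction *t)
	{
		/* del_timer() returns nonzero only if the timer was still pending */
		return atomic_exchange(&t->timer_pending, false);
	}

	static bool try_cancel(struct transaction *t)
	{
		if (t->is_split_transaction)
			return model_del_timer(t);
		else
			return true;	/* no timer was armed, nothing can race */
	}

	int main(void)
	{
		struct transaction t = { .is_split_transaction = true };

		atomic_store(&t.timer_pending, true);	/* ACK_PENDING arms the timer */
		assert(try_cancel(&t));		/* we won the race: complete it */
		assert(!try_cancel(&t));	/* timer already fired: back off */
		return 0;
	}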
@@ -215,9 +215,11 @@ static inline bool is_next_generation(int new_generation, int old_generation)

 /* -transaction */

+#define TCODE_LINK_INTERNAL		0xe
+
 #define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
 #define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
-#define TCODE_IS_LINK_INTERNAL(tcode)	((tcode) == 0xe)
+#define TCODE_IS_LINK_INTERNAL(tcode)	((tcode) == TCODE_LINK_INTERNAL)
 #define TCODE_IS_REQUEST(tcode)		(((tcode) & 2) == 0)
 #define TCODE_IS_RESPONSE(tcode)	(((tcode) & 2) != 0)
 #define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
...
@@ -9,6 +9,7 @@
 #include <linux/bug.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/ethtool.h>
 #include <linux/firewire.h>
 #include <linux/firewire-constants.h>
 #include <linux/highmem.h>
@@ -179,6 +180,7 @@ struct fwnet_device {

 	/* Number of tx datagrams that have been queued but not yet acked */
 	int queued_datagrams;
+	int peer_count;

 	struct list_head peer_list;
 	struct fw_card *card;
 	struct net_device *netdev;
@@ -996,15 +998,23 @@ static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask)
 static void fwnet_write_complete(struct fw_card *card, int rcode,
 				 void *payload, size_t length, void *data)
 {
-	struct fwnet_packet_task *ptask;
-
-	ptask = data;
+	struct fwnet_packet_task *ptask = data;
+	static unsigned long j;
+	static int last_rcode, errors_skipped;

 	if (rcode == RCODE_COMPLETE) {
 		fwnet_transmit_packet_done(ptask);
 	} else {
-		fw_error("fwnet_write_complete: failed: %x\n", rcode);
-
 		fwnet_transmit_packet_failed(ptask);
+
+		if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
+			fw_error("fwnet_write_complete: "
+				 "failed: %x (skipped %d)\n", rcode, errors_skipped);
+			errors_skipped = 0;
+			last_rcode = rcode;
+		} else
+			errors_skipped++;
 	}
 }

@@ -1213,6 +1223,14 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
 	return retval;
 }

+static void set_carrier_state(struct fwnet_device *dev)
+{
+	if (dev->peer_count > 1)
+		netif_carrier_on(dev->netdev);
+	else
+		netif_carrier_off(dev->netdev);
+}
+
 /* ifup */
 static int fwnet_open(struct net_device *net)
 {
@@ -1226,6 +1244,10 @@ static int fwnet_open(struct net_device *net)
 	}
 	netif_start_queue(net);

+	spin_lock_irq(&dev->lock);
+	set_carrier_state(dev);
+	spin_unlock_irq(&dev->lock);
+
 	return 0;
 }

@@ -1397,6 +1419,10 @@ static int fwnet_change_mtu(struct net_device *net, int new_mtu)
 	return 0;
 }

+static const struct ethtool_ops fwnet_ethtool_ops = {
+	.get_link = ethtool_op_get_link,
+};
+
 static const struct net_device_ops fwnet_netdev_ops = {
 	.ndo_open = fwnet_open,
 	.ndo_stop = fwnet_stop,
@@ -1415,6 +1441,7 @@ static void fwnet_init_dev(struct net_device *net)
 	net->hard_header_len = FWNET_HLEN;
 	net->type = ARPHRD_IEEE1394;
 	net->tx_queue_len = FWNET_TX_QUEUE_LEN;
+	net->ethtool_ops = &fwnet_ethtool_ops;
 }

 /* caller must hold fwnet_device_mutex */
@@ -1455,6 +1482,8 @@ static int fwnet_add_peer(struct fwnet_device *dev,

 	spin_lock_irq(&dev->lock);
 	list_add_tail(&peer->peer_link, &dev->peer_list);
+	dev->peer_count++;
+	set_carrier_state(dev);
 	spin_unlock_irq(&dev->lock);

 	return 0;
@@ -1535,13 +1564,15 @@ static int fwnet_probe(struct device *_dev)
 	return ret;
 }

-static void fwnet_remove_peer(struct fwnet_peer *peer)
+static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev)
 {
 	struct fwnet_partial_datagram *pd, *pd_next;

-	spin_lock_irq(&peer->dev->lock);
+	spin_lock_irq(&dev->lock);
 	list_del(&peer->peer_link);
-	spin_unlock_irq(&peer->dev->lock);
+	dev->peer_count--;
+	set_carrier_state(dev);
+	spin_unlock_irq(&dev->lock);

 	list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
 		fwnet_pd_delete(pd);
@@ -1558,7 +1589,7 @@ static int fwnet_remove(struct device *_dev)

 	mutex_lock(&fwnet_device_mutex);

-	fwnet_remove_peer(peer);
+	fwnet_remove_peer(peer, dev);

 	if (list_empty(&dev->peer_list)) {
 		net = dev->netdev;
...
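The fwnet_write_complete() hunk above throttles the transmit-error message to roughly one per second via printk_timed_ratelimit(), while still reporting immediately when the error code changes and counting what was suppressed. The same logic in a runnable userspace sketch; report_error() and the 0x10/0x11 codes are made up for the demonstration:

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	static bool timed_ratelimit(struct timespec *last, long interval_ms)
	{
		struct timespec now;
		long elapsed_ms;

		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - last->tv_sec) * 1000
			   + (now.tv_nsec - last->tv_nsec) / 1000000;
		if (last->tv_sec == 0 || elapsed_ms >= interval_ms) {
			*last = now;	/* interval expired: allow one message */
			return true;
		}
		return false;
	}

	static void report_error(int rcode)
	{
		static struct timespec last;
		static int last_rcode, errors_skipped;

		if (timed_ratelimit(&last, 1000) || rcode != last_rcode) {
			fprintf(stderr, "write failed: %x (skipped %d)\n",
				rcode, errors_skipped);
			errors_skipped = 0;
			last_rcode = rcode;
		} else {
			errors_skipped++;	/* suppressed, but accounted for */
		}
	}

	int main(void)
	{
		for (int i = 0; i < 1000000; i++)
			report_error(i < 500000 ? 0x10 : 0x11);
		return 0;
	}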
@@ -302,7 +302,7 @@ nosy_open(struct inode *inode, struct file *file)

 	file->private_data = client;

-	return 0;
+	return nonseekable_open(inode, file);
 fail:
 	kfree(client);
 	lynx_put(lynx);
@@ -405,7 +405,6 @@ static const struct file_operations nosy_ops = {
 	.poll =		nosy_poll,
 	.open =		nosy_open,
 	.release =	nosy_release,
-	.llseek =	noop_llseek,
 };

 #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
...
@@ -18,6 +18,7 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */

+#include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
@@ -40,6 +41,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/time.h>
+#include <linux/vmalloc.h>

 #include <asm/byteorder.h>
 #include <asm/page.h>
@@ -80,17 +82,23 @@ struct descriptor {
 #define COMMAND_PTR(regs)	((regs) + 12)
 #define CONTEXT_MATCH(regs)	((regs) + 16)

-struct ar_buffer {
-	struct descriptor descriptor;
-	struct ar_buffer *next;
-	__le32 data[0];
-};
+#define AR_BUFFER_SIZE	(32*1024)
+#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
+/* we need at least two pages for proper list management */
+#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
+
+#define MAX_ASYNC_PAYLOAD	4096
+#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
+#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)

 struct ar_context {
 	struct fw_ohci *ohci;
-	struct ar_buffer *current_buffer;
-	struct ar_buffer *last_buffer;
+	struct page *pages[AR_BUFFERS];
+	void *buffer;
+	struct descriptor *descriptors;
+	dma_addr_t descriptors_bus;
 	void *pointer;
+	unsigned int last_buffer_index;
 	u32 regs;
 	struct tasklet_struct tasklet;
 };
@@ -117,6 +125,8 @@ struct context {
 	struct fw_ohci *ohci;
 	u32 regs;
 	int total_allocation;
+	bool running;
+	bool flushing;

 	/*
 	 * List of page-sized buffers for storing DMA descriptors.
@@ -161,6 +171,9 @@ struct iso_context {
 	int excess_bytes;
 	void *header;
 	size_t header_length;
+
+	u8 sync;
+	u8 tags;
 };

 #define CONFIG_ROM_SIZE 1024
@@ -177,7 +190,8 @@ struct fw_ohci {
 	u32 bus_time;
 	bool is_root;
 	bool csr_state_setclear_abdicate;
-
+	int n_ir;
+	int n_it;
 	/*
 	 * Spinlock for accessing fw_ohci data.  Never call out of
 	 * this driver with this lock held.
@@ -186,6 +200,9 @@ struct fw_ohci {

 	struct mutex phy_reg_mutex;

+	void *misc_buffer;
+	dma_addr_t misc_buffer_bus;
+
 	struct ar_context ar_request_ctx;
 	struct ar_context ar_response_ctx;
 	struct context at_request_ctx;
@@ -411,10 +428,6 @@ static const char *tcodes[] = {
 	[0xc] = "-reserved-",		[0xd] = "-reserved-",
 	[0xe] = "link internal",	[0xf] = "-reserved-",
 };
-static const char *phys[] = {
-	[0x0] = "phy config packet",	[0x1] = "link-on packet",
-	[0x2] = "self-id packet",	[0x3] = "-reserved-",
-};

 static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
 {
@@ -433,12 +446,6 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
 		return;
 	}

-	if (header[0] == ~header[1]) {
-		fw_notify("A%c %s, %s, %08x\n",
-		    dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
-		return;
-	}
-
 	switch (tcode) {
 	case 0x0: case 0x6: case 0x8:
 		snprintf(specific, sizeof(specific), " = %08x",
@@ -453,9 +460,13 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
 	}

 	switch (tcode) {
-	case 0xe: case 0xa:
+	case 0xa:
 		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
 		break;
+	case 0xe:
+		fw_notify("A%c %s, PHY %08x %08x\n",
+			  dir, evts[evt], header[1], header[2]);
+		break;
 	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
 		fw_notify("A%c spd %x tl %02x, "
 			  "%04x -> %04x, %s, "
@@ -594,59 +605,150 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr,
 	return ret;
 }

-static void ar_context_link_page(struct ar_context *ctx,
-				 struct ar_buffer *ab, dma_addr_t ab_bus)
+static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
+{
+	return page_private(ctx->pages[i]);
+}
+
+static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
 {
-	size_t offset;
+	struct descriptor *d;

-	ab->next = NULL;
-	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
-	ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
-					     DESCRIPTOR_STATUS |
-					     DESCRIPTOR_BRANCH_ALWAYS);
-	offset = offsetof(struct ar_buffer, data);
-	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
-	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
-	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
-	ab->descriptor.branch_address = 0;
+	d = &ctx->descriptors[index];
+	d->branch_address &= cpu_to_le32(~0xf);
+	d->res_count = cpu_to_le16(PAGE_SIZE);
+	d->transfer_status = 0;

 	wmb(); /* finish init of new descriptors before branch_address update */
-	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
-	ctx->last_buffer->next = ab;
-	ctx->last_buffer = ab;
+	d = &ctx->descriptors[ctx->last_buffer_index];
+	d->branch_address |= cpu_to_le32(1);
+
+	ctx->last_buffer_index = index;

 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
 }

-static int ar_context_add_page(struct ar_context *ctx)
+static void ar_context_release(struct ar_context *ctx)
 {
-	struct device *dev = ctx->ohci->card.device;
-	struct ar_buffer *ab;
-	dma_addr_t uninitialized_var(ab_bus);
+	unsigned int i;

-	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
-	if (ab == NULL)
-		return -ENOMEM;
+	if (ctx->buffer)
+		vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);

-	ar_context_link_page(ctx, ab, ab_bus);
+	for (i = 0; i < AR_BUFFERS; i++)
+		if (ctx->pages[i]) {
+			dma_unmap_page(ctx->ohci->card.device,
+				       ar_buffer_bus(ctx, i),
+				       PAGE_SIZE, DMA_FROM_DEVICE);
+			__free_page(ctx->pages[i]);
+		}
+}

-	return 0;
+static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
+{
+	if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
+		reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+		flush_writes(ctx->ohci);
+
+		fw_error("AR error: %s; DMA stopped\n", error_msg);
+	}
+	/* FIXME: restart? */
 }

-static void ar_context_release(struct ar_context *ctx)
+static inline unsigned int ar_next_buffer_index(unsigned int index)
+{
+	return (index + 1) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_prev_buffer_index(unsigned int index)
+{
+	return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
+{
+	return ar_next_buffer_index(ctx->last_buffer_index);
+}
+
+/*
+ * We search for the buffer that contains the last AR packet DMA data written
+ * by the controller.
+ */
+static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
+						 unsigned int *buffer_offset)
+{
+	unsigned int i, next_i, last = ctx->last_buffer_index;
+	__le16 res_count, next_res_count;
+
+	i = ar_first_buffer_index(ctx);
+	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+
+	/* A buffer that is not yet completely filled must be the last one. */
+	while (i != last && res_count == 0) {
+
+		/* Peek at the next descriptor. */
+		next_i = ar_next_buffer_index(i);
+		rmb(); /* read descriptors in order */
+		next_res_count = ACCESS_ONCE(
+				ctx->descriptors[next_i].res_count);
+		/*
+		 * If the next descriptor is still empty, we must stop at this
+		 * descriptor.
+		 */
+		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
+			/*
+			 * The exception is when the DMA data for one packet is
+			 * split over three buffers; in this case, the middle
+			 * buffer's descriptor might be never updated by the
+			 * controller and look still empty, and we have to peek
+			 * at the third one.
+			 */
+			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
+				next_i = ar_next_buffer_index(next_i);
+				rmb();
+				next_res_count = ACCESS_ONCE(
					ctx->descriptors[next_i].res_count);
+				if (next_res_count != cpu_to_le16(PAGE_SIZE))
+					goto next_buffer_is_active;
+			}
+
+			break;
+		}
+
+next_buffer_is_active:
+		i = next_i;
+		res_count = next_res_count;
+	}
+
+	rmb(); /* read res_count before the DMA data */
+
+	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
+	if (*buffer_offset > PAGE_SIZE) {
+		*buffer_offset = 0;
+		ar_context_abort(ctx, "corrupted descriptor");
+	}
+
+	return i;
+}
+
+static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
+				    unsigned int end_buffer_index,
+				    unsigned int end_buffer_offset)
 {
-	struct ar_buffer *ab, *ab_next;
-	size_t offset;
-	dma_addr_t ab_bus;
+	unsigned int i;

-	for (ab = ctx->current_buffer; ab; ab = ab_next) {
-		ab_next = ab->next;
-		offset = offsetof(struct ar_buffer, data);
-		ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-		dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
-				  ab, ab_bus);
+	i = ar_first_buffer_index(ctx);
+	while (i != end_buffer_index) {
+		dma_sync_single_for_cpu(ctx->ohci->card.device,
+					ar_buffer_bus(ctx, i),
+					PAGE_SIZE, DMA_FROM_DEVICE);
+		i = ar_next_buffer_index(i);
 	}
+
+	if (end_buffer_offset > 0)
+		dma_sync_single_for_cpu(ctx->ohci->card.device,
+					ar_buffer_bus(ctx, i),
+					end_buffer_offset, DMA_FROM_DEVICE);
 }

 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
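A detail worth calling out in the AR rewrite above: vm_map_ram() maps the first AR_WRAPAROUND_PAGES of the ring a second time directly behind its end, so a packet whose DMA data wraps around the ring boundary can still be parsed as one contiguous span. The same trick sketched in userspace with memfd_create() and two fixed mappings; this is Linux-specific, and the page counts are arbitrary stand-ins:

	#define _GNU_SOURCE
	#include <assert.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		size_t ring = 4 * page;	/* stands in for AR_BUFFERS pages */
		size_t wrap = 1 * page;	/* stands in for AR_WRAPAROUND_PAGES */
		int fd = memfd_create("ring", 0);

		if (fd < 0 || ftruncate(fd, ring) != 0)
			return 1;

		/* reserve one contiguous VA range, then map the file into it twice */
		char *base = mmap(NULL, ring + wrap, PROT_NONE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (base == MAP_FAILED ||
		    mmap(base, ring, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_FIXED, fd, 0) != base ||
		    mmap(base + ring, wrap, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_FIXED, fd, 0) != base + ring)
			return 1;

		/* a write that wraps the ring end reads back as one contiguous span */
		memset(base + ring - 8, 'A', 8);	/* tail of the ring */
		memset(base, 'B', 8);			/* head, aliased past the end */
		assert(memcmp(base + ring - 8, "AAAAAAAABBBBBBBB", 16) == 0);
		return 0;
	}

This is also why the Kconfig hunk adds a dependency on MMU: the aliased mapping needs real virtual-memory remapping.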
@@ -689,6 +791,10 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 		p.header[3] = cond_le32_to_cpu(buffer[3]);
 		p.header_length = 16;
 		p.payload_length = p.header[3] >> 16;
+		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
+			ar_context_abort(ctx, "invalid packet length");
+			return NULL;
+		}
 		break;

 	case TCODE_WRITE_RESPONSE:
@@ -699,9 +805,8 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 		break;

 	default:
-		/* FIXME: Stop context, discard everything, and restart? */
-		p.header_length = 0;
-		p.payload_length = 0;
+		ar_context_abort(ctx, "invalid tcode");
+		return NULL;
 	}

 	p.payload = (void *) buffer + p.header_length;
@@ -751,121 +856,147 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 	return buffer + length + 1;
 }

+static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
+{
+	void *next;
+
+	while (p < end) {
+		next = handle_ar_packet(ctx, p);
+		if (!next)
+			return p;
+		p = next;
+	}
+
+	return p;
+}
+
+static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
+{
+	unsigned int i;
+
+	i = ar_first_buffer_index(ctx);
+	while (i != end_buffer) {
+		dma_sync_single_for_device(ctx->ohci->card.device,
+					   ar_buffer_bus(ctx, i),
+					   PAGE_SIZE, DMA_FROM_DEVICE);
+		ar_context_link_page(ctx, i);
+		i = ar_next_buffer_index(i);
+	}
+}
+
 static void ar_context_tasklet(unsigned long data)
 {
 	struct ar_context *ctx = (struct ar_context *)data;
-	struct ar_buffer *ab;
-	struct descriptor *d;
-	void *buffer, *end;
-	__le16 res_count;
-
-	ab = ctx->current_buffer;
-	d = &ab->descriptor;
-
-	res_count = ACCESS_ONCE(d->res_count);
-	if (res_count == 0) {
-		size_t size, size2, rest, pktsize, size3, offset;
-		dma_addr_t start_bus;
-		void *start;
-
-		/*
-		 * This descriptor is finished and we may have a
-		 * packet split across this and the next buffer. We
-		 * reuse the page for reassembling the split packet.
-		 */
-
-		offset = offsetof(struct ar_buffer, data);
-		start = ab;
-		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-		buffer = ab->data;
-
-		ab = ab->next;
-		d = &ab->descriptor;
-		size = start + PAGE_SIZE - ctx->pointer;
-		/* valid buffer data in the next page */
-		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
-		/* what actually fits in this page */
-		size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
-		memmove(buffer, ctx->pointer, size);
-		memcpy(buffer + size, ab->data, size2);
-
-		while (size > 0) {
-			void *next = handle_ar_packet(ctx, buffer);
-			pktsize = next - buffer;
-			if (pktsize >= size) {
-				/*
-				 * We have handled all the data that was
-				 * originally in this page, so we can now
-				 * continue in the next page.
-				 */
-				buffer = next;
-				break;
-			}
-			/* move the next packet to the start of the buffer */
-			memmove(buffer, next, size + size2 - pktsize);
-			size -= pktsize;
-			/* fill up this page again */
-			size3 = min(rest - size2,
-				    (size_t)PAGE_SIZE - offset - size - size2);
-			memcpy(buffer + size + size2,
-			       (void *) ab->data + size2, size3);
-			size2 += size3;
-		}
-
-		if (rest > 0) {
-			/* handle the packets that are fully in the next page */
-			buffer = (void *) ab->data +
-					(buffer - (start + offset + size));
-			end = (void *) ab->data + rest;
-
-			while (buffer < end)
-				buffer = handle_ar_packet(ctx, buffer);
-
-			ctx->current_buffer = ab;
-			ctx->pointer = end;
+	unsigned int end_buffer_index, end_buffer_offset;
+	void *p, *end;

-			ar_context_link_page(ctx, start, start_bus);
-		} else {
-			ctx->pointer = start + PAGE_SIZE;
-		}
-	} else {
-		buffer = ctx->pointer;
-		ctx->pointer = end =
-			(void *) ab + PAGE_SIZE - le16_to_cpu(res_count);
+	p = ctx->pointer;
+	if (!p)
+		return;

-		while (buffer < end)
-			buffer = handle_ar_packet(ctx, buffer);
+	end_buffer_index = ar_search_last_active_buffer(ctx,
+							&end_buffer_offset);
+	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
+	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
+
+	if (end_buffer_index < ar_first_buffer_index(ctx)) {
+		/*
+		 * The filled part of the overall buffer wraps around; handle
+		 * all packets up to the buffer end here.  If the last packet
+		 * wraps around, its tail will be visible after the buffer end
+		 * because the buffer start pages are mapped there again.
+		 */
+		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
+		p = handle_ar_packets(ctx, p, buffer_end);
+		if (p < buffer_end)
+			goto error;
+		/* adjust p to point back into the actual buffer */
+		p -= AR_BUFFERS * PAGE_SIZE;
 	}
+
+	p = handle_ar_packets(ctx, p, end);
+	if (p != end) {
+		if (p > end)
+			ar_context_abort(ctx, "inconsistent descriptor");
+		goto error;
+	}
+
+	ctx->pointer = p;
+	ar_recycle_buffers(ctx, end_buffer_index);
+
+	return;
+
+error:
+	ctx->pointer = NULL;
 }
-static int ar_context_init(struct ar_context *ctx,
-			   struct fw_ohci *ohci, u32 regs)
+static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
+			   unsigned int descriptors_offset, u32 regs)
 {
-	struct ar_buffer ab;
+	unsigned int i;
+	dma_addr_t dma_addr;
+	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
+	struct descriptor *d;

 	ctx->regs = regs;
 	ctx->ohci = ohci;
-	ctx->last_buffer = &ab;
 	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

-	ar_context_add_page(ctx);
-	ar_context_add_page(ctx);
-	ctx->current_buffer = ab.next;
-	ctx->pointer = ctx->current_buffer->data;
+	for (i = 0; i < AR_BUFFERS; i++) {
+		ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
+		if (!ctx->pages[i])
+			goto out_of_memory;
+		dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
+					0, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(ohci->card.device, dma_addr)) {
+			__free_page(ctx->pages[i]);
+			ctx->pages[i] = NULL;
+			goto out_of_memory;
+		}
+		set_page_private(ctx->pages[i], dma_addr);
+	}
+
+	for (i = 0; i < AR_BUFFERS; i++)
+		pages[i] = ctx->pages[i];
+	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
+		pages[AR_BUFFERS + i] = ctx->pages[i];
+	ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
+				 -1, PAGE_KERNEL_RO);
+	if (!ctx->buffer)
+		goto out_of_memory;
+
+	ctx->descriptors = ohci->misc_buffer + descriptors_offset;
+	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
+
+	for (i = 0; i < AR_BUFFERS; i++) {
+		d = &ctx->descriptors[i];
+		d->req_count = cpu_to_le16(PAGE_SIZE);
+		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+					 DESCRIPTOR_STATUS |
+					 DESCRIPTOR_BRANCH_ALWAYS);
+		d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
+		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
+			ar_next_buffer_index(i) * sizeof(struct descriptor));
+	}

 	return 0;
+
+out_of_memory:
+	ar_context_release(ctx);
+
+	return -ENOMEM;
 }

 static void ar_context_run(struct ar_context *ctx)
 {
-	struct ar_buffer *ab = ctx->current_buffer;
-	dma_addr_t ab_bus;
-	size_t offset;
+	unsigned int i;
+
+	for (i = 0; i < AR_BUFFERS; i++)
+		ar_context_link_page(ctx, i);

-	offset = offsetof(struct ar_buffer, data);
-	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+	ctx->pointer = ctx->buffer;

-	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
+	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
 	flush_writes(ctx->ohci);
 }
@@ -1042,6 +1173,7 @@ static void context_run(struct context *ctx, u32 extra)
 		  le32_to_cpu(ctx->last->branch_address));
 	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
 	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
+	ctx->running = true;
 	flush_writes(ohci);
 }
@@ -1069,6 +1201,7 @@ static void context_stop(struct context *ctx)
 	int i;

 	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+	ctx->running = false;
 	flush_writes(ctx->ohci);

 	for (i = 0; i < 10; i++) {
struct descriptor *d, *last; struct descriptor *d, *last;
__le32 *header; __le32 *header;
int z, tcode; int z, tcode;
u32 reg;
d = context_get_descriptors(ctx, 4, &d_bus); d = context_get_descriptors(ctx, 4, &d_bus);
if (d == NULL) { if (d == NULL) {
...@@ -1113,21 +1245,27 @@ static int at_context_queue_packet(struct context *ctx, ...@@ -1113,21 +1245,27 @@ static int at_context_queue_packet(struct context *ctx,
@@ -1113,21 +1245,27 @@ static int at_context_queue_packet(struct context *ctx,
 	/*
 	 * The DMA format for asyncronous link packets is different
 	 * from the IEEE1394 layout, so shift the fields around
-	 * accordingly.  If header_length is 8, it's a PHY packet, to
-	 * which we need to prepend an extra quadlet.
+	 * accordingly.
 	 */

+	tcode = (packet->header[0] >> 4) & 0x0f;
 	header = (__le32 *) &d[1];
-	switch (packet->header_length) {
-	case 16:
-	case 12:
+	switch (tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+	case TCODE_WRITE_BLOCK_REQUEST:
+	case TCODE_WRITE_RESPONSE:
+	case TCODE_READ_QUADLET_REQUEST:
+	case TCODE_READ_BLOCK_REQUEST:
+	case TCODE_READ_QUADLET_RESPONSE:
+	case TCODE_READ_BLOCK_RESPONSE:
+	case TCODE_LOCK_REQUEST:
+	case TCODE_LOCK_RESPONSE:
 		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
 					(packet->speed << 16));
 		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
 					(packet->header[0] & 0xffff0000));
 		header[2] = cpu_to_le32(packet->header[2]);

-		tcode = (packet->header[0] >> 4) & 0x0f;
 		if (TCODE_IS_BLOCK_PACKET(tcode))
 			header[3] = cpu_to_le32(packet->header[3]);
 		else
@@ -1136,18 +1274,18 @@ static int at_context_queue_packet(struct context *ctx,
 		d[0].req_count = cpu_to_le16(packet->header_length);
 		break;

-	case 8:
+	case TCODE_LINK_INTERNAL:
 		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
 					(packet->speed << 16));
-		header[1] = cpu_to_le32(packet->header[0]);
-		header[2] = cpu_to_le32(packet->header[1]);
+		header[1] = cpu_to_le32(packet->header[1]);
+		header[2] = cpu_to_le32(packet->header[2]);
 		d[0].req_count = cpu_to_le16(12);

-		if (is_ping_packet(packet->header))
+		if (is_ping_packet(&packet->header[1]))
 			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
 		break;

-	case 4:
+	case TCODE_STREAM_DATA:
 		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
 					(packet->speed << 16));
 		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
@@ -1197,6 +1335,8 @@ static int at_context_queue_packet(struct context *ctx,
 	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
 	 * up stalling out.  So we just bail out in software and try again
 	 * later, and everyone is happy.
+	 * FIXME: Test of IntEvent.busReset may no longer be necessary since we
+	 *        flush AT queues in bus_reset_tasklet.
 	 * FIXME: Document how the locking works.
 	 */
 	if (ohci->generation != packet->generation ||
@@ -1210,14 +1350,23 @@ static int at_context_queue_packet(struct context *ctx,

 	context_append(ctx, d, z, 4 - z);

-	/* If the context isn't already running, start it up. */
-	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
-	if ((reg & CONTEXT_RUN) == 0)
+	if (!ctx->running)
 		context_run(ctx, 0);

 	return 0;
 }

+static void at_context_flush(struct context *ctx)
+{
+	tasklet_disable(&ctx->tasklet);
+
+	ctx->flushing = true;
+	context_tasklet((unsigned long)ctx);
+	ctx->flushing = false;
+
+	tasklet_enable(&ctx->tasklet);
+}
+
 static int handle_at_packet(struct context *context,
 			    struct descriptor *d,
 			    struct descriptor *last)
@@ -1227,7 +1376,7 @@ static int handle_at_packet(struct context *context,
 	struct fw_ohci *ohci = context->ohci;
 	int evt;

-	if (last->transfer_status == 0)
+	if (last->transfer_status == 0 && !context->flushing)
 		/* This descriptor isn't done yet, stop iteration. */
 		return 0;

@@ -1261,11 +1410,15 @@ static int handle_at_packet(struct context *context,
 		break;

 	case OHCI1394_evt_missing_ack:
-		/*
-		 * Using a valid (current) generation count, but the
-		 * node is not on the bus or not sending acks.
-		 */
-		packet->ack = RCODE_NO_ACK;
+		if (context->flushing)
+			packet->ack = RCODE_GENERATION;
+		else {
+			/*
+			 * Using a valid (current) generation count, but the
+			 * node is not on the bus or not sending acks.
+			 */
+			packet->ack = RCODE_NO_ACK;
+		}
 		break;

 	case ACK_COMPLETE + 0x10:
@@ -1278,6 +1431,13 @@ static int handle_at_packet(struct context *context,
 		packet->ack = evt - 0x10;
 		break;

+	case OHCI1394_evt_no_status:
+		if (context->flushing) {
+			packet->ack = RCODE_GENERATION;
+			break;
+		}
+		/* fall through */
+
 	default:
 		packet->ack = RCODE_SEND_ERROR;
 		break;
@@ -1583,9 +1743,23 @@ static void bus_reset_tasklet(unsigned long data)
 	/* FIXME: Document how the locking works. */
 	spin_lock_irqsave(&ohci->lock, flags);

-	ohci->generation = generation;
+	ohci->generation = -1; /* prevent AT packet queueing */
 	context_stop(&ohci->at_request_ctx);
 	context_stop(&ohci->at_response_ctx);
+
+	spin_unlock_irqrestore(&ohci->lock, flags);
+
+	/*
+	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
+	 * packets in the AT queues and software needs to drain them.
+	 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
+	 */
+	at_context_flush(&ohci->at_request_ctx);
+	at_context_flush(&ohci->at_response_ctx);
+
+	spin_lock_irqsave(&ohci->lock, flags);
+
+	ohci->generation = generation;
 	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

 	if (ohci->quirks & QUIRK_RESET_PACKET)
@@ -1653,8 +1827,12 @@ static irqreturn_t irq_handler(int irq, void *data)
 	if (!event || !~event)
 		return IRQ_NONE;

-	/* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
-	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
+	/*
+	 * busReset and postedWriteErr must not be cleared yet
+	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
+	 */
+	reg_write(ohci, OHCI1394_IntEventClear,
+		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
 	log_irqs(event);

 	if (event & OHCI1394_selfIDComplete)
@@ -1672,30 +1850,41 @@ static irqreturn_t irq_handler(int irq, void *data)
 	if (event & OHCI1394_respTxComplete)
 		tasklet_schedule(&ohci->at_response_ctx.tasklet);

-	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
-	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
+	if (event & OHCI1394_isochRx) {
+		iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
+		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

-	while (iso_event) {
-		i = ffs(iso_event) - 1;
-		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
-		iso_event &= ~(1 << i);
+		while (iso_event) {
+			i = ffs(iso_event) - 1;
+			tasklet_schedule(
+				&ohci->ir_context_list[i].context.tasklet);
+			iso_event &= ~(1 << i);
+		}
 	}

-	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
-	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
+	if (event & OHCI1394_isochTx) {
+		iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
+		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

-	while (iso_event) {
-		i = ffs(iso_event) - 1;
-		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
-		iso_event &= ~(1 << i);
+		while (iso_event) {
+			i = ffs(iso_event) - 1;
+			tasklet_schedule(
+				&ohci->it_context_list[i].context.tasklet);
+			iso_event &= ~(1 << i);
+		}
 	}

 	if (unlikely(event & OHCI1394_regAccessFail))
 		fw_error("Register access failure - "
 			 "please notify linux1394-devel@lists.sf.net\n");

-	if (unlikely(event & OHCI1394_postedWriteErr))
+	if (unlikely(event & OHCI1394_postedWriteErr)) {
+		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
+		reg_read(ohci, OHCI1394_PostedWriteAddressLo);
+		reg_write(ohci, OHCI1394_IntEventClear,
+			  OHCI1394_postedWriteErr);
 		fw_error("PCI posted write error\n");
+	}

 	if (unlikely(event & OHCI1394_cycleTooLong)) {
 		if (printk_ratelimit())
@@ -1719,7 +1908,8 @@ static irqreturn_t irq_handler(int irq, void *data)
 		spin_lock(&ohci->lock);
 		update_bus_time(ohci);
 		spin_unlock(&ohci->lock);
-	}
+	} else
+		flush_writes(ohci);

 	return IRQ_HANDLED;
 }
@@ -2495,6 +2685,10 @@ static int ohci_start_iso(struct fw_iso_context *base,
 		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
 		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
 		context_run(&ctx->context, control);
+
+		ctx->sync = sync;
+		ctx->tags = tags;
+
 		break;
 	}
@@ -2592,6 +2786,26 @@ static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
 	return ret;
 }

+#ifdef CONFIG_PM
+static void ohci_resume_iso_dma(struct fw_ohci *ohci)
+{
+	int i;
+	struct iso_context *ctx;
+
+	for (i = 0 ; i < ohci->n_ir ; i++) {
+		ctx = &ohci->ir_context_list[i];
+		if (ctx->context.running)
+			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
+	}
+
+	for (i = 0 ; i < ohci->n_it ; i++) {
+		ctx = &ohci->it_context_list[i];
+		if (ctx->context.running)
+			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
+	}
+}
+#endif
+
 static int queue_iso_transmit(struct iso_context *ctx,
 			      struct fw_iso_packet *packet,
 			      struct fw_iso_buffer *buffer,
@@ -2901,7 +3115,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
 	struct fw_ohci *ohci;
 	u32 bus_options, max_receive, link_speed, version;
 	u64 guid;
-	int i, err, n_ir, n_it;
+	int i, err;
 	size_t size;

 	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
if (param_quirks) if (param_quirks)
ohci->quirks = param_quirks; ohci->quirks = param_quirks;
ar_context_init(&ohci->ar_request_ctx, ohci, /*
* Because dma_alloc_coherent() allocates at least one page,
* we save space by using a common buffer for the AR request/
* response descriptors and the self IDs buffer.
*/
BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
PAGE_SIZE,
&ohci->misc_buffer_bus,
GFP_KERNEL);
if (!ohci->misc_buffer) {
err = -ENOMEM;
goto fail_iounmap;
}
err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
OHCI1394_AsReqRcvContextControlSet); OHCI1394_AsReqRcvContextControlSet);
if (err < 0)
goto fail_misc_buf;
ar_context_init(&ohci->ar_response_ctx, ohci, err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
OHCI1394_AsRspRcvContextControlSet); OHCI1394_AsRspRcvContextControlSet);
if (err < 0)
goto fail_arreq_ctx;
context_init(&ohci->at_request_ctx, ohci, err = context_init(&ohci->at_request_ctx, ohci,
OHCI1394_AsReqTrContextControlSet, handle_at_packet); OHCI1394_AsReqTrContextControlSet, handle_at_packet);
if (err < 0)
goto fail_arrsp_ctx;
context_init(&ohci->at_response_ctx, ohci, err = context_init(&ohci->at_response_ctx, ohci,
OHCI1394_AsRspTrContextControlSet, handle_at_packet); OHCI1394_AsRspTrContextControlSet, handle_at_packet);
if (err < 0)
goto fail_atreq_ctx;
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL; ohci->ir_context_channels = ~0ULL;
ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
n_ir = hweight32(ohci->ir_context_mask); ohci->n_ir = hweight32(ohci->ir_context_mask);
size = sizeof(struct iso_context) * n_ir; size = sizeof(struct iso_context) * ohci->n_ir;
ohci->ir_context_list = kzalloc(size, GFP_KERNEL); ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
n_it = hweight32(ohci->it_context_mask); ohci->n_it = hweight32(ohci->it_context_mask);
size = sizeof(struct iso_context) * n_it; size = sizeof(struct iso_context) * ohci->n_it;
ohci->it_context_list = kzalloc(size, GFP_KERNEL); ohci->it_context_list = kzalloc(size, GFP_KERNEL);
if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
...@@ -2987,15 +3225,8 @@ static int __devinit pci_probe(struct pci_dev *dev, ...@@ -2987,15 +3225,8 @@ static int __devinit pci_probe(struct pci_dev *dev,
goto fail_contexts; goto fail_contexts;
} }
/* self-id dma buffer allocation */ ohci->self_id_cpu = ohci->misc_buffer + PAGE_SIZE/2;
ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device, ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
SELF_ID_BUF_SIZE,
&ohci->self_id_bus,
GFP_KERNEL);
if (ohci->self_id_cpu == NULL) {
err = -ENOMEM;
goto fail_contexts;
}
bus_options = reg_read(ohci, OHCI1394_BusOptions); bus_options = reg_read(ohci, OHCI1394_BusOptions);
max_receive = (bus_options >> 12) & 0xf; max_receive = (bus_options >> 12) & 0xf;
@@ -3005,26 +3236,30 @@ static int __devinit pci_probe(struct pci_dev *dev,

 	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
 	if (err)
-		goto fail_self_id;
+		goto fail_contexts;

 	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
 	fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
 		  "%d IR + %d IT contexts, quirks 0x%x\n",
 		  dev_name(&dev->dev), version >> 16, version & 0xff,
-		  n_ir, n_it, ohci->quirks);
+		  ohci->n_ir, ohci->n_it, ohci->quirks);

 	return 0;

- fail_self_id:
-	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
-			  ohci->self_id_cpu, ohci->self_id_bus);
  fail_contexts:
 	kfree(ohci->ir_context_list);
 	kfree(ohci->it_context_list);
 	context_release(&ohci->at_response_ctx);
+ fail_atreq_ctx:
 	context_release(&ohci->at_request_ctx);
+ fail_arrsp_ctx:
 	ar_context_release(&ohci->ar_response_ctx);
+ fail_arreq_ctx:
 	ar_context_release(&ohci->ar_request_ctx);
+ fail_misc_buf:
+	dma_free_coherent(ohci->card.device, PAGE_SIZE,
+			  ohci->misc_buffer, ohci->misc_buffer_bus);
+ fail_iounmap:
 	pci_iounmap(dev, ohci->registers);
  fail_iomem:
 	pci_release_region(dev, 0);
if (ohci->config_rom) if (ohci->config_rom)
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
ohci->config_rom, ohci->config_rom_bus); ohci->config_rom, ohci->config_rom_bus);
dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
ohci->self_id_cpu, ohci->self_id_bus);
ar_context_release(&ohci->ar_request_ctx); ar_context_release(&ohci->ar_request_ctx);
ar_context_release(&ohci->ar_response_ctx); ar_context_release(&ohci->ar_response_ctx);
dma_free_coherent(ohci->card.device, PAGE_SIZE,
ohci->misc_buffer, ohci->misc_buffer_bus);
context_release(&ohci->at_request_ctx); context_release(&ohci->at_request_ctx);
context_release(&ohci->at_response_ctx); context_release(&ohci->at_response_ctx);
kfree(ohci->it_context_list); kfree(ohci->it_context_list);
...@@ -3117,7 +3352,20 @@ static int pci_resume(struct pci_dev *dev) ...@@ -3117,7 +3352,20 @@ static int pci_resume(struct pci_dev *dev)
return err; return err;
} }
return ohci_enable(&ohci->card, NULL, 0); /* Some systems don't setup GUID register on resume from ram */
if (!reg_read(ohci, OHCI1394_GUIDLo) &&
!reg_read(ohci, OHCI1394_GUIDHi)) {
reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
}
err = ohci_enable(&ohci->card, NULL, 0);
if (err)
return err;
ohci_resume_iso_dma(ohci);
return 0;
} }
#endif #endif
......
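The probe path above carves one coherent page into three pieces: AR request descriptors at offset 0, AR response descriptors at PAGE_SIZE/4, and the self-ID buffer at PAGE_SIZE/2, with BUILD_BUG_ON() guarding the fit. A compile-time model of those invariants; the sizes below are illustrative stand-ins for the driver's constants:

	#include <assert.h>
	#include <stddef.h>

	#define PAGE_SIZE	 4096
	#define AR_BUFFERS	 8		/* ring of eight page buffers */
	#define SELF_ID_BUF_SIZE 0x800		/* self-ID receive buffer */

	struct descriptor { unsigned char bytes[16]; };	/* 16-byte OHCI descriptor */

	/* the same invariants the driver checks with BUILD_BUG_ON() */
	_Static_assert(AR_BUFFERS * sizeof(struct descriptor) <= PAGE_SIZE / 4,
		       "AR descriptor ring must fit in a quarter page");
	_Static_assert(SELF_ID_BUF_SIZE <= PAGE_SIZE / 2,
		       "self-ID buffer must fit in the upper half page");

	int main(void)
	{
		size_t ar_req_descs = 0;		/* AR request ring */
		size_t ar_rsp_descs = PAGE_SIZE / 4;	/* AR response ring */
		size_t self_id_buf  = PAGE_SIZE / 2;	/* self IDs */

		assert(ar_req_descs < ar_rsp_descs && ar_rsp_descs < self_id_buf);
		return 0;
	}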
@@ -302,9 +302,9 @@ struct fw_packet {
 struct fw_transaction {
 	int node_id; /* The generation is implied; it is always the current. */
 	int tlabel;
-	int timestamp;
 	struct list_head link;
 	struct fw_card *card;
+	bool is_split_transaction;
 	struct timer_list split_timeout_timer;

 	struct fw_packet packet;
...