Commit 0943dcc1 authored by Ben Collins, committed by Linus Torvalds

[PATCH] IEEE-1394 Updates

ieee1394 updates:
  - New drivers: eth1394, amdtp
  - nodemgr cleanup
  - Fixes for ohci
  - fixes for node probes
  - small misc performance fixes
  - New /proc interface for subsystem, node listing, and dv1394
parent 83ea848f
@@ -90,6 +90,31 @@ CONFIG_IEEE1394_RAWIO
say M here and read <file:Documentation/modules.txt>. The module
will be called raw1394.o.
CONFIG_IEEE1394_ETH1394
Extremely experimental! This driver is a Linux-specific way to use your
IEEE1394 host as an Ethernet-type device. This is _NOT_ IP1394.
CONFIG_IEEE1394_AMDTP
This option enables the Audio & Music Data Transmission Protocol
(IEC61883-6) driver, which implements audio transmission over
IEEE1394.
The userspace interface is documented in amdtp.h.
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/modules.txt>. The module
will be called amdtp.o.
CONFIG_IEEE1394_CMP
This option enables the Connection Management Procedures
(IEC61883-1) driver, which implements input and output plugs.
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/modules.txt>. The module
will be called cmp.o.
CONFIG_IEEE1394_VERBOSEDEBUG
If you say Y here, you will get very verbose debugging logs from the
subsystem which includes a dump of the header of every sent and
@@ -99,4 +124,3 @@ CONFIG_IEEE1394_VERBOSEDEBUG
Say Y if you really want or need the debugging output, everyone else
says N.
@@ -19,8 +19,14 @@ if [ "$CONFIG_PCI" = "y" -a "$CONFIG_EXPERIMENTAL" = "y" ]; then
comment "Protocol Drivers"
dep_tristate ' OHCI-1394 Video support' CONFIG_IEEE1394_VIDEO1394 $CONFIG_IEEE1394_OHCI1394
dep_tristate ' SBP-2 support (Harddisks etc.)' CONFIG_IEEE1394_SBP2 $CONFIG_SCSI $CONFIG_IEEE1394
dep_tristate ' Ethernet over 1394' CONFIG_IEEE1394_ETH1394 $CONFIG_IEEE1394
dep_tristate ' OHCI-DV I/O support' CONFIG_IEEE1394_DV1394 $CONFIG_IEEE1394_OHCI1394
dep_tristate ' Raw IEEE1394 I/O support' CONFIG_IEEE1394_RAWIO $CONFIG_IEEE1394
dep_tristate ' IEC61883-1 Plug support' CONFIG_IEEE1394_CMP $CONFIG_IEEE1394
if [ "$CONFIG_IEEE1394_PCILYNX" != "n" ]; then
dep_tristate ' IEC61883-6 (Audio transmission) support' CONFIG_IEEE1394_AMDTP $CONFIG_IEEE1394_OHCI1394 $CONFIG_IEEE1394_CMP
fi
bool 'Excessive debugging output' CONFIG_IEEE1394_VERBOSEDEBUG
fi
......
@@ -4,7 +4,7 @@
O_TARGET := ieee1394drv.o
export-objs := ieee1394_core.o ohci1394.o
export-objs := ieee1394_core.o ohci1394.o cmp.o
list-multi := ieee1394.o
ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \
@@ -17,6 +17,9 @@ obj-$(CONFIG_IEEE1394_VIDEO1394) += video1394.o
obj-$(CONFIG_IEEE1394_RAWIO) += raw1394.o
obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
obj-$(CONFIG_IEEE1394_AMDTP) += amdtp.o
obj-$(CONFIG_IEEE1394_CMP) += cmp.o
include $(TOPDIR)/Rules.make
......
/* -*- c-basic-offset: 8 -*-
*
* amdtp.c - Audio and Music Data Transmission Protocol Driver
* Copyright (C) 2001 Kristian Høgsberg
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* OVERVIEW
* --------
*
* The AMDTP driver is designed to expose the IEEE1394 bus as a
* regular OSS soundcard, i.e. you can link /dev/dsp to /dev/amdtp and
* then your favourite MP3 player, game or whatever sound program will
* output to an IEEE1394 isochronous channel. The signal destination
* could be a set of IEEE1394 loudspeakers (if and when such things
* become available) or an amplifier with IEEE1394 input (like the
* Sony STR-LSA1). The driver only handles the actual streaming; some
* connection management is also required for this to actually work.
* That is outside the scope of this driver, and furthermore it is not
* really standardized yet.
*
* The Audio and Music Data Transmission Protocol is available at
*
* http://www.1394ta.org/Download/Technology/Specifications/2001/AM20Final-jf2.pdf
*
*
* TODO
* ----
*
* - We should be able to change input sample format between LE/BE, as
* we already shift the bytes around when we construct the iso
* packets.
*
* - Fix DMA stop after bus reset!
*
* - Implement poll.
*
* - Clean up iso context handling in ohci1394.
*
*
* MAYBE TODO
* ----------
*
* - Receive data for local playback or recording. Playback requires
* soft syncing with the sound card.
*
* - Signal processing, i.e. receive packets, do some processing, and
* transmit them again using the same packet structure and timestamps
* offset by processing time.
*
* - Maybe make an ALSA interface, that is, create a file_ops
* implementation that recognizes ALSA ioctls and uses defaults for
* things that can't be controlled through ALSA (iso channel).
*/
#include <linux/module.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include "hosts.h"
#include "highlevel.h"
#include "ieee1394.h"
#include "ieee1394_core.h"
#include "ohci1394.h"
#include "amdtp.h"
#include "cmp.h"
#define FMT_AMDTP 0x10
#define FDF_AM824 0x00
#define FDF_SFC_32KHZ 0x00 /* 32kHz */
#define FDF_SFC_44K1HZ 0x01 /* 44.1kHz */
#define FDF_SFC_48KHZ 0x02 /* 48kHz */
struct descriptor_block {
struct output_more_immediate {
u32 control;
u32 pad0;
u32 skip;
u32 pad1;
u32 header[4];
} header_desc;
struct output_last {
u32 control;
u32 data_address;
u32 branch;
u32 status;
} payload_desc;
};
struct packet {
struct descriptor_block *db;
dma_addr_t db_bus;
quadlet_t *payload;
dma_addr_t payload_bus;
};
struct fraction {
int integer;
int numerator;
int denominator;
int counter;
};
#define PACKET_LIST_SIZE 256
#define MAX_PACKET_LISTS 4
struct packet_list {
struct list_head link;
int last_cycle_count;
struct packet packets[PACKET_LIST_SIZE];
};
#define BUFFER_SIZE 128
/* This implements a circular buffer for incoming samples. */
struct buffer {
int head, tail, length, size;
unsigned char data[0];
};
struct stream {
int iso_channel;
int format;
int rate;
int dimension;
int fdf;
struct cmp_pcr *opcr;
/* Input samples are copied here. */
struct buffer *input;
/* ISO Packer state */
unsigned char dbc;
struct packet_list *current_packet_list;
int current_packet;
struct fraction packet_size_fraction;
/* We use these to generate control bits when we are packing
* iec958 data.
*/
int iec958_frame_count;
int iec958_rate_code;
/* The cycle_count and cycle_offset fields are used for the
* synchronization timestamps (syt) in the cip header. They
* are incremented by at least a cycle every time we put a
* time stamp in a packet. As we don't time stamp all
* packets, cycle_count isn't updated in every cycle, and
* sometimes it's incremented by 2. Thus, we have
* cycle_count2, which is simply incremented by one with each
* packet, so we can compare it to the transmission time
* written back in the dma programs.
*/
atomic_t cycle_count, cycle_count2;
int cycle_offset;
struct fraction syt_fraction;
int syt_interval;
int stale_count;
/* These fields control the sample output to the DMA engine.
* The dma_packet_lists list holds packet lists currently
* queued for dma; the head of the list is currently being
* processed. The last program in a packet list generates an
* interrupt, which removes the head from dma_packet_lists and
* puts it back on the free list.
*/
struct list_head dma_packet_lists;
struct list_head free_packet_lists;
wait_queue_head_t packet_list_wait;
spinlock_t packet_list_lock;
int iso_context;
struct pci_pool *descriptor_pool, *packet_pool;
/* Streams at a host controller are chained through this field. */
struct list_head link;
struct amdtp_host *host;
};
struct amdtp_host {
struct hpsb_host *host;
struct ti_ohci *ohci;
struct list_head stream_list;
spinlock_t stream_list_lock;
struct list_head link;
};
static struct hpsb_highlevel *amdtp_highlevel;
static LIST_HEAD(host_list);
static spinlock_t host_list_lock = SPIN_LOCK_UNLOCKED;
/* FIXME: This doesn't belong here... */
#define OHCI1394_CONTEXT_CYCLE_MATCH 0x80000000
#define OHCI1394_CONTEXT_RUN 0x00008000
#define OHCI1394_CONTEXT_WAKE 0x00001000
#define OHCI1394_CONTEXT_DEAD 0x00000800
#define OHCI1394_CONTEXT_ACTIVE 0x00000400
static inline int ohci1394_alloc_it_ctx(struct ti_ohci *ohci)
{
int i;
for (i = 0; i < ohci->nb_iso_xmit_ctx; i++)
if (!test_and_set_bit(i, &ohci->it_ctx_usage))
return i;
return -EBUSY;
}
static inline void ohci1394_free_it_ctx(struct ti_ohci *ohci, int ctx)
{
clear_bit(ctx, &ohci->it_ctx_usage);
}
void ohci1394_start_it_ctx(struct ti_ohci *ohci, int ctx,
dma_addr_t first_cmd, int z, int cycle_match)
{
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << ctx);
reg_write(ohci, OHCI1394_IsoXmitCommandPtr + ctx * 16, first_cmd | z);
reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16, ~0);
wmb();
reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
OHCI1394_CONTEXT_CYCLE_MATCH | (cycle_match << 16) |
OHCI1394_CONTEXT_RUN);
}
void ohci1394_wake_it_ctx(struct ti_ohci *ohci, int ctx)
{
reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
OHCI1394_CONTEXT_WAKE);
}
void ohci1394_stop_it_ctx(struct ti_ohci *ohci, int ctx)
{
u32 control;
int wait;
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << ctx);
reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16,
OHCI1394_CONTEXT_RUN);
wmb();
for (wait = 0; wait < 5; wait++) {
control = reg_read(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16);
if ((control & OHCI1394_CONTEXT_ACTIVE) == 0)
break;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
}
/* Note: we can test if free_packet_lists is empty without acquiring
* the packet_list_lock. The interrupt handler only adds to the free
* list, there is no race condition between testing the list non-empty
* and acquiring the lock.
*/
static struct packet_list *stream_get_free_packet_list(struct stream *s)
{
struct packet_list *pl;
unsigned long flags;
if (list_empty(&s->free_packet_lists))
return NULL;
spin_lock_irqsave(&s->packet_list_lock, flags);
pl = list_entry(s->free_packet_lists.next, struct packet_list, link);
list_del(&pl->link);
spin_unlock_irqrestore(&s->packet_list_lock, flags);
return pl;
}
static void stream_put_dma_packet_list(struct stream *s,
struct packet_list *pl)
{
unsigned long flags;
struct packet_list *prev;
/* Remember the cycle_count used for timestamping the last packet. */
pl->last_cycle_count = atomic_read(&s->cycle_count2) - 1;
pl->packets[PACKET_LIST_SIZE - 1].db->payload_desc.branch = 0;
spin_lock_irqsave(&s->packet_list_lock, flags);
list_add_tail(&pl->link, &s->dma_packet_lists);
spin_unlock_irqrestore(&s->packet_list_lock, flags);
prev = list_entry(pl->link.prev, struct packet_list, link);
if (pl->link.prev != &s->dma_packet_lists) {
struct packet *last = &prev->packets[PACKET_LIST_SIZE - 1];
last->db->payload_desc.branch = pl->packets[0].db_bus | 3;
ohci1394_wake_it_ctx(s->host->ohci, s->iso_context);
}
else {
u32 syt, cycle_count;
cycle_count = reg_read(s->host->host->hostdata,
OHCI1394_IsochronousCycleTimer) >> 12;
syt = (pl->packets[0].payload[1] >> 12) & 0x0f;
cycle_count = (cycle_count & ~0x0f) + 32 + syt;
if ((cycle_count & 0x1fff) >= 8000)
cycle_count = cycle_count - 8000 + 0x2000;
ohci1394_start_it_ctx(s->host->ohci, s->iso_context,
pl->packets[0].db_bus, 3,
cycle_count & 0x7fff);
}
}
static void stream_shift_packet_lists(struct stream *s)
{
struct packet_list *pl;
struct packet *last;
int diff;
if (list_empty(&s->dma_packet_lists)) {
HPSB_ERR("empty dma_packet_lists in %s", __FUNCTION__);
return;
}
/* Now that we know the list is non-empty, we can get the head
* of the list without locking, because the process context
* only adds to the tail.
*/
pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
last = &pl->packets[PACKET_LIST_SIZE - 1];
/* This is weird... if we stop dma processing in the middle of
* a packet list, the dma context immediately generates an
* interrupt if we enable it again later. This only happens
* when amdtp_release is interrupted while waiting for dma to
* complete, though. Anyway, we detect this by seeing that
* the status of the dma descriptor that we expected an
* interrupt from is still 0.
*/
if (last->db->payload_desc.status == 0) {
HPSB_INFO("weird interrupt...");
return;
}
/* If the last descriptor block does not specify a branch
* address, we have a sample underflow.
*/
if (last->db->payload_desc.branch == 0)
HPSB_INFO("FIXME: sample underflow...");
/* Here we check when (which cycle) the last packet was sent
* and compare it to what the iso packer was using at the
* time. If there is a mismatch, we adjust the cycle count in
* the iso packer. However, there are still up to
* MAX_PACKET_LISTS packet lists queued with bad time stamps,
* so we disable time stamp monitoring for the next
* MAX_PACKET_LISTS packet lists.
*/
diff = (last->db->payload_desc.status - pl->last_cycle_count) & 0xf;
if (diff > 0 && s->stale_count == 0) {
atomic_add(diff, &s->cycle_count);
atomic_add(diff, &s->cycle_count2);
s->stale_count = MAX_PACKET_LISTS;
}
if (s->stale_count > 0)
s->stale_count--;
/* Finally, we move the packet list that was just processed
* back to the free list, and notify any waiters.
*/
spin_lock(&s->packet_list_lock);
list_del(&pl->link);
list_add_tail(&pl->link, &s->free_packet_lists);
spin_unlock(&s->packet_list_lock);
wake_up_interruptible(&s->packet_list_wait);
}
static struct packet *stream_current_packet(struct stream *s)
{
if (s->current_packet_list == NULL &&
(s->current_packet_list = stream_get_free_packet_list(s)) == NULL)
return NULL;
return &s->current_packet_list->packets[s->current_packet];
}
static void stream_queue_packet(struct stream *s)
{
s->current_packet++;
if (s->current_packet == PACKET_LIST_SIZE) {
stream_put_dma_packet_list(s, s->current_packet_list);
s->current_packet_list = NULL;
s->current_packet = 0;
}
}
/* Integer fractional math. When we transmit a 44k1Hz signal we must
* send 5 41/80 samples per isochronous cycle, as these occur 8000
* times a second. Of course, we must send an integral number of
* samples in a packet, so we use the integer math to alternate
* between sending 5 and 6 samples per packet.
*/
static void fraction_init(struct fraction *f, int numerator, int denominator)
{
f->integer = numerator / denominator;
f->numerator = numerator % denominator;
f->denominator = denominator;
f->counter = 0;
}
static int fraction_next_size(struct fraction *f)
{
return f->integer + ((f->counter + f->numerator) / f->denominator);
}
static void fraction_inc(struct fraction *f)
{
f->counter = (f->counter + f->numerator) % f->denominator;
}
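/* Worked example (editor's sketch, not part of the driver): for a
* 44.1kHz stream, fraction_init(&f, 44100, 8000) gives integer = 5,
* numerator = 4100 and denominator = 8000, i.e. 5 41/80 samples per
* isochronous cycle.  fraction_next_size()/fraction_inc() then yield a
* stream of 5- and 6-sample packets, roughly alternating, and over one
* second the packet sizes sum to exactly 44100:
*
*	struct fraction f;
*	int i, total = 0;
*
*	fraction_init(&f, 44100, 8000);
*	for (i = 0; i < 8000; i++) {
*		total += fraction_next_size(&f);
*		fraction_inc(&f);
*	}
*
* after which total == 44100, an average of 5.5125 samples per cycle.
*/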
static void amdtp_irq_handler(int card, quadlet_t isoRecvIntEvent,
quadlet_t isoXmitIntEvent, void *data)
{
struct amdtp_host *host = data;
struct list_head *lh;
struct stream *s = NULL;
spin_lock(&host->stream_list_lock);
list_for_each(lh, &host->stream_list) {
s = list_entry(lh, struct stream, link);
if (isoXmitIntEvent & (1 << s->iso_context))
break;
}
spin_unlock(&host->stream_list_lock);
if (s != NULL)
stream_shift_packet_lists(s);
}
void packet_initialize(struct packet *p, struct packet *next)
{
/* Here we initialize the dma descriptor block for
* transferring one iso packet. We use two descriptors per
* packet: an OUTPUT_MORE_IMMEDIATE descriptor for the
* IEEE1394 iso packet header and an OUTPUT_LAST descriptor
* for the payload.
*/
p->db->header_desc.control =
DMA_CTL_OUTPUT_MORE | DMA_CTL_IMMEDIATE | 8;
p->db->header_desc.skip = 0;
if (next) {
p->db->payload_desc.control =
DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH;
p->db->payload_desc.branch = next->db_bus | 3;
}
else {
p->db->payload_desc.control =
DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH |
DMA_CTL_UPDATE | DMA_CTL_IRQ;
p->db->payload_desc.branch = 0;
}
p->db->payload_desc.data_address = p->payload_bus;
p->db->payload_desc.status = 0;
}
struct packet_list *packet_list_alloc(struct stream *s)
{
int i;
struct packet_list *pl;
struct packet *next;
pl = kmalloc(sizeof *pl, SLAB_KERNEL);
if (pl == NULL)
return NULL;
for (i = 0; i < PACKET_LIST_SIZE; i++) {
struct packet *p = &pl->packets[i];
p->db = pci_pool_alloc(s->descriptor_pool, SLAB_KERNEL,
&p->db_bus);
p->payload = pci_pool_alloc(s->packet_pool, SLAB_KERNEL,
&p->payload_bus);
}
for (i = 0; i < PACKET_LIST_SIZE; i++) {
if (i < PACKET_LIST_SIZE - 1)
next = &pl->packets[i + 1];
else
next = NULL;
packet_initialize(&pl->packets[i], next);
}
return pl;
}
void packet_list_free(struct packet_list *pl, struct stream *s)
{
int i;
for (i = 0; i < PACKET_LIST_SIZE; i++) {
struct packet *p = &pl->packets[i];
pci_pool_free(s->descriptor_pool, p->db, p->db_bus);
pci_pool_free(s->packet_pool, p->payload, p->payload_bus);
}
kfree(pl);
}
static struct buffer *buffer_alloc(int size)
{
struct buffer *b;
b = kmalloc(sizeof *b + size, SLAB_KERNEL);
if (b == NULL)
return NULL;
b->head = 0;
b->tail = 0;
b->length = 0;
b->size = size;
return b;
}
static unsigned char *buffer_get_bytes(struct buffer *buffer, int size)
{
unsigned char *p;
if (buffer->head + size > buffer->size)
BUG();
p = &buffer->data[buffer->head];
buffer->head += size;
if (buffer->head == buffer->size)
buffer->head = 0;
buffer->length -= size;
return p;
}
static unsigned char *buffer_put_bytes(struct buffer *buffer,
int max, int *actual)
{
int length;
unsigned char *p;
p = &buffer->data[buffer->tail];
length = min(buffer->size - buffer->length, max);
if (buffer->tail + length < buffer->size) {
*actual = length;
buffer->tail += length;
}
else {
*actual = buffer->size - buffer->tail;
buffer->tail = 0;
}
buffer->length += *actual;
return p;
}
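/* Usage sketch (editorial, mirrors amdtp_write() below): a producer asks
* for room with buffer_put_bytes() and copies into the returned pointer;
* because the buffer is circular, *actual may come back smaller than the
* requested size when the free region wraps, so callers loop:
*
*	int actual;
*	unsigned char *p = buffer_put_bytes(buf, count, &actual);
*	memcpy(p, src, actual);
*
* buffer_get_bytes() later consumes whole samples and BUG()s if a
* request would cross the wrap point.
*/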
static u32 get_iec958_header_bits(struct stream *s, int sub_frame, u32 sample)
{
int csi, parity, shift;
int block_start;
u32 bits;
switch (s->iec958_frame_count) {
case 1:
csi = s->format == AMDTP_FORMAT_IEC958_AC3;
break;
case 2:
case 9:
csi = 1;
break;
case 24 ... 27:
csi = (s->iec958_rate_code >> (27 - s->iec958_frame_count)) & 0x01;
break;
default:
csi = 0;
break;
}
block_start = (s->iec958_frame_count == 0 && sub_frame == 0);
/* The parity bit is the xor of the sample bits and the
* channel status info bit. */
for (shift = 16, parity = sample ^ csi; shift > 0; shift >>= 1)
parity ^= (parity >> shift);
bits = (block_start << 5) | /* Block start bit */
((sub_frame == 0) << 4) | /* Subframe bit */
((parity & 1) << 3) | /* Parity bit */
(csi << 2); /* Channel status info bit */
return bits;
}
static u32 get_header_bits(struct stream *s, int sub_frame, u32 sample)
{
switch (s->format) {
case AMDTP_FORMAT_IEC958_PCM:
case AMDTP_FORMAT_IEC958_AC3:
return get_iec958_header_bits(s, sub_frame, sample);
case AMDTP_FORMAT_RAW:
return 0x40000000;
default:
return 0;
}
}
static void fill_packet(struct stream *s, struct packet *packet, int nevents)
{
int size, node_id, i, j;
quadlet_t *event;
unsigned char *p;
u32 control, sample, bits;
int syt_index, syt, next;
size = (nevents * s->dimension + 2) * sizeof(quadlet_t);
node_id = s->host->host->node_id & 0x3f;
/* Update DMA descriptors */
packet->db->payload_desc.status = 0;
control = packet->db->payload_desc.control & 0xffff0000;
packet->db->payload_desc.control = control | size;
/* Fill IEEE1394 headers */
packet->db->header_desc.header[0] =
(SPEED_100 << 16) | (0x01 << 14) |
(s->iso_channel << 8) | (TCODE_ISO_DATA << 4);
packet->db->header_desc.header[1] = size << 16;
/* Fill cip header */
syt_index = s->dbc & (s->syt_interval - 1);
if (syt_index == 0 || syt_index + nevents > s->syt_interval) {
syt = ((atomic_read(&s->cycle_count) << 12) |
s->cycle_offset) & 0xffff;
next = fraction_next_size(&s->syt_fraction) + s->cycle_offset;
/* This next addition should be modulo 8000 (0x1f40),
* but we only use the lower 4 bits of cycle_count, so
* we don't need the modulo. */
atomic_add(next / 3072, &s->cycle_count);
s->cycle_offset = next % 3072;
fraction_inc(&s->syt_fraction);
}
else {
syt = 0xffff;
next = 0;
}
atomic_inc(&s->cycle_count2);
packet->payload[0] = cpu_to_be32((node_id << 24) | (s->dimension << 16) | s->dbc);
packet->payload[1] = cpu_to_be32((1 << 31) | (FMT_AMDTP << 24) | (s->fdf << 16) | syt);
/* Fill payload */
for (i = 0, event = &packet->payload[2]; i < nevents; i++) {
for (j = 0; j < s->dimension; j++) {
p = buffer_get_bytes(s->input, 2);
sample = (p[1] << 16) | (p[0] << 8);
bits = get_header_bits(s, j, sample);
event[j] = cpu_to_be32((bits << 24) | sample);
}
event += s->dimension;
if (++s->iec958_frame_count == 192)
s->iec958_frame_count = 0;
}
s->dbc += nevents;
}
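/* Timestamp arithmetic, worked through (editor's note): one isochronous
* cycle is 3072 ticks of the 24.576MHz cycle timer.  At 44.1kHz with
* syt_interval = 8, fraction_next_size(&s->syt_fraction) returns 4458 or
* 4459 ticks between SYT events (24576000 * 8 / 44100 = 4458.23...).  If
* cycle_offset is, say, 2000, then next = 4458 + 2000 = 6458, so
* cycle_count advances by 6458 / 3072 = 2 cycles and the new cycle_offset
* is 6458 % 3072 = 314 ticks.  The syt field itself carries the low four
* bits of cycle_count in bits 15-12 and the 12-bit cycle_offset below.
*/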
static void stream_flush(struct stream *s)
{
struct packet *p;
int nevents;
while (nevents = fraction_next_size(&s->packet_size_fraction),
p = stream_current_packet(s),
nevents * s->dimension * 2 <= s->input->length && p != NULL) {
fill_packet(s, p, nevents);
fraction_inc(&s->packet_size_fraction);
stream_queue_packet(s);
}
}
static int stream_alloc_packet_lists(struct stream *s)
{
int max_nevents, max_packet_size, i;
max_nevents = s->packet_size_fraction.integer;
if (s->packet_size_fraction.numerator > 0)
max_nevents++;
max_packet_size = max_nevents * s->dimension * 4 + 8;
s->packet_pool = pci_pool_create("packet pool", s->host->ohci->dev,
max_packet_size, 0, 0, SLAB_KERNEL);
if (s->packet_pool == NULL)
return -1;
INIT_LIST_HEAD(&s->free_packet_lists);
INIT_LIST_HEAD(&s->dma_packet_lists);
for (i = 0; i < MAX_PACKET_LISTS; i++) {
struct packet_list *pl = packet_list_alloc(s);
if (pl == NULL)
break;
list_add_tail(&pl->link, &s->free_packet_lists);
}
return i < MAX_PACKET_LISTS ? -1 : 0;
}
static void stream_free_packet_lists(struct stream *s)
{
struct list_head *lh, *next;
if (s->current_packet_list != NULL)
packet_list_free(s->current_packet_list, s);
list_for_each_safe(lh, next, &s->dma_packet_lists)
packet_list_free(list_entry(lh, struct packet_list, link), s);
list_for_each_safe(lh, next, &s->free_packet_lists)
packet_list_free(list_entry(lh, struct packet_list, link), s);
if (s->packet_pool != NULL)
pci_pool_destroy(s->packet_pool);
s->current_packet_list = NULL;
INIT_LIST_HEAD(&s->free_packet_lists);
INIT_LIST_HEAD(&s->dma_packet_lists);
s->packet_pool = NULL;
}
static void plug_update(struct cmp_pcr *plug, void *data)
{
struct stream *s = data;
HPSB_INFO("plug update: p2p_count=%d, channel=%d",
plug->p2p_count, plug->channel);
s->iso_channel = plug->channel;
if (plug->p2p_count > 0) {
/* start streaming */
}
else {
/* stop streaming */
}
}
static int stream_configure(struct stream *s, int cmd, struct amdtp_ioctl *cfg)
{
if (cfg->format <= AMDTP_FORMAT_IEC958_AC3)
s->format = cfg->format;
else
return -EINVAL;
switch (cfg->rate) {
case 32000:
s->syt_interval = 8;
s->fdf = FDF_SFC_32KHZ;
s->iec958_rate_code = 0x0c;
s->rate = cfg->rate;
break;
case 44100:
s->syt_interval = 8;
s->fdf = FDF_SFC_44K1HZ;
s->iec958_rate_code = 0x00;
s->rate = cfg->rate;
break;
case 48000:
s->syt_interval = 8;
s->fdf = FDF_SFC_48KHZ;
s->iec958_rate_code = 0x04;
s->rate = cfg->rate;
break;
default:
return -EINVAL;
}
fraction_init(&s->packet_size_fraction, s->rate, 8000);
/* The syt_fraction is initialized to the number of ticks
* between syt_interval events. The number of ticks per
* second is 24.576e6, so the number of ticks between
* syt_interval events is 24.576e6 * syt_interval / rate.
*/
fraction_init(&s->syt_fraction, 24576000 * s->syt_interval, s->rate);
/* When using the AM824 raw subformat we can stream signals of
* any dimension. The IEC958 subformat, however, only
* supports 2 channels.
*/
if (s->format == AMDTP_FORMAT_RAW || cfg->dimension == 2)
s->dimension = cfg->dimension;
else
return -EINVAL;
if (s->opcr != NULL) {
cmp_unregister_opcr(s->host->host, s->opcr);
s->opcr = NULL;
}
switch(cmd) {
case AMDTP_IOC_PLUG:
s->opcr = cmp_register_opcr(s->host->host, cfg->u.plug,
/*payload*/ 12, plug_update, s);
if (s->opcr == NULL)
return -EINVAL;
s->iso_channel = s->opcr->channel;
break;
case AMDTP_IOC_CHANNEL:
if (cfg->u.channel >= 0 && cfg->u.channel < 64)
s->iso_channel = cfg->u.channel;
else
return -EINVAL;
break;
}
/* The ioctl settings were all valid, so we realloc the packet
* lists to make sure the packet size is big enough.
*/
if (s->packet_pool != NULL)
stream_free_packet_lists(s);
if (stream_alloc_packet_lists(s) < 0) {
stream_free_packet_lists(s);
return -ENOMEM;
}
return 0;
}
struct stream *stream_alloc(struct amdtp_host *host)
{
struct stream *s;
unsigned long flags;
const int transfer_delay = 8651; /* approx 352 us */
s = kmalloc(sizeof(struct stream), SLAB_KERNEL);
if (s == NULL)
return NULL;
memset(s, 0, sizeof(struct stream));
s->host = host;
s->input = buffer_alloc(BUFFER_SIZE);
if (s->input == NULL) {
kfree(s);
return NULL;
}
s->cycle_offset = transfer_delay % 3072;
atomic_set(&s->cycle_count, transfer_delay / 3072);
atomic_set(&s->cycle_count2, 0);
s->descriptor_pool = pci_pool_create("descriptor pool", host->ohci->dev,
sizeof(struct descriptor_block),
16, 0, SLAB_KERNEL);
if (s->descriptor_pool == NULL) {
kfree(s->input);
kfree(s);
return NULL;
}
INIT_LIST_HEAD(&s->free_packet_lists);
INIT_LIST_HEAD(&s->dma_packet_lists);
init_waitqueue_head(&s->packet_list_wait);
spin_lock_init(&s->packet_list_lock);
s->iso_context = ohci1394_alloc_it_ctx(host->ohci);
if (s->iso_context < 0) {
pci_pool_destroy(s->descriptor_pool);
kfree(s->input);
kfree(s);
return NULL;
}
spin_lock_irqsave(&host->stream_list_lock, flags);
list_add_tail(&s->link, &host->stream_list);
spin_unlock_irqrestore(&host->stream_list_lock, flags);
return s;
}
void stream_free(struct stream *s)
{
unsigned long flags;
/* Stop the DMA. We wait for the dma packet list to become
* empty and let the dma controller run out of programs. This
* seems to be more reliable than stopping it directly, since
* that sometimes generates an it transmit interrupt if we
* later re-enable the context.
*/
wait_event_interruptible(s->packet_list_wait,
list_empty(&s->dma_packet_lists));
ohci1394_stop_it_ctx(s->host->ohci, s->iso_context);
ohci1394_free_it_ctx(s->host->ohci, s->iso_context);
if (s->opcr != NULL)
cmp_unregister_opcr(s->host->host, s->opcr);
spin_lock_irqsave(&s->host->stream_list_lock, flags);
list_del(&s->link);
spin_unlock_irqrestore(&s->host->stream_list_lock, flags);
kfree(s->input);
stream_free_packet_lists(s);
pci_pool_destroy(s->descriptor_pool);
kfree(s);
}
/* File operations */
static ssize_t amdtp_write(struct file *file, const char *buffer, size_t count,
loff_t *offset_is_ignored)
{
struct stream *s = file->private_data;
unsigned char *p;
int i, length;
if (s->packet_pool == NULL)
return -EBADFD;
/* Fill the circular buffer from the input buffer and call the
* iso packer when the buffer is full. The iso packer may
* leave bytes in the buffer for two reasons: either the
* remaining bytes weren't enough to build a new packet, or
* there were no free packet lists. In the first case we
* re-fill the buffer and call the iso packer again or return
* if we used all the data from userspace. In the second
* case, the wait_event_interruptible will block until the irq
* handler frees a packet list.
*/
for (i = 0; i < count; i += length) {
p = buffer_put_bytes(s->input, count, &length);
if (copy_from_user(p, buffer + i, length))
return -EFAULT;
if (s->input->length < s->input->size)
continue;
stream_flush(s);
if (s->current_packet_list == NULL &&
wait_event_interruptible(s->packet_list_wait,
!list_empty(&s->free_packet_lists)))
return -EINTR;
}
return count;
}
static int amdtp_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct stream *s = file->private_data;
struct amdtp_ioctl cfg;
int new;
switch(cmd)
{
case AMDTP_IOC_PLUG:
case AMDTP_IOC_CHANNEL:
if (copy_from_user(&cfg, (struct amdtp_ioctl *) arg, sizeof cfg))
return -EFAULT;
else
return stream_configure(s, cmd, &cfg);
case AMDTP_IOC_PING:
HPSB_INFO("ping: offsetting timpestamps %ld ticks", arg);
new = s->cycle_offset + arg;
s->cycle_offset = new % 3072;
atomic_add(new / 3072, &s->cycle_count);
return 0;
case AMDTP_IOC_ZAP:
while (MOD_IN_USE)
MOD_DEC_USE_COUNT;
return 0;
default:
return -EINVAL;
}
}
static int amdtp_open(struct inode *inode, struct file *file)
{
struct amdtp_host *host;
/* FIXME: We just grab the first registered host */
spin_lock(&host_list_lock);
if (!list_empty(&host_list))
host = list_entry(host_list.next, struct amdtp_host, link);
else
host = NULL;
spin_unlock(&host_list_lock);
if (host == NULL)
return -ENODEV;
file->private_data = stream_alloc(host);
if (file->private_data == NULL)
return -ENOMEM;
return 0;
}
static int amdtp_release(struct inode *inode, struct file *file)
{
struct stream *s = file->private_data;
stream_free(s);
return 0;
}
static struct file_operations amdtp_fops =
{
owner: THIS_MODULE,
write: amdtp_write,
ioctl: amdtp_ioctl,
open: amdtp_open,
release: amdtp_release
};
/* IEEE1394 Subsystem functions */
static void amdtp_add_host(struct hpsb_host *host)
{
struct amdtp_host *ah;
/* FIXME: check it's an ohci host. */
ah = kmalloc(sizeof *ah, SLAB_KERNEL);
if (ah == NULL)
return;
ah->host = host;
ah->ohci = host->hostdata;
INIT_LIST_HEAD(&ah->stream_list);
spin_lock_init(&ah->stream_list_lock);
spin_lock_irq(&host_list_lock);
list_add_tail(&ah->link, &host_list);
spin_unlock_irq(&host_list_lock);
ohci1394_hook_irq(ah->ohci, amdtp_irq_handler, ah);
}
static void amdtp_remove_host(struct hpsb_host *host)
{
struct list_head *lh;
struct amdtp_host *ah;
spin_lock_irq(&host_list_lock);
list_for_each(lh, &host_list) {
if (list_entry(lh, struct amdtp_host, link)->host == host) {
list_del(lh);
break;
}
}
spin_unlock_irq(&host_list_lock);
if (lh != &host_list) {
ah = list_entry(lh, struct amdtp_host, link);
ohci1394_unhook_irq(ah->ohci, amdtp_irq_handler, ah);
kfree(ah);
}
else
HPSB_ERR("remove_host: bogus ohci host: %p", host);
}
static struct hpsb_highlevel_ops amdtp_highlevel_ops = {
add_host: amdtp_add_host,
remove_host: amdtp_remove_host,
};
/* Module interface */
MODULE_AUTHOR("Kristian Hogsberg <hogsberg@users.sf.net>");
MODULE_DESCRIPTION("Driver for Audio & Music Data Transmission Protocol "
"on OHCI boards.");
MODULE_SUPPORTED_DEVICE("amdtp");
MODULE_LICENSE("GPL");
static int __init amdtp_init_module (void)
{
if (ieee1394_register_chardev(IEEE1394_MINOR_BLOCK_EXPERIMENTAL,
THIS_MODULE, &amdtp_fops)) {
HPSB_ERR("amdtp: unable to get minor device block");
return -EIO;
}
amdtp_highlevel = hpsb_register_highlevel ("amdtp",
&amdtp_highlevel_ops);
if (amdtp_highlevel == NULL) {
HPSB_ERR("amdtp: unable to register highlevel ops");
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_EXPERIMENTAL);
return -EIO;
}
HPSB_INFO("Loaded AMDTP driver");
return 0;
}
static void __exit amdtp_exit_module (void)
{
hpsb_unregister_highlevel(amdtp_highlevel);
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_EXPERIMENTAL);
HPSB_INFO("Unloaded AMDTP driver");
}
module_init(amdtp_init_module);
module_exit(amdtp_exit_module);
/* -*- c-basic-offset: 8 -*- */
#ifndef __AMDTP_H
#define __AMDTP_H
#include <asm/ioctl.h>
#include <asm/types.h>
/* The userspace interface for the Audio & Music Data Transmission
* Protocol driver is really simple. First, open /dev/amdtp, use the
* ioctl to configure format, rate, dimension and either plug or
* channel, then start writing samples.
*
* The formats supported by the driver are listed below.
* AMDTP_FORMAT_RAW corresponds to the AM824 raw format, which can
* carry any number of channels, so use this if you're streaming
* multichannel audio. The AMDTP_FORMAT_IEC958_PCM corresponds to the
* AM824 IEC958 encapsulation without the IEC958 data bit set; using
* AMDTP_FORMAT_IEC958_AC3 will transmit the samples with the data bit
* set, suitable for transmitting compressed AC-3 audio.
*
* The rate field specifies the transmission rate; supported values are
* AMDTP_RATE_32KHZ, AMDTP_RATE_44K1HZ and AMDTP_RATE_48KHZ.
*
* The dimension field specifies the dimension of the signal, that is,
* the number of audio channels. Only AMDTP_FORMAT_RAW supports
* settings greater than 2.
*
* The last thing to specify is either the isochronous channel to use
* or the output plug to connect to. If you know what channel the
* destination device will listen on, you can specify the channel
* directly and use the AMDTP_IOC_CHANNEL ioctl. However, if the
* destination device chooses the channel and uses the IEC61883-1 plug
* mechanism, you can specify an output plug to connect to. The
* driver will pick up the channel number from the plug once the
* destination device locks the output plug control register. In this
* case set the plug field and use the AMDTP_IOC_PLUG ioctl.
*
* Having configured the interface, the driver now accepts writes of
* regular 16 bit signed little endian samples, with the channels
* interleaved. For example, 4 channels would look like:
*
* | sample 0 | sample 1 ...
* | ch. 0 | ch. 1 | ch. 2 | ch. 3 | ch. 0 | ...
* | lsb | msb | lsb | msb | lsb | msb | lsb | msb | lsb | msb | ...
*
*/
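/* A minimal userspace sketch of the flow described above (an editor's
* illustration, not shipped code).  Error handling is omitted, a
* /dev/amdtp device node is assumed to exist, and note that the current
* stream_configure() matches the literal rate (44100), not the
* AMDTP_RATE_* constants.  Samples come from stdin as 16 bit little
* endian, two channels interleaved:
*
*	#include <fcntl.h>
*	#include <unistd.h>
*	#include <sys/ioctl.h>
*	#include "amdtp.h"
*
*	int main(void)
*	{
*		struct amdtp_ioctl cfg;
*		short samples[2 * 1024];
*		int fd = open("/dev/amdtp", O_WRONLY);
*
*		cfg.format = AMDTP_FORMAT_RAW;
*		cfg.rate = 44100;
*		cfg.dimension = 2;
*		cfg.u.channel = 63;
*		ioctl(fd, AMDTP_IOC_CHANNEL, &cfg);
*
*		while (read(0, samples, sizeof samples) > 0)
*			write(fd, samples, sizeof samples);
*
*		close(fd);
*		return 0;
*	}
*/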
/* We use '#' for our ioctl magic number because it's cool. */
#define AMDTP_IOC_CHANNEL _IOW('#', 0, sizeof (struct amdtp_ioctl))
#define AMDTP_IOC_PLUG _IOW('#', 1, sizeof (struct amdtp_ioctl))
#define AMDTP_IOC_PING _IOW('#', 2, sizeof (struct amdtp_ioctl))
#define AMDTP_IOC_ZAP _IO('#', 3)
enum {
AMDTP_FORMAT_RAW,
AMDTP_FORMAT_IEC958_PCM,
AMDTP_FORMAT_IEC958_AC3
};
enum {
AMDTP_RATE_32KHZ,
AMDTP_RATE_44K1HZ,
AMDTP_RATE_48KHZ,
};
struct amdtp_ioctl {
__u32 format;
__u32 rate;
__u32 dimension;
union { __u32 channel; __u32 plug; } u;
};
#endif /* __AMDTP_H */
/* -*- c-basic-offset: 8 -*-
*
* cmp.c - Connection Management Procedures
* Copyright (C) 2001 Kristian Høgsberg
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* TODO
* ----
*
* - Implement IEC61883-1 output plugs and connection management.
* This should probably be part of the general subsystem, as it could
* be shared with dv1394.
*
* - Add IEC61883 unit directory when loading this module. This
* requires a run-time changeable config rom.
*/
#include <linux/module.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>
#include "hosts.h"
#include "highlevel.h"
#include "ieee1394.h"
#include "ieee1394_core.h"
#include "cmp.h"
struct plug {
union {
struct cmp_pcr pcr;
quadlet_t quadlet;
} u;
void (*update)(struct cmp_pcr *plug, void *data);
void *data;
};
struct cmp_host {
struct hpsb_host *host;
union {
struct cmp_mpr ompr;
quadlet_t ompr_quadlet;
} u;
struct plug opcr[2];
union {
struct cmp_mpr impr;
quadlet_t impr_quadlet;
} v;
struct plug ipcr[2];
struct list_head link;
};
enum {
CMP_P2P_CONNECTION,
CMP_BC_CONNECTION
};
#define CSR_PCR_MAP 0x900
#define CSR_PCR_MAP_END 0x9fc
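/* Address layout served by pcr_read()/pcr_lock() below (a summary of the
* IEC61883-1 plug control register map as implemented here, added for
* orientation):
*
*	CSR_REGISTER_BASE + 0x900	oMPR (output master plug register)
*	CSR_REGISTER_BASE + 0x904+4n	oPCR[n], n < ompr.nplugs
*	CSR_REGISTER_BASE + 0x980	iMPR (input master plug register)
*	CSR_REGISTER_BASE + 0x984+4n	iPCR[n], n < impr.nplugs
*/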
static struct hpsb_highlevel *cmp_highlevel;
static LIST_HEAD(host_list);
static spinlock_t host_list_lock = SPIN_LOCK_UNLOCKED;
static struct cmp_host *
lookup_cmp_host(struct hpsb_host *host)
{
struct cmp_host *ch;
struct list_head *lh;
unsigned long flags;
ch = NULL;
spin_lock_irqsave(&host_list_lock, flags);
list_for_each(lh, &host_list) {
ch = list_entry(lh, struct cmp_host, link);
if (ch->host == host)
break;
}
spin_unlock_irqrestore(&host_list_lock, flags);
if (lh == &host_list)
return NULL;
else
return ch;
}
struct cmp_pcr *
cmp_register_opcr(struct hpsb_host *host, int opcr_number, int payload,
void (*update)(struct cmp_pcr *pcr, void *data),
void *data)
{
struct cmp_host *ch;
struct plug *plug;
ch = lookup_cmp_host(host);
if (ch == NULL)
return NULL;
if (opcr_number >= ch->u.ompr.nplugs ||
ch->opcr[opcr_number].update != NULL)
return NULL;
plug = &ch->opcr[opcr_number];
plug->u.pcr.online = 1;
plug->u.pcr.bcast_count = 0;
plug->u.pcr.p2p_count = 0;
plug->u.pcr.overhead = 0;
plug->u.pcr.payload = payload;
plug->update = update;
plug->data = data;
return &plug->u.pcr;
}
void cmp_unregister_opcr(struct hpsb_host *host, struct cmp_pcr *opcr)
{
struct cmp_host *ch;
struct plug *plug;
ch = lookup_cmp_host(host);
plug = (struct plug *)opcr;
if (plug - ch->opcr >= ch->u.ompr.nplugs) BUG();
plug->u.pcr.online = 0;
plug->update = NULL;
}
static void reset_plugs(struct cmp_host *ch)
{
int i;
ch->u.ompr.non_persistent_ext = 0xff;
for (i = 0; i < ch->u.ompr.nplugs; i++) {
ch->opcr[i].u.pcr.bcast_count = 0;
ch->opcr[i].u.pcr.p2p_count = 0;
ch->opcr[i].u.pcr.overhead = 0;
}
}
static void cmp_add_host(struct hpsb_host *host)
{
struct cmp_host *ch;
ch = kmalloc(sizeof *ch, SLAB_KERNEL);
if (ch == NULL) {
HPSB_ERR("Failed to allocate cmp_host");
return;
}
memset(ch, 0, sizeof *ch);
ch->host = host;
ch->u.ompr.rate = SPEED_100;
ch->u.ompr.bcast_channel_base = 63;
ch->u.ompr.nplugs = 2;
reset_plugs(ch);
spin_lock_irq(&host_list_lock);
list_add_tail(&ch->link, &host_list);
spin_unlock_irq(&host_list_lock);
}
static void cmp_host_reset(struct hpsb_host *host)
{
struct cmp_host *ch;
ch = lookup_cmp_host(host);
if (ch == NULL) BUG();
reset_plugs(ch);
}
static void cmp_remove_host(struct hpsb_host *host)
{
struct cmp_host *ch;
ch = lookup_cmp_host(host);
if (ch == NULL) BUG();
spin_lock_irq(&host_list_lock);
list_del(&ch->link);
spin_unlock_irq(&host_list_lock);
kfree(ch);
}
static int pcr_read(struct hpsb_host *host, int nodeid, quadlet_t *buf,
u64 addr, unsigned int length)
{
int csraddr = addr - CSR_REGISTER_BASE;
int plug;
struct cmp_host *ch;
if (length != 4)
return RCODE_TYPE_ERROR;
ch = lookup_cmp_host(host);
if (csraddr == 0x900) {
*buf = cpu_to_be32(ch->u.ompr_quadlet);
return RCODE_COMPLETE;
}
else if (csraddr < 0x904 + ch->u.ompr.nplugs * 4) {
plug = (csraddr - 0x904) / 4;
*buf = cpu_to_be32(ch->opcr[plug].u.quadlet);
return RCODE_COMPLETE;
}
else if (csraddr < 0x980) {
return RCODE_ADDRESS_ERROR;
}
else if (csraddr == 0x980) {
*buf = cpu_to_be32(ch->v.impr_quadlet);
return RCODE_COMPLETE;
}
else if (csraddr < 0x984 + ch->v.impr.nplugs * 4) {
plug = (csraddr - 0x984) / 4;
*buf = cpu_to_be32(ch->ipcr[plug].u.quadlet);
return RCODE_COMPLETE;
}
else
return RCODE_ADDRESS_ERROR;
}
static int pcr_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
u64 addr, quadlet_t data, quadlet_t arg, int extcode)
{
int csraddr = addr - CSR_REGISTER_BASE;
int plug;
struct cmp_host *ch;
ch = lookup_cmp_host(host);
if (extcode != EXTCODE_COMPARE_SWAP)
return RCODE_TYPE_ERROR;
if (csraddr == 0x900) {
/* FIXME: Ignore writes to bits 30-31 and 0-7 */
*store = cpu_to_be32(ch->u.ompr_quadlet);
if (arg == cpu_to_be32(ch->u.ompr_quadlet))
ch->u.ompr_quadlet = be32_to_cpu(data);
return RCODE_COMPLETE;
}
if (csraddr < 0x904 + ch->u.ompr.nplugs * 4) {
plug = (csraddr - 0x904) / 4;
*store = cpu_to_be32(ch->opcr[plug].u.quadlet);
if (arg == *store)
ch->opcr[plug].u.quadlet = be32_to_cpu(data);
if (be32_to_cpu(*store) != ch->opcr[plug].u.quadlet &&
ch->opcr[plug].update != NULL)
ch->opcr[plug].update(&ch->opcr[plug].u.pcr,
ch->opcr[plug].data);
return RCODE_COMPLETE;
}
else if (csraddr < 0x980) {
return RCODE_ADDRESS_ERROR;
}
else if (csraddr == 0x980) {
/* FIXME: Ignore writes to bits 24-31 and 0-7 */
*store = cpu_to_be32(ch->v.impr_quadlet);
if (arg == cpu_to_be32(ch->v.impr_quadlet))
ch->v.impr_quadlet = be32_to_cpu(data);
return RCODE_COMPLETE;
}
else if (csraddr < 0x984 + ch->v.impr.nplugs * 4) {
plug = (csraddr - 0x984) / 4;
*store = cpu_to_be32(ch->ipcr[plug].u.quadlet);
if (arg == *store)
ch->ipcr[plug].u.quadlet = be32_to_cpu(data);
if (be32_to_cpu(*store) != ch->ipcr[plug].u.quadlet &&
ch->ipcr[plug].update != NULL)
ch->ipcr[plug].update(&ch->ipcr[plug].u.pcr,
ch->ipcr[plug].data);
return RCODE_COMPLETE;
}
else
return RCODE_ADDRESS_ERROR;
}
static struct hpsb_highlevel_ops cmp_highlevel_ops = {
add_host: cmp_add_host,
remove_host: cmp_remove_host,
host_reset: cmp_host_reset,
};
static struct hpsb_address_ops pcr_ops = {
read: pcr_read,
lock: pcr_lock,
};
/* Module interface */
MODULE_AUTHOR("Kristian Hogsberg <hogsberg@users.sf.net>");
MODULE_DESCRIPTION("Connection Management Procedures (CMP)");
MODULE_SUPPORTED_DEVICE("cmp");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(cmp_register_opcr);
EXPORT_SYMBOL(cmp_unregister_opcr);
static int __init cmp_init_module (void)
{
cmp_highlevel = hpsb_register_highlevel ("cmp",
&cmp_highlevel_ops);
if (cmp_highlevel == NULL) {
HPSB_ERR("cmp: unable to register highlevel ops");
return -EIO;
}
hpsb_register_addrspace(cmp_highlevel, &pcr_ops,
CSR_REGISTER_BASE + CSR_PCR_MAP,
CSR_REGISTER_BASE + CSR_PCR_MAP_END);
HPSB_INFO("Loaded CMP driver");
return 0;
}
static void __exit cmp_exit_module (void)
{
hpsb_unregister_highlevel(cmp_highlevel);
HPSB_INFO("Unloaded CMP driver");
}
module_init(cmp_init_module);
module_exit(cmp_exit_module);
#ifndef __CMP_H
#define __CMP_H
struct cmp_mpr {
u32 nplugs:5;
u32 reserved:3;
u32 persistent_ext:8;
u32 non_persistent_ext:8;
u32 bcast_channel_base:6;
u32 rate:2;
} __attribute__((packed));
struct cmp_pcr {
u32 payload:10;
u32 overhead:4;
u32 speed:2;
u32 channel:6;
u32 reserved:2;
u32 p2p_count:6;
u32 bcast_count:1;
u32 online:1;
} __attribute__((packed));
struct cmp_pcr *cmp_register_opcr(struct hpsb_host *host, int plug,
int payload,
void (*update)(struct cmp_pcr *plug,
void *data),
void *data);
void cmp_unregister_opcr(struct hpsb_host *host, struct cmp_pcr *plug);
#endif /* __CMP_H */
@@ -412,6 +412,10 @@ static inline dma_addr_t dma_offset_to_bus(struct dma_region *dma, unsigned long
/* struct video_card contains all data associated with one instance
of the dv1394 driver
*/
enum modes {
MODE_RECEIVE,
MODE_TRANSMIT
};
struct video_card {
@@ -574,6 +578,7 @@ struct video_card {
unsigned int current_packet;
int first_frame; /* received first start frame marker? */
enum modes mode;
};
/*
......
@@ -46,8 +46,6 @@
/*
TODO:
- expose xmit and recv as separate devices
- tunable frame-drop behavior: either loop last frame, or halt transmission
- use a scatter/gather buffer for DMA programs (f->descriptor_pool)
@@ -73,6 +71,9 @@
- keep all video_cards in a list (for open() via chardev), set file->private_data = video
- dv1394_poll should indicate POLLIN when receiving buffers are available
- add proc fs interface to set cip_n, cip_d, syt_offset, and video signal
- expose xmit and recv as separate devices (not exclusive)
- expose NTSC and PAL as separate devices (can be overridden)
- read/edit channel in procfs
*/
@@ -169,7 +170,14 @@ static spinlock_t dv1394_cards_lock = SPIN_LOCK_UNLOCKED;
static struct hpsb_highlevel *hl_handle; /* = NULL; */
static devfs_handle_t dv1394_devfs_handle;
static LIST_HEAD(dv1394_devfs);
struct dv1394_devfs_entry {
struct list_head list;
devfs_handle_t devfs;
char name[32];
struct dv1394_devfs_entry *parent;
};
static spinlock_t dv1394_devfs_lock = SPIN_LOCK_UNLOCKED;
/* translate from a struct file* to the corresponding struct video_card* */
@@ -184,19 +192,71 @@ static inline struct video_card* file_to_video_card(struct file *file)
/* Memory management functions */
/*******************************/
#define MDEBUG(x) do { } while(0) /* Debug memory management */
/* [DaveM] I've recoded most of this so that:
* 1) It's easier to tell what is happening
* 2) It's more portable, especially for translating things
* out of vmalloc mapped areas in the kernel.
* 3) Less unnecessary translations happen.
*
* The code used to assume that the kernel vmalloc mappings
* existed in the page tables of every process, this is simply
* not guaranteed. We now use pgd_offset_k which is the
* defined way to get at the kernel page tables.
*/
/* Given PGD from the address space's page table, return the kernel
* virtual mapping of the physical memory mapped at ADR.
*/
static inline struct page *uvirt_to_page(pgd_t *pgd, unsigned long adr)
{
pmd_t *pmd;
pte_t *ptep, pte;
struct page *ret = NULL;
if (!pgd_none(*pgd)) {
pmd = pmd_offset(pgd, adr);
if (!pmd_none(*pmd)) {
ptep = pte_offset_kernel(pmd, adr);
pte = *ptep;
if(pte_present(pte))
ret = pte_page(pte);
}
}
return ret;
}
/* Here we want the physical address of the memory.
* This is used when initializing the contents of the
* area and marking the pages as reserved, and for
* handling page faults on the rvmalloc()ed buffer
*/
static inline unsigned long kvirt_to_pa(unsigned long adr)
{
unsigned long va, kva, ret;
va = VMALLOC_VMADDR(adr);
kva = (unsigned long) page_address(uvirt_to_page(pgd_offset_k(va), va));
kva |= adr & (PAGE_SIZE-1); /* restore the offset */
ret = __pa(kva);
MDEBUG(printk("kv2pa(%lx-->%lx)", adr, ret));
return ret;
}
static void * rvmalloc(unsigned long size)
{
void * mem;
unsigned long adr;
size=PAGE_ALIGN(size);
unsigned long adr, page;
mem=vmalloc_32(size);
if (mem) {
memset(mem, 0, size); /* Clear the ram out,
no junk to the user */
adr=(unsigned long) mem;
while (size > 0) {
mem_map_reserve(vmalloc_to_page((void *)adr));
page = kvirt_to_pa(adr);
mem_map_reserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
@@ -206,12 +266,13 @@ static void * rvmalloc(unsigned long size)
static void rvfree(void * mem, unsigned long size)
{
unsigned long adr;
unsigned long adr, page;
if (mem) {
adr=(unsigned long) mem;
while ((long) size > 0) {
mem_map_unreserve(vmalloc_to_page((void *)adr));
while (size > 0) {
page = kvirt_to_pa(adr);
mem_map_unreserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
@@ -1105,9 +1166,9 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
/* fill the sglist with the kernel addresses of pages in the non-contiguous buffer */
for(i = 0; i < video->user_dma.n_pages; i++) {
unsigned long va = (unsigned long) video->user_buf + i * PAGE_SIZE;
unsigned long va = VMALLOC_VMADDR( (unsigned long) video->user_buf + i * PAGE_SIZE );
video->user_dma.sglist[i].page = vmalloc_to_page((void *)va);
video->user_dma.sglist[i].page = uvirt_to_page(pgd_offset_k(va), va);
video->user_dma.sglist[i].length = PAGE_SIZE;
}
@@ -1238,9 +1299,9 @@ static int do_dv1394_init_default(struct video_card *video)
struct dv1394_init init;
init.api_version = DV1394_API_VERSION;
init.channel = 63;
init.n_frames = 2;
/* the following are now set via proc_fs */
/* the following are now set via proc_fs or devfs */
init.channel = video->channel;
init.format = video->pal_or_ntsc;
init.cip_n = video->cip_n;
init.cip_d = video->cip_d;
@@ -1431,7 +1492,7 @@ static int do_dv1394_shutdown(struct video_card *video, int free_user_buf)
static struct page * dv1394_nopage(struct vm_area_struct * area, unsigned long address, int write_access)
{
unsigned long offset;
unsigned long kernel_virt_addr;
unsigned long page, kernel_virt_addr;
struct page *ret = NOPAGE_SIGBUS;
struct video_card *video = (struct video_card*) area->vm_private_data;
@@ -1449,7 +1510,10 @@ static struct page * dv1394_nopage(struct vm_area_struct * area, unsigned long a
offset = address - area->vm_start;
kernel_virt_addr = (unsigned long) video->user_buf + offset;
ret = vmalloc_to_page((void *)kernel_virt_addr);
page = kvirt_to_pa(kernel_virt_addr);
ret = virt_to_page(__va(page));
get_page(ret);
out:
@@ -2021,7 +2085,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
struct video_card *p;
list_for_each(lh, &dv1394_cards) {
p = list_entry(lh, struct video_card, list);
if(p->id == ieee1394_file_to_instance(file)) {
if((p->id >> 2) == ieee1394_file_to_instance(file)) {
video = p;
break;
}
@@ -2046,7 +2110,6 @@ static int dv1394_open(struct inode *inode, struct file *file)
#endif
V22_COMPAT_MOD_INC_USE_COUNT;
return 0;
}
@@ -2064,36 +2127,38 @@ static int dv1394_release(struct inode *inode, struct file *file)
/* give someone else a turn */
clear_bit(0, &video->open);
V22_COMPAT_MOD_DEC_USE_COUNT;
return 0;
}
/*** PROC_FS INTERFACE ******************************************************/
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *dv1394_procfs_entry;
static LIST_HEAD(dv1394_procfs);
struct dv1394_procfs_entry {
struct list_head list;
struct proc_dir_entry *procfs;
char name[32];
struct dv1394_procfs_entry *parent;
};
static spinlock_t dv1394_procfs_lock = SPIN_LOCK_UNLOCKED;
static int dv1394_procfs_read( char *page, char **start, off_t off,
int count, int *eof, void *data)
{
struct video_card *video = (struct video_card*) data;
V22_COMPAT_MOD_INC_USE_COUNT;
snprintf( page, count,
"\
dv1394 settings for host %d:\n\
----------------------------\n\
format=%s\n\
channel=%d\n\
cip_n=%lu\n\
cip_d=%lu\n\
syt_offset=%u\n",
video->id,
(video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"),
video->channel,
video->cip_n, video->cip_d, video->syt_offset );
V22_COMPAT_MOD_DEC_USE_COUNT;
return strlen(page);
}
#endif /* CONFIG_PROC_FS */
/* lifted from the stallion.c driver */
#undef TOLOWER
@@ -2128,7 +2193,6 @@ static unsigned long atol(char *str)
return(val);
}
#ifdef CONFIG_PROC_FS
static int dv1394_procfs_write( struct file *file,
const char *buffer, unsigned long count, void *data)
{
@@ -2137,17 +2201,13 @@ static int dv1394_procfs_write( struct file *file,
char *pos;
struct video_card *video = (struct video_card*) data;
V22_COMPAT_MOD_INC_USE_COUNT;
if (count > 64)
len = 64;
else
len = count;
if (copy_from_user( new_value, buffer, len)) {
V22_COMPAT_MOD_DEC_USE_COUNT;
if (copy_from_user( new_value, buffer, len))
return -EFAULT;
}
pos = strchr(new_value, '=');
if (pos != NULL) {
@@ -2169,31 +2229,142 @@ static int dv1394_procfs_write( struct file *file,
video->cip_d = atol(buf);
} else if (strnicmp( new_value, "syt_offset", (pos-new_value)) == 0) {
video->syt_offset = atol(buf);
} else if (strnicmp( new_value, "channel", (pos-new_value)) == 0) {
video->channel = atol(buf);
}
}
V22_COMPAT_MOD_DEC_USE_COUNT;
return len;
}
struct dv1394_procfs_entry *
dv1394_procfs_find( char *name)
{
struct list_head *lh;
struct dv1394_procfs_entry *p;
spin_lock( &dv1394_procfs_lock);
if(!list_empty(&dv1394_procfs)) {
list_for_each(lh, &dv1394_procfs) {
p = list_entry(lh, struct dv1394_procfs_entry, list);
if(!strncmp(p->name, name, sizeof(p->name))) {
spin_unlock( &dv1394_procfs_lock);
return p;
}
}
}
spin_unlock( &dv1394_procfs_lock);
return NULL;
}
static int dv1394_procfs_add_entry(struct video_card *video)
{
struct proc_dir_entry *procfs_entry = NULL;
char buf[16];
char buf[32];
struct dv1394_procfs_entry *p;
struct dv1394_procfs_entry *parent;
p = kmalloc(sizeof(struct dv1394_procfs_entry), GFP_KERNEL);
if(!p) {
printk(KERN_ERR "dv1394: cannot allocate dv1394_procfs_entry\n");
goto err;
}
memset(p, 0, sizeof(struct dv1394_procfs_entry));
snprintf(buf, sizeof(buf), "dv/host%d/%s", (video->id>>2),
(video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"));
parent = dv1394_procfs_find(buf);
if (parent == NULL) {
printk(KERN_ERR "dv1394: unable to locate parent procfs of %s\n", buf);
goto err_free;
}
snprintf(buf, sizeof(buf), "%d", video->id);
p->procfs = create_proc_entry(
(video->mode == MODE_RECEIVE ? "in" : "out"),
0666, parent->procfs);
if (p->procfs == NULL) {
printk(KERN_ERR "dv1394: unable to create /proc/bus/ieee1394/%s/%s\n",
parent->name,
(video->mode == MODE_RECEIVE ? "in" : "out"));
goto err_free;
}
p->procfs->owner = THIS_MODULE;
p->procfs->data = video;
p->procfs->read_proc = dv1394_procfs_read;
p->procfs->write_proc = dv1394_procfs_write;
spin_lock( &dv1394_procfs_lock);
INIT_LIST_HEAD(&p->list);
list_add_tail(&p->list, &dv1394_procfs);
spin_unlock( &dv1394_procfs_lock);
return 0;
err_free:
kfree(p);
err:
return -ENOMEM;
}
procfs_entry = create_proc_entry( buf, 0666, dv1394_procfs_entry);
if (procfs_entry == NULL) {
printk(KERN_ERR "dv1394: unable to create /proc/bus/ieee1394/dv/X\n");
return -ENOMEM;
static int
dv1394_procfs_add_dir( char *name,
struct dv1394_procfs_entry *parent,
struct dv1394_procfs_entry **out)
{
struct dv1394_procfs_entry *p;
p = kmalloc(sizeof(struct dv1394_procfs_entry), GFP_KERNEL);
if(!p) {
printk(KERN_ERR "dv1394: cannot allocate dv1394_procfs_entry\n");
goto err;
}
procfs_entry->owner = THIS_MODULE;
procfs_entry->data = video;
procfs_entry->read_proc = dv1394_procfs_read;
procfs_entry->write_proc = dv1394_procfs_write;
memset(p, 0, sizeof(struct dv1394_procfs_entry));
if (parent == NULL) {
snprintf(p->name, sizeof(p->name), "%s", name);
p->procfs = proc_mkdir( name, ieee1394_procfs_entry);
} else {
snprintf(p->name, sizeof(p->name), "%s/%s", parent->name, name);
p->procfs = proc_mkdir( name, parent->procfs);
}
if (p->procfs == NULL) {
printk(KERN_ERR "dv1394: unable to create /proc/bus/ieee1394/%s\n", p->name);
goto err_free;
}
p->procfs->owner = THIS_MODULE;
p->parent = parent;
if (out != NULL) *out = p;
spin_lock( &dv1394_procfs_lock);
INIT_LIST_HEAD(&p->list);
list_add_tail(&p->list, &dv1394_procfs);
spin_unlock( &dv1394_procfs_lock);
return 0;
err_free:
kfree(p);
err:
return -ENOMEM;
}
void dv1394_procfs_del( char *name)
{
struct dv1394_procfs_entry *p = dv1394_procfs_find(name);
if (p != NULL) {
if (p->parent == NULL)
remove_proc_entry(p->name, ieee1394_procfs_entry);
else
remove_proc_entry(p->name, p->parent->procfs);
spin_lock( &dv1394_procfs_lock);
list_del(&p->list);
spin_unlock( &dv1394_procfs_lock);
kfree(p);
}
}
#endif /* CONFIG_PROC_FS */
@@ -2431,7 +2602,7 @@ static void irq_handler(int card, quadlet_t isoRecvIntEvent,
static struct file_operations dv1394_fops=
{
OWNER_THIS_MODULE
owner: THIS_MODULE,
poll: dv1394_poll,
ioctl: dv1394_ioctl,
mmap: dv1394_mmap,
@@ -2443,11 +2614,141 @@ static struct file_operations dv1394_fops=
};
static int dv1394_init(struct ti_ohci *ohci)
/*** DEVFS HELPERS *********************************************************/
struct dv1394_devfs_entry *
dv1394_devfs_find( char *name)
{
struct list_head *lh;
struct dv1394_devfs_entry *p;
spin_lock( &dv1394_devfs_lock);
if(!list_empty(&dv1394_devfs)) {
list_for_each(lh, &dv1394_devfs) {
p = list_entry(lh, struct dv1394_devfs_entry, list);
if(!strncmp(p->name, name, sizeof(p->name))) {
spin_unlock( &dv1394_devfs_lock);
return p;
}
}
}
spin_unlock( &dv1394_devfs_lock);
return NULL;
}
static int dv1394_devfs_add_entry(struct video_card *video)
{
char buf[32];
struct dv1394_devfs_entry *p;
struct dv1394_devfs_entry *parent;
p = kmalloc(sizeof(struct dv1394_devfs_entry), GFP_KERNEL);
if(!p) {
printk(KERN_ERR "dv1394: cannot allocate dv1394_devfs_entry\n");
goto err;
}
memset(p, 0, sizeof(struct dv1394_devfs_entry));
snprintf(buf, sizeof(buf), "dv/host%d/%s", (video->id>>2),
(video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"));
parent = dv1394_devfs_find(buf);
if (parent == NULL) {
printk(KERN_ERR "dv1394: unable to locate parent devfs of %s\n", buf);
goto err_free;
}
video->devfs_handle = devfs_register(
parent->devfs,
(video->mode == MODE_RECEIVE ? "in" : "out"),
DEVFS_FL_NONE,
IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_DV1394*16 + video->id,
S_IFCHR | S_IRUGO | S_IWUGO,
&dv1394_fops,
(void*) video);
p->devfs = video->devfs_handle;
if (p->devfs == NULL) {
printk(KERN_ERR "dv1394: unable to create /dev/ieee1394/%s/%s\n",
parent->name,
(video->mode == MODE_RECEIVE ? "in" : "out"));
goto err_free;
}
spin_lock( &dv1394_devfs_lock);
INIT_LIST_HEAD(&p->list);
list_add_tail(&p->list, &dv1394_devfs);
spin_unlock( &dv1394_devfs_lock);
return 0;
err_free:
kfree(p);
err:
return -ENOMEM;
}
static int
dv1394_devfs_add_dir( char *name,
struct dv1394_devfs_entry *parent,
struct dv1394_devfs_entry **out)
{
struct dv1394_devfs_entry *p;
p = kmalloc(sizeof(struct dv1394_devfs_entry), GFP_KERNEL);
if(!p) {
printk(KERN_ERR "dv1394: cannot allocate dv1394_devfs_entry\n");
goto err;
}
memset(p, 0, sizeof(struct dv1394_devfs_entry));
if (parent == NULL) {
snprintf(p->name, sizeof(p->name), "%s", name);
p->devfs = devfs_mk_dir(ieee1394_devfs_handle, name, NULL);
} else {
snprintf(p->name, sizeof(p->name), "%s/%s", parent->name, name);
p->devfs = devfs_mk_dir(parent->devfs, name, NULL);
}
if (p->devfs == NULL) {
printk(KERN_ERR "dv1394: unable to create /dev/ieee1394/%s\n", p->name);
goto err_free;
}
p->parent = parent;
if (out != NULL) *out = p;
spin_lock( &dv1394_devfs_lock);
INIT_LIST_HEAD(&p->list);
list_add_tail(&p->list, &dv1394_devfs);
spin_unlock( &dv1394_devfs_lock);
return 0;
err_free:
kfree(p);
err:
return -ENOMEM;
}
void dv1394_devfs_del( char *name)
{
struct dv1394_devfs_entry *p = dv1394_devfs_find(name);
if (p != NULL) {
devfs_unregister(p->devfs);
spin_lock( &dv1394_devfs_lock);
list_del(&p->list);
spin_unlock( &dv1394_devfs_lock);
kfree(p);
}
}
/*** IEEE1394 HPSB CALLBACKS ***********************************************/
static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes mode)
{
struct video_card *video;
unsigned long flags;
char buf[16];
int i;
video = kmalloc(sizeof(struct video_card), GFP_KERNEL);
......@@ -2465,10 +2766,9 @@ static int dv1394_init(struct ti_ohci *ohci)
video->ohci = ohci;
video->id = ohci->id;
if ( dv1394_procfs_add_entry(video) < 0 )
goto err_free;
/* lower 2 bits of id indicate which of four "plugs"
per host */
video->id = ohci->id << 2;
video->ohci_it_ctx = -1;
video->ohci_ir_ctx = -1;
......@@ -2483,14 +2783,20 @@ static int dv1394_init(struct ti_ohci *ohci)
video->ohci_IsoRcvContextMatch = 0;
video->n_frames = 0; /* flag that video is not initialized */
video->channel = -1;
video->channel = 63; /* default to broadcast channel */
video->active_frame = -1;
/* initialize the following for proc_fs */
video->pal_or_ntsc = DV1394_NTSC;
video->pal_or_ntsc = format;
video->cip_n = 0; /* 0 = use builtin default */
video->cip_d = 0;
video->syt_offset = 0;
video->mode = mode;
#ifdef CONFIG_PROC_FS
if ( dv1394_procfs_add_entry(video) < 0 )
goto err_free;
#endif
for(i = 0; i < DV1394_MAX_FRAMES; i++)
video->frames[i] = NULL;
......@@ -2509,17 +2815,14 @@ static int dv1394_init(struct ti_ohci *ohci)
list_add_tail(&video->list, &dv1394_cards);
spin_unlock_irqrestore(&dv1394_cards_lock, flags);
snprintf(buf, sizeof(buf), "%d", video->id);
if (format == DV1394_NTSC)
video->id |= mode;
else video->id |= 2 + mode;
video->devfs_handle = devfs_register(dv1394_devfs_handle,
buf, DEVFS_FL_NONE,
IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_DV1394*16 + video->id,
S_IFCHR | S_IRUGO | S_IWUGO,
&dv1394_fops,
(void*) video);
if (dv1394_devfs_add_entry(video) < 0)
goto err_free;
debug_printk("dv1394: dv1394_init() OK on ID %d\n", ohci->id);
debug_printk("dv1394: dv1394_init() OK on ID %d\n", video->id);
return 0;
......@@ -2531,18 +2834,20 @@ static int dv1394_init(struct ti_ohci *ohci)
static void dv1394_un_init(struct video_card *video)
{
unsigned long flags;
char buf[32];
/* obviously nobody has the driver open at this point */
do_dv1394_shutdown(video, 1);
ohci1394_unhook_irq(video->ohci, irq_handler, (void*) video);
if(video->devfs_handle)
devfs_unregister(video->devfs_handle);
spin_lock_irqsave(&dv1394_cards_lock, flags);
snprintf(buf, sizeof(buf), "dv/host%d/%s/%s", (video->id >> 2),
(video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"),
(video->mode == MODE_RECEIVE ? "in" : "out")
);
dv1394_devfs_del(buf);
#ifdef CONFIG_PROC_FS
dv1394_procfs_del(buf);
#endif
list_del(&video->list);
spin_unlock_irqrestore(&dv1394_cards_lock, flags);
kfree(video);
}
......@@ -2553,6 +2858,8 @@ static void dv1394_remove_host (struct hpsb_host *host)
struct video_card *video = NULL;
unsigned long flags;
struct list_head *lh;
char buf[32];
int n;
/* We only work with the OHCI-1394 driver */
if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
......@@ -2561,33 +2868,40 @@ static void dv1394_remove_host (struct hpsb_host *host)
ohci = (struct ti_ohci *)host->hostdata;
/* find the corresponding video_card */
/* find the corresponding video_cards */
spin_lock_irqsave(&dv1394_cards_lock, flags);
if(!list_empty(&dv1394_cards)) {
struct video_card *p;
list_for_each(lh, &dv1394_cards) {
p = list_entry(lh, struct video_card, list);
if(p->id == ohci->id) {
video = p;
break;
}
video = list_entry(lh, struct video_card, list);
if((video->id >> 2) == ohci->id)
dv1394_un_init(video);
}
}
spin_unlock_irqrestore(&dv1394_cards_lock, flags);
if(video) {
char buf[16];
dv1394_un_init(video);
snprintf( buf, sizeof(buf), "%i", video->id);
n = ohci->id; /* the host id; video may be stale or NULL after the loop above */
snprintf(buf, sizeof(buf), "dv/host%d/NTSC", n);
dv1394_devfs_del(buf);
snprintf(buf, sizeof(buf), "dv/host%d/PAL", n);
dv1394_devfs_del(buf);
snprintf(buf, sizeof(buf), "dv/host%d", n);
dv1394_devfs_del(buf);
#ifdef CONFIG_PROC_FS
remove_proc_entry( buf, dv1394_procfs_entry);
#endif
}
snprintf(buf, sizeof(buf), "dv/host%d/NTSC", n);
dv1394_procfs_del(buf);
snprintf(buf, sizeof(buf), "dv/host%d/PAL", n);
dv1394_procfs_del(buf);
snprintf(buf, sizeof(buf), "dv/host%d", n);
dv1394_procfs_del(buf);
#endif
}
static void dv1394_add_host (struct hpsb_host *host)
{
struct ti_ohci *ohci;
char buf[16];
struct dv1394_devfs_entry *devfs_entry;
/* We only work with the OHCI-1394 driver */
if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
......@@ -2595,7 +2909,31 @@ static void dv1394_add_host (struct hpsb_host *host)
ohci = (struct ti_ohci *)host->hostdata;
dv1394_init(ohci);
#ifdef CONFIG_PROC_FS
{
struct dv1394_procfs_entry *p;
p = dv1394_procfs_find("dv");
if (p != NULL) {
snprintf(buf, sizeof(buf), "host%d", ohci->id);
dv1394_procfs_add_dir(buf, p, &p);
dv1394_procfs_add_dir("NTSC", p, NULL);
dv1394_procfs_add_dir("PAL", p, NULL);
}
}
#endif
devfs_entry = dv1394_devfs_find("dv");
if (devfs_entry != NULL) {
snprintf(buf, sizeof(buf), "host%d", ohci->id);
dv1394_devfs_add_dir(buf, devfs_entry, &devfs_entry);
dv1394_devfs_add_dir("NTSC", devfs_entry, NULL);
dv1394_devfs_add_dir("PAL", devfs_entry, NULL);
}
dv1394_init(ohci, DV1394_NTSC, MODE_RECEIVE);
dv1394_init(ohci, DV1394_NTSC, MODE_TRANSMIT);
dv1394_init(ohci, DV1394_PAL, MODE_RECEIVE);
dv1394_init(ohci, DV1394_PAL, MODE_TRANSMIT);
}
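/* Illustrative sketch, not part of this patch: how the four per-host
 * instances created in dv1394_add_host() above map onto device minors.
 * The lower two bits of video->id select one of the four "plugs"
 * (NTSC/PAL x receive/transmit); example_dv1394_minor() is an invented
 * name used only for this example. */
static inline int example_dv1394_minor(int host_id, enum pal_or_ntsc format,
				       enum modes mode)
{
	/* mirrors the id computation done in dv1394_init() */
	int plug = (format == DV1394_NTSC ? 0 : 2) + mode;

	/* e.g. host 0, PAL, transmit ends up as /dev/ieee1394/dv/host0/PAL/out */
	return IEEE1394_MINOR_BLOCK_DV1394 * 16 + (host_id << 2) + plug;
}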
static struct hpsb_highlevel_ops hl_ops = {
......@@ -2615,9 +2953,9 @@ static void __exit dv1394_exit_module(void)
{
hpsb_unregister_highlevel (hl_handle);
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_DV1394);
devfs_unregister(dv1394_devfs_handle);
dv1394_devfs_del("dv");
#ifdef CONFIG_PROC_FS
remove_proc_entry( "dv", ieee1394_procfs_entry);
dv1394_procfs_del("dv");
#endif
}
......@@ -2629,26 +2967,28 @@ static int __init dv1394_init_module(void)
return -EIO;
}
dv1394_devfs_handle = devfs_mk_dir(ieee1394_devfs_handle, "dv", NULL);
if (dv1394_devfs_add_dir("dv", NULL, NULL) < 0) {
printk(KERN_ERR "dv1394: unable to create /dev/ieee1394/dv\n");
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_DV1394);
return -ENOMEM;
}
#ifdef CONFIG_PROC_FS
dv1394_procfs_entry = proc_mkdir( "dv", ieee1394_procfs_entry);
if (dv1394_procfs_entry == NULL) {
printk(KERN_ERR "dv1394: unable to create /proc/ieee1394/dv\n");
if (dv1394_procfs_add_dir("dv",NULL,NULL) < 0) {
printk(KERN_ERR "dv1394: unable to create /proc/bus/ieee1394/dv\n");
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_DV1394);
devfs_unregister(dv1394_devfs_handle);
dv1394_devfs_del("dv");
return -ENOMEM;
}
dv1394_procfs_entry->owner = THIS_MODULE;
#endif
hl_handle = hpsb_register_highlevel ("dv1394", &hl_ops);
if (hl_handle == NULL) {
printk(KERN_ERR "dv1394: hpsb_register_highlevel failed\n");
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_DV1394);
devfs_unregister(dv1394_devfs_handle);
dv1394_devfs_del("dv");
#ifdef CONFIG_PROC_FS
remove_proc_entry( "dv", ieee1394_procfs_entry);
dv1394_procfs_del("dv");
#endif
return -ENOMEM;
}
......
/*
* eth1394.c -- Ethernet driver for Linux IEEE-1394 Subsystem
*
* Copyright (C) 2001 Ben Collins <bcollins@debian.org>
* 2000 Bonin Franck <boninf@free.fr>
*
* Mainly based on work by Emanuel Pirker and Andreas E. Bombe
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* State of this driver:
*
* This driver intends to support RFC 2734, which describes a method for
* transporting IPv4 datagrams over IEEE-1394 serial busses. This driver
* will ultimately support that method, but currently falls short in
* several areas. A few issues are:
*
* - Does not support send/recv over Async streams using GASP
* packet formats, as per the RFC for ARP requests.
* - Does not yet support fragmented packets.
* - Relies on hardware address being equal to the nodeid for some things.
* - Does not support multicast
* - Hardcoded address for sending packets, instead of using discovery
* (ARP, see first item)
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <asm/delay.h>
#include <asm/semaphore.h>
#include <asm/bitops.h>
#include <net/arp.h>
#include "ieee1394_types.h"
#include "ieee1394_core.h"
#include "ieee1394_transactions.h"
#include "ieee1394.h"
#include "highlevel.h"
#include "eth1394.h"
#define ETH1394_PRINT_G(level, fmt, args...) \
printk(level ETHER1394_DRIVER_NAME": "fmt, ## args)
#define ETH1394_PRINT(level, dev_name, fmt, args...) \
printk(level ETHER1394_DRIVER_NAME": %s: " fmt, dev_name, ## args)
#define DEBUG(fmt, args...) \
printk(KERN_ERR fmt, ## args)
static char version[] __devinitdata =
"eth1394.c:v0.50 15/Jul/01 Ben Collins <bcollins@debian.org>";
/* Our ieee1394 highlevel driver */
#define ETHER1394_DRIVER_NAME "ether1394"
static kmem_cache_t *packet_task_cache;
static struct hpsb_highlevel *hl_handle = NULL;
/* Card handling */
static LIST_HEAD (host_info_list);
static spinlock_t host_info_lock = SPIN_LOCK_UNLOCKED;
/* Use common.lf to determine header len */
static int hdr_type_len[] = {
sizeof (struct eth1394_uf_hdr),
sizeof (struct eth1394_ff_hdr),
sizeof (struct eth1394_sf_hdr),
sizeof (struct eth1394_sf_hdr)
};
MODULE_AUTHOR("Ben Collins (bcollins@debian.org)");
MODULE_DESCRIPTION("IEEE 1394 IPv4 Driver (IPv4-over-1394 as per RFC 2734)");
/* Find our host_info struct for a given host pointer. Must be called
* under spinlock. */
static inline struct host_info *find_host_info (struct hpsb_host *host)
{
struct list_head *lh;
struct host_info *hi;
lh = host_info_list.next;
while (lh != &host_info_list) {
hi = list_entry (lh, struct host_info, list);
if (hi->host == host)
return hi;
lh = lh->next;
}
return NULL;
}
/* Find the network device for our host */
static inline struct net_device *ether1394_find_dev (struct hpsb_host *host)
{
struct host_info *hi;
spin_lock_irq (&host_info_lock);
hi = find_host_info (host);
spin_unlock_irq (&host_info_lock);
if (hi == NULL)
return NULL;
return hi->dev;
}
/* This is called after an "ifup" */
static int ether1394_open (struct net_device *dev)
{
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
/* Set the spinlock before grabbing IRQ! */
priv->lock = SPIN_LOCK_UNLOCKED;
netif_start_queue (dev);
return 0;
}
/* This is called after an "ifdown" */
static int ether1394_stop (struct net_device *dev)
{
netif_stop_queue (dev);
return 0;
}
/* Return statistics to the caller */
static struct net_device_stats *ether1394_stats (struct net_device *dev)
{
return &(((struct eth1394_priv *)dev->priv)->stats);
}
/* What to do if we timeout. I think a host reset is probably in order, so
* that's what we do. Should we increment the stat counters too? */
static void ether1394_tx_timeout (struct net_device *dev)
{
ETH1394_PRINT (KERN_ERR, dev->name, "Timeout, resetting host %s\n",
((struct eth1394_priv *)(dev->priv))->host->driver->name);
highlevel_host_reset (((struct eth1394_priv *)(dev->priv))->host);
netif_wake_queue (dev);
}
/* We need to encapsulate the standard header with our own. We use the
* ethernet header's proto for our own.
*
* XXX: This is where we need to create a list of skb's for fragmented
* packets. */
static inline void ether1394_encapsulate (struct sk_buff *skb, struct net_device *dev,
int proto)
{
union eth1394_hdr *hdr =
(union eth1394_hdr *)skb_push (skb, hdr_type_len[ETH1394_HDR_LF_UF]);
hdr->common.lf = ETH1394_HDR_LF_UF;
hdr->words.word1 = htons(hdr->words.word1);
hdr->uf.ether_type = proto;
return;
}
/* Convert a standard ARP packet to 1394 ARP. The first 8 bytes (the
* entire arphdr) is the same format as the ip1394 header, so they
* overlap. The rest needs to be munged a bit. The remainder of the
* arphdr is formatted based on hwaddr len and ipaddr len. We know what
* they'll be, so it's easy to judge. */
static inline void ether1394_arp_to_1394arp (struct sk_buff *skb, struct net_device *dev)
{
struct eth1394_priv *priv =
(struct eth1394_priv *)(dev->priv);
u16 phy_id = priv->host->node_id & NODE_MASK;
unsigned char *arp_ptr = (unsigned char *)skb->data;
struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
unsigned char arp_data[2*(dev->addr_len+4)];
/* Copy the main data that we need */
arp_ptr = memcpy (arp_data, arp_ptr + sizeof(struct arphdr), sizeof (arp_data));
/* Extend the buffer enough for our new header */
skb_put (skb, sizeof (struct eth1394_arp) -
(sizeof (arp_data) + sizeof (struct arphdr)));
#define PROCESS_MEMBER(ptr,val,len) \
memcpy (val, ptr, len); ptr += len
arp_ptr += arp1394->hw_addr_len;
PROCESS_MEMBER (arp_ptr, &arp1394->sip, arp1394->ip_addr_len);
arp_ptr += arp1394->hw_addr_len;
PROCESS_MEMBER (arp_ptr, &arp1394->tip, arp1394->ip_addr_len);
#undef PROCESS_MEMBER
/* Now add our own flavor of arp header fields to the orig one */
arp1394->hw_addr_len = IP1394_HW_ADDR_LEN;
arp1394->hw_type = __constant_htons (ARPHRD_IEEE1394);
arp1394->s_uniq_id = cpu_to_le64 (priv->eui[phy_id]);
arp1394->max_rec = priv->max_rec[phy_id];
arp1394->sspd = priv->sspd[phy_id];
arp1394->fifo_hi = htons (priv->fifo_hi[phy_id]);
arp1394->fifo_lo = htonl (priv->fifo_lo[phy_id]);
return;
}
static int ether1394_change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < 68) || (new_mtu > ETHER1394_REGION_ADDR_LEN))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
static inline void ether1394_register_limits (int nodeid, unsigned char max_rec,
unsigned char sspd, u64 eui, u16 fifo_hi,
u32 fifo_lo, struct eth1394_priv *priv)
{
int i;
if (nodeid < 0 || nodeid >= ALL_NODES) {
ETH1394_PRINT_G (KERN_ERR, "Cannot register invalid nodeid %d\n", nodeid);
return;
}
priv->max_rec[nodeid] = max_rec;
priv->sspd[nodeid] = sspd;
priv->fifo_hi[nodeid] = fifo_hi;
priv->fifo_lo[nodeid] = fifo_lo;
priv->eui[nodeid] = eui;
/* 63 is used for broadcasts to all hosts. Its limits are the
* minimum over all registered nodes. A registered node is one with
* a nonzero FIFO offset. Set the values ridiculously high to start; we
* know we have at least one node to lower the default to. */
sspd = 0xff;
max_rec = 0xff;
for (i = 0; i < ALL_NODES; i++) {
if (!priv->fifo_hi[i] && !priv->fifo_lo[i]) continue; /* Unregistered */
if (priv->max_rec[i] < max_rec) max_rec = priv->max_rec[i];
if (priv->sspd[i] < sspd) sspd = priv->sspd[i];
}
priv->max_rec[ALL_NODES] = max_rec;
priv->sspd[ALL_NODES] = sspd;
return;
}
static void ether1394_reset_priv (struct net_device *dev, int set_mtu)
{
unsigned long flags;
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
int phy_id = priv->host->node_id & NODE_MASK;
spin_lock_irqsave (&priv->lock, flags);
/* Clear the speed/payload/offset tables */
memset (priv->max_rec, 0, sizeof (priv->max_rec));
memset (priv->sspd, 0, sizeof (priv->sspd));
memset (priv->fifo_hi, 0, sizeof (priv->fifo_hi));
memset (priv->fifo_lo, 0, sizeof (priv->fifo_lo));
/* Register our limits now */
ether1394_register_limits (phy_id, (be32_to_cpu(priv->host->csr.rom[2]) >> 12) & 0xf,
priv->host->speed_map[(phy_id << 6) + phy_id],
(u64)(((u64)be32_to_cpu(priv->host->csr.rom[3]) << 32) |
be32_to_cpu(priv->host->csr.rom[4])),
ETHER1394_REGION_ADDR >> 32,
ETHER1394_REGION_ADDR & 0xffffffff, priv);
/* We'll use our max_rec as the default mtu */
if (set_mtu)
dev->mtu = (1 << (priv->max_rec[phy_id] + 1)) - sizeof (union eth1394_hdr);
/* Set our hardware address while we're at it */
*(nodeid_t *)dev->dev_addr = htons (priv->host->node_id);
spin_unlock_irqrestore (&priv->lock, flags);
}
static int ether1394_tx (struct sk_buff *skb, struct net_device *dev);
/* This function is called by register_netdev */
static int ether1394_init_dev (struct net_device *dev)
{
/* Our functions */
dev->open = ether1394_open;
dev->stop = ether1394_stop;
dev->hard_start_xmit = ether1394_tx;
dev->get_stats = ether1394_stats;
dev->tx_timeout = ether1394_tx_timeout;
dev->change_mtu = ether1394_change_mtu;
/* Some constants */
dev->watchdog_timeo = ETHER1394_TIMEOUT;
dev->flags = IFF_BROADCAST; /* TODO: Support MCAP */
dev->features = NETIF_F_NO_CSUM|NETIF_F_SG|NETIF_F_HIGHDMA|NETIF_F_FRAGLIST;
dev->addr_len = 2;
ether1394_reset_priv (dev, 1);
return 0;
}
/*
* This function is called every time a card is found. It is generally called
* when the module is installed. This is where we add all of our ethernet
* devices. One for each host.
*/
static void ether1394_add_host (struct hpsb_host *host)
{
struct host_info *hi = NULL;
struct net_device *dev = NULL;
struct eth1394_priv *priv;
static int version_printed = 0;
if (version_printed++ == 0)
ETH1394_PRINT_G (KERN_INFO, "%s\n", version);
dev = alloc_etherdev(sizeof (struct eth1394_priv));
if (dev == NULL)
goto out;
SET_MODULE_OWNER(dev);
dev->init = ether1394_init_dev;
priv = (struct eth1394_priv *)dev->priv;
priv->host = host;
hi = (struct host_info *)kmalloc (sizeof (struct host_info),
GFP_KERNEL);
if (hi == NULL)
goto out;
if (register_netdev (dev)) {
ETH1394_PRINT (KERN_ERR, dev->name, "Error registering network driver\n");
kfree (hi);
kfree (dev);
return;
}
ETH1394_PRINT (KERN_INFO, dev->name, "IEEE-1394 IPv4 over 1394 Ethernet (%s)\n",
host->driver->name);
INIT_LIST_HEAD (&hi->list);
hi->host = host;
hi->dev = dev;
spin_lock_irq (&host_info_lock);
list_add_tail (&hi->list, &host_info_list);
spin_unlock_irq (&host_info_lock);
return;
out:
if (dev != NULL)
kfree (dev);
ETH1394_PRINT_G (KERN_ERR, "Out of memory\n");
return;
}
/* Remove a card from our list */
static void ether1394_remove_host (struct hpsb_host *host)
{
struct host_info *hi;
spin_lock_irq (&host_info_lock);
hi = find_host_info (host);
if (hi != NULL) {
unregister_netdev (hi->dev);
kfree (hi->dev);
list_del (&hi->list);
kfree (hi);
}
spin_unlock_irq (&host_info_lock);
return;
}
/* A bus reset has just occurred */
static void ether1394_host_reset (struct hpsb_host *host)
{
struct net_device *dev = ether1394_find_dev(host);
/* This can happen for hosts that we don't use */
if (dev == NULL)
return;
/* Reset our private host data, but not our mtu */
netif_stop_queue (dev);
ether1394_reset_priv (dev, 0);
netif_wake_queue (dev);
}
/* Copied from net/ethernet/eth.c */
static inline unsigned short ether1394_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct ethhdr *eth;
unsigned char *rawp;
skb->mac.raw = skb->data;
skb_pull (skb, ETH_HLEN);
eth = skb->mac.ethernet;
#if 0
if(*eth->h_dest & 1) {
if(memcmp(eth->h_dest, dev->broadcast, dev->addr_len)==0)
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
} else {
if(memcmp(eth->h_dest, dev->dev_addr, dev->addr_len))
skb->pkt_type = PACKET_OTHERHOST;
}
#endif
if (ntohs (eth->h_proto) >= 1536)
return eth->h_proto;
rawp = skb->data;
if (*(unsigned short *)rawp == 0xFFFF)
return htons (ETH_P_802_3);
return htons (ETH_P_802_2);
}
/* Parse an encapsulated IP1394 header into an ethernet frame packet.
* We also perform ARP translation here, if need be. */
static inline unsigned short ether1394_parse_encap (struct sk_buff *skb, struct net_device *dev,
nodeid_t srcid, nodeid_t destid)
{
union eth1394_hdr *hdr = (union eth1394_hdr *)skb->data;
unsigned char src_hw[ETH_ALEN], dest_hw[ETH_ALEN];
unsigned short ret = 0;
/* Setup our hw addresses. We use these to build the
* ethernet header. */
*(u16 *)dest_hw = htons(destid);
*(u16 *)src_hw = htons(srcid);
/* Remove the encapsulation header */
hdr->words.word1 = ntohs(hdr->words.word1);
skb_pull (skb, hdr_type_len[hdr->common.lf]);
/* If this is an ARP packet, convert it. First, we want to make
* use of some of the fields, since they tell us a little bit
* about the sending machine. */
if (hdr->uf.ether_type == __constant_htons (ETH_P_ARP)) {
unsigned long flags;
u16 phy_id = srcid & NODE_MASK;
struct eth1394_priv *priv =
(struct eth1394_priv *)dev->priv;
struct eth1394_arp arp1394;
struct arphdr *arp = (struct arphdr *)skb->data;
unsigned char *arp_ptr = (unsigned char *)(arp + 1);
memcpy (&arp1394, arp, sizeof (struct eth1394_arp));
/* Update our speed/payload/fifo_offset table */
spin_lock_irqsave (&priv->lock, flags);
ether1394_register_limits (phy_id, arp1394.max_rec, arp1394.sspd,
le64_to_cpu (arp1394.s_uniq_id),
ntohs (arp1394.fifo_hi),
ntohl (arp1394.fifo_lo), priv);
spin_unlock_irqrestore (&priv->lock, flags);
#define PROCESS_MEMBER(ptr,val,len) \
ptr = memcpy (ptr, val, len) + len
PROCESS_MEMBER (arp_ptr, src_hw, dev->addr_len);
PROCESS_MEMBER (arp_ptr, &arp1394.sip, 4);
PROCESS_MEMBER (arp_ptr, dest_hw, dev->addr_len);
PROCESS_MEMBER (arp_ptr, &arp1394.tip, 4);
#undef PROCESS_MEMBER
arp->ar_hln = dev->addr_len;
arp->ar_hrd = __constant_htons (ARPHRD_ETHER);
skb_trim (skb, sizeof (struct arphdr) + 2*(dev->addr_len+4));
}
/* Now add the ethernet header. */
if (dev->hard_header (skb, dev, __constant_ntohs (hdr->uf.ether_type),
dest_hw, src_hw, skb->len) >= 0)
ret = ether1394_type_trans(skb, dev);
return ret;
}
/* Packet reception. We convert the IP1394 encapsulation header to an
* ethernet header, and fill it with some of our other fields. This is
* an incoming packet from the 1394 bus. */
static int ether1394_write (struct hpsb_host *host, int srcid, int destid,
quadlet_t *data, u64 addr, unsigned int len)
{
struct sk_buff *skb;
char *buf = (char *)data;
unsigned long flags;
struct net_device *dev = ether1394_find_dev (host);
struct eth1394_priv *priv;
if (dev == NULL) {
ETH1394_PRINT_G (KERN_ERR, "Could not find net device for host %p\n",
host);
return RCODE_ADDRESS_ERROR;
}
priv = (struct eth1394_priv *)dev->priv;
/* A packet has been received by the ieee1394 bus. Build an skbuff
* around it so we can pass it to the high level network layer. */
skb = dev_alloc_skb (len + dev->hard_header_len + 15);
if (!skb) {
HPSB_PRINT (KERN_ERR, "ether1394 rx: low on mem\n");
priv->stats.rx_dropped++;
return RCODE_ADDRESS_ERROR;
}
skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
memcpy (skb_put (skb, len), buf, len);
/* Write metadata, and then pass to the receive level */
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
/* Parse the encapsulation header. This actually does the job of
* converting to an ethernet frame header, as well as ARP
* conversion if needed. ARP conversion is easier in this
* direction, since we are using ethernet as our backend. */
skb->protocol = ether1394_parse_encap (skb, dev, srcid, destid);
spin_lock_irqsave (&priv->lock, flags);
if (!skb->protocol) {
priv->stats.rx_errors++;
priv->stats.rx_dropped++;
dev_kfree_skb_any(skb);
goto bad_proto;
}
netif_stop_queue(dev);
if (netif_rx (skb) == NET_RX_DROP) {
priv->stats.rx_errors++;
priv->stats.rx_dropped++;
goto bad_proto;
}
/* Statistics */
priv->stats.rx_packets++;
priv->stats.rx_bytes += skb->len;
bad_proto:
netif_start_queue(dev);
spin_unlock_irqrestore (&priv->lock, flags);
dev->last_rx = jiffies;
return RCODE_COMPLETE;
}
/* This function is our scheduled write */
static void hpsb_write_sched (void *__ptask)
{
struct packet_task *ptask = (struct packet_task *)__ptask;
struct sk_buff *skb = ptask->skb;
struct net_device *dev = ptask->skb->dev;
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
unsigned long flags;
/* Statistics */
spin_lock_irqsave (&priv->lock, flags);
if (!hpsb_write(priv->host, ptask->dest_node,
get_hpsb_generation(priv->host),
ptask->addr, (quadlet_t *)skb->data, skb->len)) {
priv->stats.tx_bytes += skb->len;
priv->stats.tx_packets++;
} else {
//printk("Failed in hpsb_write_sched\n");
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
if (netif_queue_stopped (dev))
netif_wake_queue (dev);
}
spin_unlock_irqrestore (&priv->lock, flags);
dev->trans_start = jiffies;
dev_kfree_skb(skb);
kmem_cache_free(packet_task_cache, ptask);
return;
}
/* Transmit a packet (called by kernel) */
static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
{
int kmflags = in_interrupt () ? GFP_ATOMIC : GFP_KERNEL;
struct ethhdr *eth;
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
int proto;
unsigned long flags;
nodeid_t dest_node;
u64 addr;
struct packet_task *ptask = NULL;
int ret = 0;
if ((skb = skb_share_check (skb, kmflags)) == NULL) {
ret = -ENOMEM;
goto fail;
}
/* Get rid of the ethernet header, but save a pointer */
eth = (struct ethhdr *)skb->data;
skb_pull (skb, ETH_HLEN);
/* Save the destination id and proto for our encapsulation, then
* discard the ethernet header; it is not transmitted on the wire. */
dest_node = ntohs (*(nodeid_t *)(eth->h_dest));
proto = eth->h_proto;
/* If this is an ARP packet, convert it */
if (proto == __constant_htons (ETH_P_ARP))
ether1394_arp_to_1394arp (skb, dev);
/* Now add our encapsulation header */
ether1394_encapsulate (skb, dev, proto);
/* TODO: The above encapsulate function needs to recognize when a
* packet needs to be split for a specified node. It should create
* a list of skb's that we could then iterate over for the below
* call to schedule our writes. */
/* XXX: Right now we accept that we don't exactly follow the RFC. When
* we do, we will send ARP requests via GASP format, and so we won't
* need this hack. */
spin_lock_irqsave (&priv->lock, flags);
addr = (u64)priv->fifo_hi[dest_node & NODE_MASK] << 32 |
priv->fifo_lo[dest_node & NODE_MASK];
spin_unlock_irqrestore (&priv->lock, flags);
if (!addr)
addr = ETHER1394_REGION_ADDR;
ptask = kmem_cache_alloc(packet_task_cache, kmflags);
if (ptask == NULL) {
ret = -ENOMEM;
goto fail;
}
ptask->skb = skb;
ptask->addr = addr;
ptask->dest_node = dest_node;
INIT_TQUEUE(&ptask->tq, hpsb_write_sched, ptask);
schedule_task(&ptask->tq);
return 0;
fail:
printk("Failed in ether1394_tx\n");
if (skb != NULL)
dev_kfree_skb (skb);
spin_lock_irqsave (&priv->lock, flags);
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
if (netif_queue_stopped (dev))
netif_wake_queue (dev);
spin_unlock_irqrestore (&priv->lock, flags);
return ret;
}
/* Function for incoming 1394 packets */
static struct hpsb_address_ops addr_ops = {
write: ether1394_write,
};
/* Ieee1394 highlevel driver functions */
static struct hpsb_highlevel_ops hl_ops = {
add_host: ether1394_add_host,
remove_host: ether1394_remove_host,
host_reset: ether1394_host_reset,
};
static int __init ether1394_init_module (void)
{
packet_task_cache = kmem_cache_create("packet_task", sizeof(struct packet_task),
0, 0, NULL, NULL);
if (packet_task_cache == NULL) {
ETH1394_PRINT_G (KERN_ERR, "Cannot create packet task cache\n");
return -ENOMEM;
}
/* Register ourselves as a highlevel driver */
hl_handle = hpsb_register_highlevel (ETHER1394_DRIVER_NAME, &hl_ops);
if (hl_handle == NULL) {
ETH1394_PRINT_G (KERN_ERR, "No more memory for driver\n");
return -ENOMEM;
}
hpsb_register_addrspace (hl_handle, &addr_ops, ETHER1394_REGION_ADDR,
ETHER1394_REGION_ADDR_END);
return 0;
}
static void __exit ether1394_exit_module (void)
{
hpsb_unregister_highlevel (hl_handle);
kmem_cache_destroy(packet_task_cache);
}
module_init(ether1394_init_module);
module_exit(ether1394_exit_module);
/*
* eth1394.h -- Ethernet driver for Linux IEEE-1394 Subsystem
*
* Copyright (C) 2000 Bonin Franck <boninf@free.fr>
* (C) 2001 Ben Collins <bcollins@debian.org>
*
* Mainly based on work by Emanuel Pirker and Andreas E. Bombe
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef __ETH1394_H
#define __ETH1394_H
/* Register for incoming packets. This is 8192 bytes, the maximum async
* payload at S1600. We'll need to change this if that ever becomes "small" :) */
#define ETHER1394_REGION_ADDR_LEN 8192
#define ETHER1394_REGION_ADDR 0xfffff0200000ULL
#define ETHER1394_REGION_ADDR_END (ETHER1394_REGION_ADDR + ETHER1394_REGION_ADDR_LEN)
/* Node set == 64 */
#define NODE_SET (ALL_NODES + 1)
/* Private structure for our ethernet driver */
struct eth1394_priv {
struct net_device_stats stats; /* Device stats */
struct hpsb_host *host; /* The card for this dev */
unsigned char max_rec[NODE_SET];/* Max payload per node */
unsigned char sspd[NODE_SET]; /* Max speed per node */
u16 fifo_hi[ALL_NODES]; /* 16bit hi fifo offset per node */
u32 fifo_lo[ALL_NODES]; /* 32bit lo fifo offset per node */
u64 eui[ALL_NODES]; /* EUI-64 per node */
spinlock_t lock; /* Private lock */
};
struct host_info {
struct list_head list;
struct hpsb_host *host;
struct net_device *dev;
};
/* This is our task struct. It's used for the complete_tq callback. */
struct packet_task {
struct sk_buff *skb; /* Socket buffer we are sending */
nodeid_t dest_node; /* Destination of the packet */
u64 addr; /* Address */
struct tq_struct tq; /* The task */
};
/* IP1394 headers */
#include <asm/byteorder.h>
/* Unfragmented */
#if defined __BIG_ENDIAN_BITFIELD
struct eth1394_uf_hdr {
u8 lf:2;
u16 res:14;
u16 ether_type; /* Ethernet packet type */
} __attribute__((packed));
#elif defined __LITTLE_ENDIAN_BITFIELD
struct eth1394_uf_hdr {
u16 res:14;
u8 lf:2;
u16 ether_type;
} __attribute__((packed));
#else
#error Unknown bit field type
#endif
/* First fragment */
#if defined __BIG_ENDIAN_BITFIELD
struct eth1394_ff_hdr {
u8 lf:2;
u8 res1:2;
u16 dg_size:12; /* Datagram size */
u16 ether_type; /* Ethernet packet type */
u16 dgl; /* Datagram label */
u16 res2;
} __attribute__((packed));
#elif defined __LITTLE_ENDIAN_BITFIELD
struct eth1394_ff_hdr {
u16 dg_size:12;
u8 res1:2;
u8 lf:2;
u16 ether_type;
u16 dgl;
u16 res2;
} __attribute__((packed));
#else
#error Unknown bit field type
#endif
/* XXX: Subsequent fragments, including last */
#if defined __BIG_ENDIAN_BITFIELD
struct eth1394_sf_hdr {
u8 lf:2;
u8 res1:2;
u16 dg_size:12; /* Datagram size */
u8 res2:6;
u16 fg_off:10; /* Fragment offset */
u16 dgl; /* Datagram label */
u16 res3;
} __attribute__((packed));
#elif defined __LITTLE_ENDIAN_BITFIELD
struct eth1394_sf_hdr {
u16 dg_size:12;
u8 res1:2;
u8 lf:2;
u16 fg_off:10;
u8 res2:6;
u16 dgl;
u16 res3;
} __attribute__((packed));
#else
#error Unknown bit field type
#endif
#if defined __BIG_ENDIAN_BITFIELD
struct eth1394_common_hdr {
u8 lf:2;
u16 pad1:14;
} __attribute__((packed));
#elif defined __LITTLE_ENDIAN_BITFIELD
struct eth1394_common_hdr {
u16 pad1:14;
u8 lf:2;
} __attribute__((packed));
#else
#error Unknown bit field type
#endif
struct eth1394_hdr_words {
u16 word1;
u16 word2;
u16 word3;
u16 word4;
};
union eth1394_hdr {
struct eth1394_common_hdr common;
struct eth1394_uf_hdr uf;
struct eth1394_ff_hdr ff;
struct eth1394_sf_hdr sf;
struct eth1394_hdr_words words;
};
/* End of IP1394 headers */
/* Fragment types */
#define ETH1394_HDR_LF_UF 0 /* unfragmented */
#define ETH1394_HDR_LF_FF 1 /* first fragment */
#define ETH1394_HDR_LF_LF 2 /* last fragment */
#define ETH1394_HDR_LF_IF 3 /* interior fragment */
#define IP1394_HW_ADDR_LEN 16 /* As per RFC */
/* Our arp packet (ARPHRD_IEEE1394) */
struct eth1394_arp {
u16 hw_type; /* 0x0018 */
u16 proto_type; /* 0x0806 */
u8 hw_addr_len; /* 16 */
u8 ip_addr_len; /* 4 */
u16 opcode; /* ARP Opcode */
/* Above is exactly the same format as struct arphdr */
u64 s_uniq_id; /* Sender's 64bit EUI */
u8 max_rec; /* Sender's max packet size */
u8 sspd; /* Sender's max speed */
u16 fifo_hi; /* hi 16bits of sender's FIFO addr */
u32 fifo_lo; /* lo 32bits of sender's FIFO addr */
u32 sip; /* Sender's IP Address */
u32 tip; /* IP Address of requested hw addr */
};
/* Network timeout */
#define ETHER1394_TIMEOUT 100000
#endif /* __ETH1394_H */
......@@ -43,6 +43,16 @@ static struct hpsb_host_operations dummy_ops = {
devctl: dummy_devctl
};
/**
* hpsb_ref_host - increase reference count for host controller.
* @host: the host controller
*
* Increase the reference count for the specified host controller.
* When holding a reference to a host, the memory allocated for the
* host struct will not be freed and the host is guaranteed to be in a
* consistent state. The driver may be unloaded or the controller may
* be removed (PCMCIA), but the host struct will remain valid.
*/
int hpsb_ref_host(struct hpsb_host *host)
{
......@@ -53,18 +63,26 @@ int hpsb_ref_host(struct hpsb_host *host)
spin_lock_irqsave(&hosts_lock, flags);
list_for_each(lh, &hosts) {
if (host == list_entry(lh, struct hpsb_host, host_list)) {
if (host->ops->devctl(host, MODIFY_USAGE, 1)) {
host->refcount++;
retval = 1;
}
host->ops->devctl(host, MODIFY_USAGE, 1);
host->refcount++;
retval = 1;
break;
}
}
}
spin_unlock_irqrestore(&hosts_lock, flags);
return retval;
}
/**
* hpsb_unref_host - decrease reference count for host controller.
* @host: the host controller
*
* Decrease the reference count for the specified host controller.
* When the reference count reaches zero, the memory allocated for the
* &hpsb_host will be freed.
*/
void hpsb_unref_host(struct hpsb_host *host)
{
unsigned long flags;
......@@ -74,23 +92,44 @@ void hpsb_unref_host(struct hpsb_host *host)
spin_lock_irqsave(&hosts_lock, flags);
host->refcount--;
if (!host->refcount && !host->is_shutdown)
if (!host->refcount && host->is_shutdown)
kfree(host);
spin_unlock_irqrestore(&hosts_lock, flags);
}
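/* Illustrative sketch, not part of this patch: the pinning pattern the
 * hpsb_ref_host()/hpsb_unref_host() documentation above describes.  The
 * function name is invented for the example. */
static void example_use_host(struct hpsb_host *host)
{
	if (!hpsb_ref_host(host))
		return;		/* host is already gone or being removed */

	/* host is guaranteed to stay valid and consistent here, even if
	 * the controller driver is unloaded in the meantime */

	hpsb_unref_host(host);
}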
/**
* hpsb_alloc_host - allocate a new host controller.
* @drv: the driver that will manage the host controller
* @extra: number of extra bytes to allocate for the driver
*
* Allocate a &hpsb_host and initialize the general subsystem specific
* fields. If the driver needs to store per host data, as drivers
* usually do, the amount of memory required can be specified by the
* @extra parameter. Once allocated, the driver should initialize the
* driver specific parts, enable the controller and make it available
* to the general subsystem using hpsb_add_host().
*
* The &hpsb_host is allocated with an single initial reference
* belonging to the driver. Once the driver is done with the struct,
* for example, when the driver is unloaded, it should release this
* reference using hpsb_unref_host().
*
* Return Value: a pointer to the &hpsb_host if succesful, %NULL if
* no memory was available.
*/
struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra)
{
struct hpsb_host *h;
h = kmalloc(sizeof(struct hpsb_host) + extra, SLAB_KERNEL);
if (!h) return NULL;
memset(h, 0, sizeof(struct hpsb_host));
memset(h, 0, sizeof(struct hpsb_host) + extra);
h->hostdata = h + 1;
h->driver = drv;
h->ops = drv->ops;
h->hostdata = h + 1;
h->refcount = 1;
INIT_LIST_HEAD(&h->pending_packets);
spin_lock_init(&h->pending_pkt_lock);
......@@ -134,9 +173,7 @@ void hpsb_remove_host(struct hpsb_host *host)
spin_lock_irqsave(&hosts_lock, flags);
list_del(&host->driver_list);
list_del(&host->host_list);
drv->number_of_hosts--;
if (!host->refcount) kfree(host);
spin_unlock_irqrestore(&hosts_lock, flags);
}
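/* Illustrative sketch, not part of this patch: the allocation pattern
 * described in the hpsb_alloc_host() documentation above.  "struct
 * example_hostdata" and example_probe() are invented for the example;
 * hpsb_add_host(), hpsb_remove_host() and hpsb_unref_host() are the
 * real interfaces. */
struct example_hostdata {
	int irq;		/* whatever per-controller state the driver needs */
};

static struct hpsb_host *example_probe(struct hpsb_host_driver *drv)
{
	struct hpsb_host *host;
	struct example_hostdata *data;

	host = hpsb_alloc_host(drv, sizeof(struct example_hostdata));
	if (!host)
		return NULL;

	data = (struct example_hostdata *)host->hostdata;
	data->irq = -1;

	/* ... set up the controller, request the IRQ, etc. ... */

	hpsb_add_host(host);	/* hand the ready controller to the core */
	return host;

	/* on removal/unload the driver would do:
	 *	hpsb_remove_host(host);
	 *	hpsb_unref_host(host);	(drops the initial reference)
	 */
}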
......
......@@ -36,6 +36,7 @@ struct hpsb_host {
int node_count; /* number of identified nodes on this bus */
int selfid_count; /* total number of SelfIDs received */
int nodes_active; /* number of nodes that are actually active */
nodeid_t node_id; /* node ID of this host */
nodeid_t irm_id; /* ID of this bus' isochronous resource manager */
......
......@@ -41,6 +41,10 @@ MODULE_PARM(disable_nodemgr, "i");
MODULE_PARM_DESC(disable_nodemgr, "Disable nodemgr functionality.");
static int disable_nodemgr = 0;
MODULE_PARM(disable_hotplug, "i");
MODULE_PARM_DESC(disable_hotplug, "Disable hotplug for detected nodes.");
static int disable_hotplug = 0;
/* We are GPL, so treat us special */
MODULE_LICENSE("GPL");
......@@ -108,7 +112,7 @@ struct hpsb_packet *alloc_hpsb_packet(size_t data_size)
packet->data_size = data_size;
}
INIT_TQ_HEAD(packet->complete_tq);
INIT_LIST_HEAD(&packet->complete_tq);
INIT_LIST_HEAD(&packet->list);
sema_init(&packet->state_change, 0);
packet->state = hpsb_unused;
......@@ -177,6 +181,8 @@ static int check_selfids(struct hpsb_host *host)
struct ext_selfid *esid;
int esid_seq = 23;
host->nodes_active = 0;
while (rest_of_selfids--) {
if (!sid->extended) {
nodeid++;
......@@ -188,9 +194,11 @@ static int check_selfids(struct hpsb_host *host)
return 0;
}
if (sid->contender && sid->link_active) {
host->irm_id = LOCAL_BUS | sid->phy_id;
}
if (sid->link_active) {
host->nodes_active++;
if (sid->contender)
host->irm_id = LOCAL_BUS | sid->phy_id;
}
} else {
esid = (struct ext_selfid *)sid;
......@@ -224,7 +232,8 @@ static int check_selfids(struct hpsb_host *host)
return 0;
}
return nodeid + 1;
host->node_count = nodeid + 1;
return 1;
}
static void build_speed_map(struct hpsb_host *host, int nodecount)
......@@ -322,8 +331,7 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
host->node_id = LOCAL_BUS | phyid;
host->is_root = isroot;
host->node_count = check_selfids(host);
if (!host->node_count) {
if (!check_selfids(host)) {
if (host->reset_retries++ < 20) {
/* selfid stage did not complete without error */
HPSB_NOTICE("Error in SelfID stage, resetting");
......@@ -803,8 +811,8 @@ static rwlock_t ieee1394_chardevs_lock = RW_LOCK_UNLOCKED;
static int ieee1394_dispatch_open(struct inode *inode, struct file *file);
static struct file_operations ieee1394_chardev_ops = {
OWNER_THIS_MODULE
open: ieee1394_dispatch_open,
owner: THIS_MODULE,
open: ieee1394_dispatch_open,
};
devfs_handle_t ieee1394_devfs_handle;
......@@ -828,8 +836,6 @@ int ieee1394_register_chardev(int blocknum,
ieee1394_chardevs[blocknum].module = module;
retval = 0;
V22_COMPAT_MOD_INC_USE_COUNT;
} else {
/* block already taken */
retval = -EBUSY;
......@@ -851,7 +857,6 @@ void ieee1394_unregister_chardev(int blocknum)
if(ieee1394_chardevs[blocknum].file_ops) {
ieee1394_chardevs[blocknum].file_ops = NULL;
ieee1394_chardevs[blocknum].module = NULL;
V22_COMPAT_MOD_DEC_USE_COUNT;
}
write_unlock(&ieee1394_chardevs_lock);
......@@ -868,12 +873,6 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
/*
Maintaining correct module reference counts is tricky here!
For Linux v2.2:
The task-specific driver is expected to maintain its own
reference count via V22_COMPAT_MOD_[INC,DEC]_USE_COUNT.
We don't need to do anything special.
For Linux v2.4 and later:
The key thing to remember is that the VFS increments the
......@@ -894,17 +893,10 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
/* 2.2 */
#define INCREF(mod) do {} while (0)
#define DECREF(mod) do {} while (0)
#else
/* 2.4 */
#define INCREF(mod_) do { struct module *mod = (struct module*) mod_; \
if(mod != NULL) __MOD_INC_USE_COUNT(mod); } while(0)
#define DECREF(mod_) do { struct module *mod = (struct module*) mod_; \
if(mod != NULL) __MOD_DEC_USE_COUNT(mod); } while(0)
#endif
/* shift away lower four bits of the minor
to get the index of the ieee1394_driver
......@@ -1013,7 +1005,7 @@ static int __init ieee1394_init(void)
init_hpsb_highlevel();
init_csr();
if (!disable_nodemgr)
init_ieee1394_nodemgr();
init_ieee1394_nodemgr(disable_hotplug);
else
HPSB_INFO("nodemgr functionality disabled");
......@@ -1099,8 +1091,10 @@ EXPORT_SYMBOL(highlevel_host_reset);
EXPORT_SYMBOL(hpsb_guid_get_entry);
EXPORT_SYMBOL(hpsb_nodeid_get_entry);
EXPORT_SYMBOL(hpsb_get_host_by_ne);
EXPORT_SYMBOL(hpsb_guid_fill_packet);
EXPORT_SYMBOL(hpsb_node_fill_packet);
EXPORT_SYMBOL(hpsb_node_read);
EXPORT_SYMBOL(hpsb_node_write);
EXPORT_SYMBOL(hpsb_node_lock);
EXPORT_SYMBOL(hpsb_register_protocol);
EXPORT_SYMBOL(hpsb_unregister_protocol);
EXPORT_SYMBOL(hpsb_release_unit_directory);
......
......@@ -415,8 +415,8 @@ struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
* avoid in kernel buffers for user space callers
*/
int hpsb_read(struct hpsb_host *host, nodeid_t node, u64 addr,
quadlet_t *buffer, size_t length)
int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, quadlet_t *buffer, size_t length)
{
struct hpsb_packet *packet;
int retval = 0;
......@@ -447,7 +447,7 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, u64 addr,
return -ENOMEM;
}
packet->generation = get_hpsb_generation(host);
packet->generation = generation;
if (!hpsb_send_packet(packet)) {
retval = -EINVAL;
goto hpsb_read_fail;
......@@ -496,8 +496,8 @@ struct hpsb_packet *hpsb_make_packet (struct hpsb_host *host, nodeid_t node,
return packet;
}
int hpsb_write(struct hpsb_host *host, nodeid_t node, u64 addr,
quadlet_t *buffer, size_t length)
int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, quadlet_t *buffer, size_t length)
{
struct hpsb_packet *packet;
int retval;
......@@ -522,7 +522,7 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, u64 addr,
if (!packet)
return -ENOMEM;
packet->generation = get_hpsb_generation(host);
packet->generation = generation;
if (!hpsb_send_packet(packet)) {
retval = -EINVAL;
goto hpsb_write_fail;
......@@ -541,8 +541,8 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, u64 addr,
/* We need a hpsb_lock64 function for the 64 bit equivalent. Probably. */
int hpsb_lock(struct hpsb_host *host, nodeid_t node, u64 addr, int extcode,
quadlet_t *data, quadlet_t arg)
int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, int extcode, quadlet_t *data, quadlet_t arg)
{
struct hpsb_packet *packet;
int retval = 0, length;
......@@ -588,7 +588,7 @@ int hpsb_lock(struct hpsb_host *host, nodeid_t node, u64 addr, int extcode,
}
fill_async_lock(packet, addr, extcode, length);
packet->generation = get_hpsb_generation(host);
packet->generation = generation;
if (!hpsb_send_packet(packet)) {
retval = -EINVAL;
goto hpsb_lock_fail;
......
......@@ -63,14 +63,16 @@ int hpsb_packet_success(struct hpsb_packet *packet);
* The generic read, write and lock functions. All recognize the local node ID
* and act accordingly. Read and write automatically use quadlet commands if
* length == 4 and block commands otherwise (however, they do not yet
* support lengths that are not a multiple of 4).
* support lengths that are not a multiple of 4). You must explicitly specify
* the generation for which the node ID is valid, to avoid sending packets to
* the wrong nodes when we race with a bus reset.
*/
int hpsb_read(struct hpsb_host *host, nodeid_t node, u64 addr,
quadlet_t *buffer, size_t length);
int hpsb_write(struct hpsb_host *host, nodeid_t node, u64 addr,
quadlet_t *buffer, size_t length);
int hpsb_lock(struct hpsb_host *host, nodeid_t node, u64 addr, int extcode,
quadlet_t *data, quadlet_t arg);
int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, quadlet_t *buffer, size_t length);
int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, quadlet_t *buffer, size_t length);
int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, int extcode, quadlet_t *data, quadlet_t arg);
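/* Illustrative sketch, not part of this patch: a caller is expected to
 * sample the bus generation once (with get_hpsb_generation()) and pass
 * it to the transaction, so that a request prepared before a bus reset
 * is rejected instead of reaching the wrong node.  The helper name is
 * invented; nodemgr_read_quadlet() in nodemgr.c follows the same pattern. */
static inline int example_read_quadlet(struct hpsb_host *host, nodeid_t node,
				       u64 addr, quadlet_t *quad)
{
	unsigned int generation = get_hpsb_generation(host);

	/* returns 0 on success; -EAGAIN typically means the generation
	 * went stale and the caller should rescan the node */
	return hpsb_read(host, node, generation, addr, quad, 4);
}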
/* Generic packet creation. Used by hpsb_write. Also useful for protocol
* drivers that want to implement their own hpsb_write replacement. */
......
......@@ -10,17 +10,6 @@
#include <asm/byteorder.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
#include "linux22compat.h"
#else
#define V22_COMPAT_MOD_INC_USE_COUNT do {} while (0)
#define V22_COMPAT_MOD_DEC_USE_COUNT do {} while (0)
#define OWNER_THIS_MODULE owner: THIS_MODULE,
#define INIT_TQ_LINK(tq) INIT_LIST_HEAD(&(tq).list)
#define INIT_TQ_HEAD(tq) INIT_LIST_HEAD(&(tq))
#endif
/* The great kdev_t changeover in 2.5.x */
#include <linux/kdev_t.h>
#ifndef minor
......@@ -48,11 +37,7 @@
#endif /* Linux version < 2.4.12 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,18)
#include <asm/spinlock.h>
#else
#include <linux/spinlock.h>
#endif
#ifndef list_for_each_safe
#define list_for_each_safe(pos, n, head) \
......@@ -61,6 +46,10 @@
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,5)
#define pte_offset_kernel pte_offset
#endif
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
......@@ -78,7 +67,8 @@ typedef u16 nodeid_t;
#define LOCAL_BUS 0xffc0
#define ALL_NODES 0x003f
#define NODE_BUS_FMT "%d:%d"
/* Can be used to consistently print a node/bus ID. */
#define NODE_BUS_FMT "%02d:%04d"
#define NODE_BUS_ARGS(nodeid) \
(nodeid & NODE_MASK), ((nodeid & BUS_MASK) >> 6)
......
......@@ -12,11 +12,11 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/completion.h>
#include <linux/delay.h>
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#endif
......@@ -61,70 +61,122 @@ static rwlock_t unit_directory_lock = RW_LOCK_UNLOCKED;
static LIST_HEAD(host_info_list);
static spinlock_t host_info_lock = SPIN_LOCK_UNLOCKED;
/* Disables use of the hotplug calls. */
static int nodemgr_disable_hotplug = 0;
struct host_info {
struct hpsb_host *host;
struct list_head list;
struct completion started;
struct completion exited;
wait_queue_head_t wait;
struct semaphore reset_sem;
int pid;
};
#ifdef CONFIG_PROC_FS
static int raw1394_read_proc(char *buffer, char **start, off_t offset,
int size, int *eof, void *data )
#define PUTF(fmt, args...) out += sprintf(out, fmt, ## args)
static int raw1394_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
struct list_head *lh;
struct node_entry *ne;
int disp_size = 0;
char display_str[1024];
int len;
char *out = page;
#define PUTF(fmt, args...) disp_size += sprintf(display_str, fmt, ## args); strcat(buffer,display_str)
buffer[0] = '\0';
list_for_each(lh, &node_list) {
struct list_head *l;
int ud_count = 0;
ne = list_entry(lh, struct node_entry, list);
if (!ne)
continue;
PUTF("Node[" NODE_BUS_FMT "] GUID[%016Lx]:\n",
NODE_BUS_ARGS(ne->nodeid), (unsigned long long)ne->guid);
if (ne->host != NULL && ne->host->node_id == ne->nodeid) {
PUTF("\tNodes connected : %d\n", ne->host->node_count);
PUTF("\tSelfIDs received: %d\n", ne->host->selfid_count);
PUTF("\tOwn ID : 0x%08x\n",ne->host->node_id);
PUTF("\tIrm ID : 0x%08x\n",ne->host->irm_id);
PUTF("\tBusMgr ID : 0x%08x\n",ne->host->busmgr_id);
PUTF("\tBR IR IC II IB\n");
PUTF("\t%02d %02d %02d %02d %02d\n",
ne->host->in_bus_reset, ne->host->is_root,
ne->host->is_cycmst, ne->host->is_irm,
ne->host->is_busmgr);
}
PUTF("\tVendor ID: %s [0x%06x]\n",
/* Generic Node information */
PUTF(" Vendor ID: `%s' [0x%06x]\n",
ne->vendor_name ?: "Unknown", ne->vendor_id);
PUTF("\tCapabilities: 0x%06x\n", ne->capabilities);
PUTF("\tirmc=%d cmc=%d isc=%d bmc=%d pmc=%d cyc_clk_acc=%d max_rec=%d gen=%d lspd=%d\n",
PUTF(" Capabilities: 0x%06x\n", ne->capabilities);
PUTF(" Bus Options:\n");
PUTF(" IRMC(%d) CMC(%d) ISC(%d) BMC(%d) PMC(%d) GEN(%d)\n"
" LSPD(%d) MAX_REC(%d) CYC_CLK_ACC(%d)\n",
ne->busopt.irmc, ne->busopt.cmc, ne->busopt.isc, ne->busopt.bmc,
ne->busopt.pmc, ne->busopt.cyc_clk_acc, ne->busopt.max_rec,
ne->busopt.generation, ne->busopt.lnkspd);
ne->busopt.pmc, ne->busopt.generation, ne->busopt.lnkspd,
ne->busopt.max_rec, ne->busopt.cyc_clk_acc);
/* If this is the host entry, output some info about it as well */
if (ne->host != NULL && ne->host->node_id == ne->nodeid) {
PUTF(" Host Node Status:\n");
PUTF(" Host Driver : %s\n", ne->host->driver->name);
PUTF(" Nodes connected : %d\n", ne->host->node_count);
PUTF(" Nodes active : %d\n", ne->host->nodes_active);
PUTF(" SelfIDs received: %d\n", ne->host->selfid_count);
PUTF(" Irm ID : [" NODE_BUS_FMT "]\n",
NODE_BUS_ARGS(ne->host->irm_id));
PUTF(" BusMgr ID : [" NODE_BUS_FMT "]\n",
NODE_BUS_ARGS(ne->host->busmgr_id));
PUTF(" In Bus Reset : %s\n", ne->host->in_bus_reset ? "yes" : "no");
PUTF(" Root : %s\n", ne->host->is_root ? "yes" : "no");
PUTF(" Cycle Master : %s\n", ne->host->is_cycmst ? "yes" : "no");
PUTF(" IRM : %s\n", ne->host->is_irm ? "yes" : "no");
PUTF(" Bus Manager : %s\n", ne->host->is_busmgr ? "yes" : "no");
}
/* Now the unit directories */
list_for_each (l, &ne->unit_directories) {
struct unit_directory *ud = list_entry (l, struct unit_directory, node_list);
PUTF(" Unit Directory %d:\n", ud_count++);
if (ud->flags & UNIT_DIRECTORY_VENDOR_ID)
PUTF(" Vendor/Model ID: %s [%06x]",
ud->vendor_name ?: "Unknown", ud->vendor_id);
else if (ud->flags & UNIT_DIRECTORY_MODEL_ID) /* Have to put something */
PUTF(" Vendor/Model ID: %s [%06x]",
ne->vendor_name ?: "Unknown", ne->vendor_id);
if (ud->flags & UNIT_DIRECTORY_MODEL_ID)
PUTF(" / %s [%06x]", ud->model_name ?: "Unknown", ud->model_id);
PUTF("\n");
if (ud->flags & UNIT_DIRECTORY_SPECIFIER_ID)
PUTF(" Software Specifier ID: %06x\n", ud->specifier_id);
if (ud->flags & UNIT_DIRECTORY_VERSION)
PUTF(" Software Version: %06x\n", ud->version);
if (ud->driver)
PUTF(" Driver: %s\n", ud->driver->name);
PUTF(" Length (in quads): %d\n", ud->count);
}
}
#undef PUTF
return disp_size;
len = out - page;
len -= off;
if (len < count) {
*eof = 1;
if (len <= 0)
return 0;
} else
len = count;
*start = page + off;
return len;
}
#undef PUTF
#endif /* CONFIG_PROC_FS */
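/* Illustrative sketch, not part of this patch: how a read_proc routine
 * like raw1394_read_proc() above would be hooked under the subsystem's
 * /proc/bus/ieee1394 directory.  The entry name "nodes" and the helper
 * name are assumptions for the example; the real registration is not
 * shown here. */
#ifdef CONFIG_PROC_FS
static int example_register_proc(void)
{
	if (!create_proc_read_entry("nodes", 0444, ieee1394_procfs_entry,
				    raw1394_read_proc, NULL))
		return -ENOMEM;
	return 0;
}
#endif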
static void nodemgr_process_config_rom(struct node_entry *ne,
quadlet_t busoptions);
static int nodemgr_read_quadlet(struct hpsb_host *host,
nodeid_t nodeid, octlet_t address,
quadlet_t *quad)
nodeid_t nodeid, unsigned int generation,
octlet_t address, quadlet_t *quad)
{
int i;
int ret = 0;
for (i = 0; i < 3; i++) {
ret = hpsb_read(host, nodeid, address, quad, 4);
ret = hpsb_read(host, nodeid, generation, address, quad, 4);
if (ret != -EAGAIN)
break;
}
......@@ -134,17 +186,17 @@ static int nodemgr_read_quadlet(struct hpsb_host *host,
}
static int nodemgr_size_text_leaf(struct hpsb_host *host,
nodeid_t nodeid,
nodeid_t nodeid, unsigned int generation,
octlet_t address)
{
quadlet_t quad;
int size = 0;
if (nodemgr_read_quadlet(host, nodeid, address, &quad))
if (nodemgr_read_quadlet(host, nodeid, generation, address, &quad))
return -1;
if (CONFIG_ROM_KEY(quad) == CONFIG_ROM_DESCRIPTOR_LEAF) {
/* This is the offset. */
address += 4 * CONFIG_ROM_VALUE(quad);
if (nodemgr_read_quadlet(host, nodeid, address, &quad))
if (nodemgr_read_quadlet(host, nodeid, generation, address, &quad))
return -1;
/* Now we got the size of the text descriptor leaf. */
size = CONFIG_ROM_LEAF_LENGTH(quad);
......@@ -152,21 +204,20 @@ static int nodemgr_size_text_leaf(struct hpsb_host *host,
return size;
}
static int nodemgr_read_text_leaf(struct hpsb_host *host,
nodeid_t nodeid,
static int nodemgr_read_text_leaf(struct node_entry *ne,
octlet_t address,
quadlet_t *quadp)
{
quadlet_t quad;
int i, size, ret;
if (nodemgr_read_quadlet(host, nodeid, address, &quad)
if (nodemgr_read_quadlet(ne->host, ne->nodeid, ne->generation, address, &quad)
&& CONFIG_ROM_KEY(quad) != CONFIG_ROM_DESCRIPTOR_LEAF)
return -1;
/* This is the offset. */
address += 4 * CONFIG_ROM_VALUE(quad);
if (nodemgr_read_quadlet(host, nodeid, address, &quad))
if (nodemgr_read_quadlet(ne->host, ne->nodeid, ne->generation, address, &quad))
return -1;
/* Now we got the size of the text descriptor leaf. */
......@@ -176,7 +227,7 @@ static int nodemgr_read_text_leaf(struct hpsb_host *host,
address += 4;
for (i = 0; i < 2; i++, address += 4, quadp++) {
if (nodemgr_read_quadlet(host, nodeid, address, quadp))
if (nodemgr_read_quadlet(ne->host, ne->nodeid, ne->generation, address, quadp))
return -1;
}
......@@ -184,7 +235,7 @@ static int nodemgr_read_text_leaf(struct hpsb_host *host,
ret = -ENXIO;
for (; size > 0; size--, address += 4, quadp++) {
for (i = 0; i < 3; i++) {
ret = hpsb_read(host, nodeid, address, quadp, 4);
ret = hpsb_read(ne->host, ne->nodeid, ne->generation, address, quadp, 4);
if (ret != -EAGAIN)
break;
}
......@@ -196,7 +247,7 @@ static int nodemgr_read_text_leaf(struct hpsb_host *host,
}
static struct node_entry *nodemgr_scan_root_directory
(struct hpsb_host *host, nodeid_t nodeid)
(struct hpsb_host *host, nodeid_t nodeid, unsigned int generation)
{
octlet_t address;
quadlet_t quad;
......@@ -206,11 +257,11 @@ static struct node_entry *nodemgr_scan_root_directory
address = CSR_REGISTER_BASE + CSR_CONFIG_ROM;
if (nodemgr_read_quadlet(host, nodeid, address, &quad))
if (nodemgr_read_quadlet(host, nodeid, generation, address, &quad))
return NULL;
address += 4 + CONFIG_ROM_BUS_INFO_LENGTH(quad) * 4;
if (nodemgr_read_quadlet(host, nodeid, address, &quad))
if (nodemgr_read_quadlet(host, nodeid, generation, address, &quad))
return NULL;
length = CONFIG_ROM_ROOT_LENGTH(quad);
address += 4;
......@@ -218,30 +269,22 @@ static struct node_entry *nodemgr_scan_root_directory
size = 0;
total_size = sizeof(struct node_entry);
for (; length > 0; length--, address += 4) {
if (nodemgr_read_quadlet(host, nodeid, address, &quad))
if (nodemgr_read_quadlet(host, nodeid, generation, address, &quad))
return NULL;
code = CONFIG_ROM_KEY(quad);
if (code == CONFIG_ROM_VENDOR_ID) {
if (code == CONFIG_ROM_VENDOR_ID && length > 0) {
/* Check if there is a text descriptor leaf
immediately after this. */
length--;
if (length <= 0)
break;
address += 4;
size = nodemgr_size_text_leaf(host, nodeid,
address);
switch (size) {
case -1:
return NULL;
break;
case 0:
break;
default:
size = nodemgr_size_text_leaf(host, nodeid, generation,
address + 4);
if (size > 0) {
address += 4;
length--;
total_size += (size + 1) * sizeof (quadlet_t);
break;
}
break;
else if (size < 0)
return NULL;
}
}
ne = kmalloc(total_size, SLAB_ATOMIC);
......@@ -259,12 +302,13 @@ static struct node_entry *nodemgr_scan_root_directory
}
static struct node_entry *nodemgr_create_node(octlet_t guid, quadlet_t busoptions,
struct hpsb_host *host, nodeid_t nodeid)
struct hpsb_host *host,
nodeid_t nodeid, unsigned int generation)
{
struct node_entry *ne;
unsigned long flags;
ne = nodemgr_scan_root_directory (host, nodeid);
ne = nodemgr_scan_root_directory (host, nodeid, generation);
if (!ne) return NULL;
INIT_LIST_HEAD(&ne->list);
......@@ -272,7 +316,7 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, quadlet_t busoption
ne->host = host;
ne->nodeid = nodeid;
ne->guid = guid;
atomic_set(&ne->generation, get_hpsb_generation(ne->host));
ne->generation = generation;
write_lock_irqsave(&node_lock, flags);
list_add_tail(&ne->list, &node_list);
......@@ -281,7 +325,7 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, quadlet_t busoption
nodemgr_process_config_rom (ne, busoptions);
HPSB_DEBUG("%s added: Node[" NODE_BUS_FMT "] GUID[%016Lx] [%s]",
(host->node_id == nodeid) ? "Local host" : "Device",
(host->node_id == nodeid) ? "Host" : "Device",
NODE_BUS_ARGS(nodeid), (unsigned long long)guid,
ne->vendor_name ?: "Unknown");
......@@ -323,7 +367,7 @@ static struct unit_directory *nodemgr_scan_unit_directory
int length, size, total_size, count;
int vendor_name_size, model_name_size;
if (nodemgr_read_quadlet(ne->host, ne->nodeid, address, &quad))
if (nodemgr_read_quadlet(ne->host, ne->nodeid, ne->generation, address, &quad))
return NULL;
length = CONFIG_ROM_DIRECTORY_LENGTH(quad) ;
address += 4;
......@@ -338,8 +382,8 @@ static struct unit_directory *nodemgr_scan_unit_directory
int code;
quadlet_t value;
retry:
if (nodemgr_read_quadlet(ne->host, ne->nodeid, address, &quad))
if (nodemgr_read_quadlet(ne->host, ne->nodeid, ne->generation,
address, &quad))
return NULL;
code = CONFIG_ROM_KEY(quad);
value = CONFIG_ROM_VALUE(quad);
......@@ -373,32 +417,27 @@ static struct unit_directory *nodemgr_scan_unit_directory
break;
}
if (todo) {
if (todo && length > 0) {
/* Check if there is a text descriptor leaf
immediately after this. */
length--;
if (length <= 0)
break;
address += 4;
size = nodemgr_size_text_leaf(ne->host,
ne->nodeid,
address);
if (todo | UNIT_DIRECTORY_VENDOR_TEXT)
ne->generation,
address + 4);
if (todo == UNIT_DIRECTORY_VENDOR_TEXT)
vendor_name_size = size;
else
model_name_size = size;
switch (size) {
case -1:
return NULL;
break;
case 0:
goto retry;
break;
default:
if (size > 0) {
address += 4;
length--;
flags |= todo;
total_size += (size + 1) * sizeof (quadlet_t);
break;
}
else if (size < 0)
return NULL;
}
}
total_size += count * sizeof (quadlet_t);
......@@ -435,7 +474,8 @@ static void nodemgr_process_unit_directory(struct node_entry *ne,
ud->ne = ne;
ud->address = address;
if (nodemgr_read_quadlet(ne->host, ne->nodeid, address, &quad))
if (nodemgr_read_quadlet(ne->host, ne->nodeid, ne->generation,
address, &quad))
goto unit_directory_error;
length = CONFIG_ROM_DIRECTORY_LENGTH(quad) ;
address += 4;
......@@ -446,7 +486,8 @@ static void nodemgr_process_unit_directory(struct node_entry *ne,
quadlet_t value;
quadlet_t *quadp;
if (nodemgr_read_quadlet(ne->host, ne->nodeid, address, &quad))
if (nodemgr_read_quadlet(ne->host, ne->nodeid, ne->generation,
address, &quad))
goto unit_directory_error;
code = CONFIG_ROM_KEY(quad) ;
value = CONFIG_ROM_VALUE(quad);
......@@ -459,9 +500,7 @@ static void nodemgr_process_unit_directory(struct node_entry *ne,
length--;
address += 4;
quadp = &(ud->quadlets[ud->count]);
if (nodemgr_read_text_leaf(ne->host,
ne->nodeid,
address,
if (nodemgr_read_text_leaf(ne, address,
quadp) == 0
&& quadp[0] == 0
&& quadp[1] == 0) {
......@@ -481,9 +520,7 @@ static void nodemgr_process_unit_directory(struct node_entry *ne,
length--;
address += 4;
quadp = &(ud->quadlets[ud->count + ud->vendor_name_size + 1]);
if (nodemgr_read_text_leaf(ne->host,
ne->nodeid,
address,
if (nodemgr_read_text_leaf(ne, address,
quadp) == 0
&& quadp[0] == 0
&& quadp[1] == 0) {
......@@ -568,11 +605,13 @@ static void nodemgr_process_root_directory(struct node_entry *ne)
address = CSR_REGISTER_BASE + CSR_CONFIG_ROM;
if (nodemgr_read_quadlet(ne->host, ne->nodeid, address, &quad))
if (nodemgr_read_quadlet(ne->host, ne->nodeid, ne->generation,
address, &quad))
return;
address += 4 + CONFIG_ROM_BUS_INFO_LENGTH(quad) * 4;
if (nodemgr_read_quadlet(ne->host, ne->nodeid, address, &quad))
if (nodemgr_read_quadlet(ne->host, ne->nodeid, ne->generation,
address, &quad))
return;
length = CONFIG_ROM_ROOT_LENGTH(quad);
address += 4;
......@@ -580,7 +619,8 @@ static void nodemgr_process_root_directory(struct node_entry *ne)
for (; length > 0; length--, address += 4) {
int code, value;
if (nodemgr_read_quadlet(ne->host, ne->nodeid, address, &quad))
if (nodemgr_read_quadlet(ne->host, ne->nodeid, ne->generation,
address, &quad))
return;
code = CONFIG_ROM_KEY(quad);
value = CONFIG_ROM_VALUE(quad);
......@@ -593,9 +633,7 @@ static void nodemgr_process_root_directory(struct node_entry *ne)
if (ne->vendor_name != NULL) {
length--;
address += 4;
if (nodemgr_read_text_leaf(ne->host,
ne->nodeid,
address,
if (nodemgr_read_text_leaf(ne, address,
ne->quadlets)
!= 0
|| ne->quadlets [0] != 0
......@@ -631,6 +669,10 @@ static void nodemgr_call_policy(char *verb, struct unit_directory *ud)
char *argv [3], **envp, *buf, *scratch;
int i = 0, value;
/* User requested to disable hotplug when module was loaded. */
if (nodemgr_disable_hotplug)
return;
if (!hotplug_path [0])
return;
if (!current->fs->root)
......@@ -902,7 +944,8 @@ static void nodemgr_process_config_rom(struct node_entry *ne,
* them to take whatever actions are required.
*/
static void nodemgr_update_node(struct node_entry *ne, quadlet_t busoptions,
struct hpsb_host *host, nodeid_t nodeid)
struct hpsb_host *host,
nodeid_t nodeid, unsigned int generation)
{
struct list_head *lh;
struct unit_directory *ud;
......@@ -917,7 +960,7 @@ static void nodemgr_update_node(struct node_entry *ne, quadlet_t busoptions,
nodemgr_process_config_rom (ne, busoptions);
/* Since that's done, we can declare this record current */
atomic_set(&ne->generation, get_hpsb_generation(ne->host));
ne->generation = generation;
list_for_each (lh, &ne->unit_directories) {
ud = list_entry (lh, struct unit_directory, node_list);
......@@ -926,73 +969,59 @@ static void nodemgr_update_node(struct node_entry *ne, quadlet_t busoptions,
}
}
static int read_businfo_block(struct hpsb_host *host, nodeid_t nodeid,
static int read_businfo_block(struct hpsb_host *host, nodeid_t nodeid, unsigned int generation,
quadlet_t *buffer, int buffer_length)
{
octlet_t base = CSR_REGISTER_BASE + CSR_CONFIG_ROM;
int retries = 3;
int header_count;
octlet_t addr = CSR_REGISTER_BASE + CSR_CONFIG_ROM;
unsigned header_size;
quadlet_t quad;
int ret;
retry_configrom:
if (!retries--) {
HPSB_ERR("Giving up on node " NODE_BUS_FMT
" for ConfigROM probe, too many errors",
NODE_BUS_ARGS(nodeid));
return -1;
}
int i;
header_count = 0;
header_size = 0;
/* IEEE P1212 says that devices should support 64-byte block
* reads, aligned on 64-byte boundaries. That doesn't seem to
* work though, and we are forced to do quadlet-sized
* reads. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
HPSB_INFO("Initiating ConfigROM request for node " NODE_BUS_FMT,
NODE_BUS_ARGS(nodeid));
#endif
/* Now, P1212 says that devices should support 64byte block
* reads, aligned on 64byte boundaries. That doesn't seem
* to work though, and we are forced to doing quadlet
* sized reads. */
ret = hpsb_read(host, nodeid, base, &quad, 4);
if (ret) {
HPSB_ERR("ConfigROM quadlet transaction error (%d) for node " NODE_BUS_FMT,
ret, NODE_BUS_ARGS(nodeid));
goto retry_configrom;
if (nodemgr_read_quadlet(host, nodeid, generation,
addr, &buffer[0]) < 0) {
HPSB_ERR("ConfigROM quadlet transaction error for node "
NODE_BUS_FMT, NODE_BUS_ARGS(nodeid));
return -1;
}
buffer[header_count++] = be32_to_cpu(quad);
header_size = buffer[0] >> 24;
addr += 4;
if (header_size < 4) {
HPSB_INFO("Node " NODE_BUS_FMT " has non-standard ROM format (%d quads), "
"cannot parse", NODE_BUS_ARGS(nodeid), header_size);
HPSB_INFO("Node " NODE_BUS_FMT " has non-standard ROM "
"format (%d quads), cannot parse",
NODE_BUS_ARGS(nodeid), header_size);
return -1;
}
while (header_count <= header_size && header_count < buffer_length) {
ret = hpsb_read(host, nodeid, base + (header_count<<2), &quad, 4);
if (ret) {
HPSB_ERR("ConfigROM quadlet transaction error (%d) for " NODE_BUS_FMT,
ret, NODE_BUS_ARGS(nodeid));
goto retry_configrom;
for (i = 1; i < buffer_length; i++, addr += 4) {
if (nodemgr_read_quadlet(host, nodeid, generation,
addr, &buffer[i]) < 0) {
HPSB_ERR("ConfigROM quadlet transaction "
"error for node " NODE_BUS_FMT,
NODE_BUS_ARGS(nodeid));
return -1;
}
buffer[header_count++] = be32_to_cpu(quad);
}
return 0;
}
}
static void nodemgr_remove_node(struct node_entry *ne)
{
unsigned long flags;
HPSB_DEBUG("%s removed: Node[" NODE_BUS_FMT "] GUID[%016Lx] [%s]",
(ne->host->node_id == ne->nodeid) ? "Local host" : "Device",
(ne->host->node_id == ne->nodeid) ? "Host" : "Device",
NODE_BUS_ARGS(ne->nodeid), (unsigned long long)ne->guid,
ne->vendor_name ?: "Unknown");
......@@ -1007,7 +1036,8 @@ static void nodemgr_remove_node(struct node_entry *ne)
/* This is where we probe the nodes for their information and provided
* features. */
static void nodemgr_node_probe_one(struct hpsb_host *host, nodeid_t nodeid)
static void nodemgr_node_probe_one(struct hpsb_host *host,
nodeid_t nodeid, int generation)
{
struct node_entry *ne;
quadlet_t buffer[5];
......@@ -1016,7 +1046,8 @@ static void nodemgr_node_probe_one(struct hpsb_host *host, nodeid_t nodeid)
/* We need to detect when the ConfigROM's generation has changed,
* so we only update the node's info when it needs to be. */
if (read_businfo_block (host, nodeid, buffer, sizeof(buffer) >> 2))
if (read_businfo_block (host, nodeid, generation,
buffer, sizeof(buffer) >> 2))
return;
if (buffer[1] != IEEE1394_BUSID_MAGIC) {
......@@ -1030,14 +1061,14 @@ static void nodemgr_node_probe_one(struct hpsb_host *host, nodeid_t nodeid)
ne = hpsb_guid_get_entry(guid);
if (!ne)
nodemgr_create_node(guid, buffer[2], host, nodeid);
nodemgr_create_node(guid, buffer[2], host, nodeid, generation);
else
nodemgr_update_node(ne, buffer[2], host, nodeid);
nodemgr_update_node(ne, buffer[2], host, nodeid, generation);
return;
}
static void nodemgr_node_probe_cleanup(struct hpsb_host *host)
static void nodemgr_node_probe_cleanup(struct hpsb_host *host, unsigned int generation)
{
unsigned long flags;
struct list_head *lh, *next;
......@@ -1057,7 +1088,7 @@ static void nodemgr_node_probe_cleanup(struct hpsb_host *host)
* node was removed, or it failed the above probe. Either
* way, we remove references to it, since they are
* invalid. */
if (!hpsb_node_entry_valid(ne))
if (ne->generation != generation)
nodemgr_remove_node(ne);
}
write_unlock_irqrestore(&node_lock, flags);
......@@ -1067,22 +1098,46 @@ static void nodemgr_node_probe_cleanup(struct hpsb_host *host)
static void nodemgr_node_probe(struct hpsb_host *host)
{
int nodecount = host->node_count;
int count;
struct selfid *sid = (struct selfid *)host->topology_map;
nodeid_t nodeid = LOCAL_BUS;
unsigned int generation;
/* Pause for 1 second, to make sure things settle down. If
* schedule_timeout returns non-zero, it means we caught a signal
* and need to return. */
set_current_state(TASK_INTERRUPTIBLE);
if (schedule_timeout (HZ))
return;
/* Now get the generation in which the node IDs we collect
* are valid. During the bus scan we will use this generation
* for the read transactions, so that if another reset occurs
* during the scan the transactions will fail instead of
* returning bogus data. */
generation = get_hpsb_generation(host);
/* Scan each node on the bus */
for (; nodecount; nodecount--, nodeid++, sid++) {
while (sid->extended)
sid++;
if (!sid->link_active)
for (count = host->selfid_count; count; count--, sid++) {
if (sid->extended)
continue;
nodemgr_node_probe_one(host, nodeid);
if (!sid->link_active) {
nodeid++;
continue;
}
nodemgr_node_probe_one(host, nodeid++, generation);
}
/* Cleanup if needed */
nodemgr_node_probe_cleanup(host);
/* If we had a bus reset while we were scanning the bus, it is
* possible that we did not probe all nodes. In that case, we
* skip the clean up for now, since we could remove nodes that
* were still on the bus. The bus reset increased
* hi->reset_sem, so there's a bus scan pending which will do
* the clean up eventually. */
if (generation == get_hpsb_generation(host))
nodemgr_node_probe_cleanup(host, generation);
return;
}
......@@ -1090,29 +1145,21 @@ static void nodemgr_node_probe(struct hpsb_host *host)
static int nodemgr_host_thread(void *__hi)
{
struct host_info *hi = (struct host_info *)__hi;
lock_kernel();
/* No userlevel access needed */
daemonize();
strcpy(current->comm, "NodeMgr");
complete(&hi->started);
strcpy(current->comm, "knodemgrd");
/* Sit and wait for a signal to probe the nodes on the bus. This
* happens when we get a bus reset. */
do {
interruptible_sleep_on(&hi->wait);
if (!signal_pending(current))
nodemgr_node_probe(hi->host);
} while (!signal_pending(current));
* happens when we get a bus reset. */
while (!down_interruptible(&hi->reset_sem))
nodemgr_node_probe(hi->host);
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
HPSB_DEBUG ("NodeMgr: Exiting thread for %s", hi->host->driver->name);
#endif
unlock_kernel();
complete_and_exit(&hi->exited, 0);
}
......@@ -1140,23 +1187,57 @@ struct node_entry *hpsb_nodeid_get_entry(nodeid_t nodeid)
return ne;
}
struct hpsb_host *hpsb_get_host_by_ne(struct node_entry *ne)
/* The following four convenience functions use a struct node_entry
* for addressing a node on the bus. They are intended for use by any
* process context, not just the nodemgr thread, so we need to be a
* little careful when reading out the node ID and generation. The
* thing that can go wrong is that we get the node ID, then a bus
* reset occurs, and then we read the generation. The node ID is
* possibly invalid, but the generation is current, and we end up
* sending a packet to the wrong node.
*
* The solution is to make sure we read the generation first, so that
* if a reset occurs in the process, we end up with a stale generation
* and the transactions will fail instead of silently using wrong node
* IDs.
*/
void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt)
{
if (atomic_read(&ne->generation) != get_hpsb_generation(ne->host))
return NULL;
if (ne->nodeid == ne->host->node_id) return ne->host;
return NULL;
pkt->host = ne->host;
pkt->generation = ne->generation;
barrier();
pkt->node_id = ne->nodeid;
}
int hpsb_guid_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt)
int hpsb_node_read(struct node_entry *ne, u64 addr,
quadlet_t *buffer, size_t length)
{
if (atomic_read(&ne->generation) != get_hpsb_generation(ne->host))
return 0;
unsigned int generation = ne->generation;
pkt->host = ne->host;
pkt->node_id = ne->nodeid;
pkt->generation = atomic_read(&ne->generation);
return 1;
barrier();
return hpsb_read(ne->host, ne->nodeid, generation,
addr, buffer, length);
}
int hpsb_node_write(struct node_entry *ne, u64 addr,
quadlet_t *buffer, size_t length)
{
unsigned int generation = ne->generation;
barrier();
return hpsb_write(ne->host, ne->nodeid, generation,
addr, buffer, length);
}
int hpsb_node_lock(struct node_entry *ne, u64 addr,
int extcode, quadlet_t *data, quadlet_t arg)
{
unsigned int generation = ne->generation;
barrier();
return hpsb_lock(ne->host, ne->nodeid, generation,
addr, extcode, data, arg);
}
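
/* Editorial usage sketch -- not part of this patch.  It shows how a protocol
 * driver holding a struct node_entry might call the helpers declared above,
 * assuming only the declarations this patch adds to nodemgr.h and the
 * non-zero-on-error return convention of the hpsb_read() calls seen elsewhere
 * in this patch.  Names prefixed example_ are hypothetical. */
static int example_read_rom_header(struct node_entry *ne, quadlet_t *quad)
{
	/* hpsb_node_read() snapshots ne->generation before ne->nodeid, so a
	 * bus reset in between makes the transaction fail instead of
	 * silently addressing a different node. */
	if (hpsb_node_read(ne, CSR_REGISTER_BASE + CSR_CONFIG_ROM,
			   quad, sizeof(quadlet_t)))
		return -1;	/* stale generation or node gone */

	*quad = be32_to_cpu(*quad);	/* config ROM data is big endian */
	return 0;
}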
static void nodemgr_add_host(struct hpsb_host *host)
......@@ -1169,23 +1250,24 @@ static void nodemgr_add_host(struct hpsb_host *host)
return;
}
/* We simply initialize the struct here. We don't start the thread
* until the first bus reset. */
/* Initialize the hostinfo here and start the thread. The
* thread blocks on the reset semaphore until a bus reset
* happens. */
hi->host = host;
INIT_LIST_HEAD(&hi->list);
init_completion(&hi->started);
init_completion(&hi->exited);
init_waitqueue_head(&hi->wait);
hi->pid = kernel_thread(nodemgr_host_thread, hi, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
sema_init(&hi->reset_sem, 0);
hi->pid = kernel_thread(nodemgr_host_thread, hi,
CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
if (hi->pid < 0) {
HPSB_ERR ("NodeMgr: failed to start NodeMgr thread for %s", host->driver->name);
HPSB_ERR ("NodeMgr: failed to start NodeMgr thread for %s",
host->driver->name);
kfree(hi);
return;
}
wait_for_completion(&hi->started);
spin_lock_irqsave (&host_info_lock, flags);
list_add_tail (&hi->list, &host_info_list);
spin_unlock_irqrestore (&host_info_lock, flags);
......@@ -1216,8 +1298,7 @@ static void nodemgr_host_reset(struct hpsb_host *host)
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
HPSB_DEBUG ("NodeMgr: Processing host reset for %s", host->driver->name);
#endif
wake_up(&hi->wait);
up(&hi->reset_sem);
done_reset_host:
spin_unlock_irqrestore (&host_info_lock, flags);
......@@ -1254,7 +1335,7 @@ static void nodemgr_remove_host(struct hpsb_host *host)
ne = list_entry(lh, struct node_entry, list);
if (ne->host == host)
nodemgr_remove_node(ne);
nodemgr_remove_node(ne);
}
write_unlock_irqrestore(&node_lock, flags);
......@@ -1279,8 +1360,9 @@ static struct hpsb_highlevel *hl;
#define PROC_ENTRY "devices"
void init_ieee1394_nodemgr(void)
void init_ieee1394_nodemgr(int disable_hotplug)
{
nodemgr_disable_hotplug = disable_hotplug;
#ifdef CONFIG_PROC_FS
if (!create_proc_read_entry(PROC_ENTRY, 0444, ieee1394_procfs_entry, raw1394_read_proc, NULL))
HPSB_ERR("Can't create devices procfs entry");
......
......@@ -125,7 +125,7 @@ struct node_entry {
struct hpsb_host *host; /* Host this node is attached to */
nodeid_t nodeid; /* NodeID */
struct bus_options busopt; /* Bus Options */
atomic_t generation; /* Synced with hpsb generation */
unsigned int generation; /* Synced with hpsb generation */
/* The following is read from the config rom */
u32 vendor_id;
......@@ -138,7 +138,7 @@ struct node_entry {
static inline int hpsb_node_entry_valid(struct node_entry *ne)
{
return atomic_read(&ne->generation) == get_hpsb_generation(ne->host);
return ne->generation == get_hpsb_generation(ne->host);
}
/*
......@@ -170,10 +170,17 @@ struct hpsb_host *hpsb_get_host_by_ne(struct node_entry *ne);
* number). It will at least reliably fail so that you don't accidentally and
* unknowingly send your packet to the wrong node.
*/
int hpsb_guid_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt);
void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt);
int hpsb_node_read(struct node_entry *ne, u64 addr,
quadlet_t *buffer, size_t length);
int hpsb_node_write(struct node_entry *ne, u64 addr,
quadlet_t *buffer, size_t length);
int hpsb_node_lock(struct node_entry *ne, u64 addr,
int extcode, quadlet_t *data, quadlet_t arg);
void init_ieee1394_nodemgr(void);
void init_ieee1394_nodemgr(int disable_hotplug);
void cleanup_ieee1394_nodemgr(void);
#endif /* _IEEE1394_NODEMGR_H */
......@@ -96,7 +96,6 @@
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/tqueue.h>
#include <linux/delay.h>
......@@ -161,7 +160,7 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
printk(level "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)
static char version[] __devinitdata =
"$Revision: 1.91 $ Ben Collins <bcollins@debian.org>";
"$Revision: 1.101 $ Ben Collins <bcollins@debian.org>";
/* Module Parameters */
MODULE_PARM(attempt_root,"i");
......@@ -487,7 +486,6 @@ static void ohci_init_config_rom(struct ti_ohci *ohci);
/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
int i;
quadlet_t buf;
spin_lock_init(&ohci->phy_reg_lock);
......@@ -535,14 +533,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
/* Don't accept phy packets into AR request context */
reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
/* Initialize IR dma */
for (i=0;i<ohci->nb_iso_rcv_ctx;i++) {
reg_write(ohci, OHCI1394_IsoRcvContextControlClear+32*i,
0xffffffff);
reg_write(ohci, OHCI1394_IsoRcvContextMatch+32*i, 0);
reg_write(ohci, OHCI1394_IsoRcvCommandPtr+32*i, 0);
}
/* Set bufferFill, isochHeader, multichannel for IR context */
reg_write(ohci, OHCI1394_IsoRcvContextControlSet, 0xd0000000);
......@@ -553,13 +543,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
/* Initialize IT dma */
for (i=0;i<ohci->nb_iso_xmit_ctx;i++) {
reg_write(ohci, OHCI1394_IsoXmitContextControlClear+32*i,
0xffffffff);
reg_write(ohci, OHCI1394_IsoXmitCommandPtr+32*i, 0);
}
/* Clear the interrupt mask */
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
......@@ -615,7 +598,8 @@ static void ohci_initialize(struct ti_ohci *ohci)
OHCI1394_respTxComplete |
OHCI1394_reqTxComplete |
OHCI1394_isochRx |
OHCI1394_isochTx);
OHCI1394_isochTx |
OHCI1394_cycleInconsistent);
/* Enable link */
reg_write(ohci, OHCI1394_HCControlSet, 0x00020000);
......@@ -936,6 +920,7 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
} else {
MOD_DEC_USE_COUNT;
}
retval = 1;
break;
case ISO_LISTEN_CHANNEL:
......@@ -1098,6 +1083,14 @@ static void ohci_irq_handler(int irq, void *dev_id,
return;
}
if (event & OHCI1394_cycleInconsistent) {
/* We subscribe to the cycleInconsistent event only to
* clear the corresponding event bit... otherwise,
* isochronous cycleMatch DMA won't work. */
DBGMSG(ohci->id, "OHCI1394_cycleInconsistent");
event &= ~OHCI1394_cycleInconsistent;
}
if (event & OHCI1394_busReset) {
/* The busReset event bit can't be cleared during the
* selfID phase, so we disable busReset interrupts, to
......@@ -1265,8 +1258,8 @@ static void ohci_irq_handler(int irq, void *dev_id,
/* Finally, we clear the busReset event and reenable
* the busReset interrupt. */
spin_lock_irqsave(&ohci->event_lock, flags);
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
spin_unlock_irqrestore(&ohci->event_lock, flags);
event &= ~OHCI1394_selfIDComplete;
}
......@@ -1509,6 +1502,15 @@ static void dma_trm_tasklet (unsigned long data)
/* this packet hasn't been sent yet*/
break;
if (!(ack & 0x10)) {
/* XXX: This is an OHCI evt_* code. We need to handle
* this specially! For right now, we just fake an
* ackx_send_error. */
PRINT(KERN_DEBUG, ohci->id, "Received OHCI evt_* error 0x%x",
ack & 0xf);
ack = (ack & 0xffe0) | ACK_BUSY_A;
}
#ifdef OHCI1394_DEBUG
if (datasize)
DBGMSG(ohci->id,
......@@ -1942,7 +1944,8 @@ static void ohci_init_config_rom(struct ti_ohci *ohci)
/* Root directory */
cf_unit_begin(&cr, 1);
cf_put_keyval(&cr, 0x03, 0x00005e); /* Vendor ID */
/* Vendor ID */
cf_put_keyval(&cr, 0x03, reg_read(ohci,OHCI1394_VendorID) & 0xFFFFFF);
cf_put_refer(&cr, 0x81, 2); /* Textual description unit */
cf_put_keyval(&cr, 0x0c, 0x0083c0); /* Node capabilities */
/* NOTE: Add other unit referers here, and append at bottom */
......@@ -2287,7 +2290,7 @@ static void __devexit ohci1394_pci_remove(struct pci_dev *pdev)
#endif /* CONFIG_ALL_PPC */
pci_set_drvdata(ohci->dev, NULL);
kfree(ohci);
hpsb_unref_host(ohci->host);
}
#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
......
......@@ -22,6 +22,7 @@
#define _OHCI1394_H
#include "ieee1394_types.h"
#include <asm/io.h>
#define OHCI1394_DRIVER_NAME "ohci1394"
......@@ -192,7 +193,7 @@ struct ti_ohci {
/* IRQ hooks, for video1394 and dv1394 */
#define OHCI1394_MAX_IRQ_HOOKS 4
#define OHCI1394_MAX_IRQ_HOOKS 16
struct ohci1394_irq_hook {
void (*irq_handler) (int card, quadlet_t isoRecvEvent,
......@@ -314,7 +315,7 @@ static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
#define OHCI1394_AsRspRcvCommandPtr 0x1EC
/* Isochronous transmit registers */
/* Add (32 * n) for context n */
/* Add (16 * n) for context n */
#define OHCI1394_IsoXmitContextBase 0x200
#define OHCI1394_IsoXmitContextControlSet 0x200
#define OHCI1394_IsoXmitContextControlClear 0x204
......@@ -363,6 +364,28 @@ static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
#define DMA_CTL_BRANCH 0x000c0000
#define DMA_CTL_WAIT 0x00030000
/* OHCI evt_* error types, table 3-2 of the OHCI 1.1 spec. */
#define EVT_NO_STATUS 0x0 /* No event status */
#define EVT_RESERVED 0x1 /* Reserved, not used !!! */
#define EVT_LONG_PACKET 0x2 /* The recv data was longer than the buf */
#define EVT_MISSING_ACK 0x3 /* A subaction gap was detected before an ack
arrived, or recv'd ack had a parity error */
#define EVT_UNDERRUN 0x4 /* Underrun on corresponding FIFO, packet
truncated */
#define EVT_OVERRUN 0x5 /* A recv FIFO overflowed on reception of ISO
packet */
#define EVT_DESCRIPTOR_READ 0x6 /* An unrecoverable error occurred while host was
reading a descriptor block */
#define EVT_DATA_READ 0x7 /* An error occurred while host controller was
attempting to read from host memory in the data
stage of descriptor processing */
#define EVT_DATA_WRITE 0x8 /* An error occurred while host controller was
attempting to write either during the data stage
of descriptor processing, or when processing a single
16-bit host memory write */
#define EVT_BUS_RESET 0x9 /* Identifies a PHY packet in the recv buffer as
being a synthesized bus reset packet */
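
/* Editorial sketch, not part of the patch: one possible way the evt_* codes
 * above could eventually be translated into hpsb ack codes, rather than the
 * blanket ACK_BUSY_A substitution dma_trm_tasklet() currently applies.  The
 * mapping is purely an assumption for illustration; neither the OHCI spec
 * nor this driver prescribes it. */
static inline int evt_to_fake_ack(int evt)
{
	switch (evt) {
	case EVT_NO_STATUS:
		return ACK_COMPLETE;	/* nothing actually went wrong */
	case EVT_MISSING_ACK:
	case EVT_UNDERRUN:
	case EVT_OVERRUN:
		return ACK_BUSY_A;	/* transient conditions; a retry may help */
	default:
		return ACK_BUSY_A;	/* conservative fallback, as the driver does today */
	}
}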
#define OHCI1394_TCODE_PHY 0xE
void ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg);
......
......@@ -29,7 +29,6 @@
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
......@@ -56,6 +55,7 @@
#define PRINTD(level, card, fmt, args...) do {} while (0)
#endif
static struct hpsb_host_driver *lynx_driver;
static unsigned int card_id;
......@@ -614,7 +614,7 @@ static ssize_t mem_write(struct file*, const char*, size_t, loff_t*);
static struct file_operations aux_ops = {
OWNER_THIS_MODULE
owner: THIS_MODULE,
read: mem_read,
write: mem_write,
poll: aux_poll,
......@@ -639,42 +639,31 @@ static int mem_open(struct inode *inode, struct file *file)
enum { t_rom, t_aux, t_ram } type;
struct memdata *md;
V22_COMPAT_MOD_INC_USE_COUNT;
if (cid < PCILYNX_MINOR_AUX_START) {
/* just for completeness */
V22_COMPAT_MOD_DEC_USE_COUNT;
return -ENXIO;
} else if (cid < PCILYNX_MINOR_ROM_START) {
cid -= PCILYNX_MINOR_AUX_START;
if (cid >= num_of_cards || !cards[cid].aux_port) {
V22_COMPAT_MOD_DEC_USE_COUNT;
if (cid >= num_of_cards || !cards[cid].aux_port)
return -ENXIO;
}
type = t_aux;
} else if (cid < PCILYNX_MINOR_RAM_START) {
cid -= PCILYNX_MINOR_ROM_START;
if (cid >= num_of_cards || !cards[cid].local_rom) {
V22_COMPAT_MOD_DEC_USE_COUNT;
if (cid >= num_of_cards || !cards[cid].local_rom)
return -ENXIO;
}
type = t_rom;
} else {
/* WARNING: Know what you are doing when opening RAM.
* It is currently used inside the driver! */
cid -= PCILYNX_MINOR_RAM_START;
if (cid >= num_of_cards || !cards[cid].local_ram) {
V22_COMPAT_MOD_DEC_USE_COUNT;
if (cid >= num_of_cards || !cards[cid].local_ram)
return -ENXIO;
}
type = t_ram;
}
md = (struct memdata *)kmalloc(sizeof(struct memdata), SLAB_KERNEL);
if (md == NULL) {
V22_COMPAT_MOD_DEC_USE_COUNT;
if (md == NULL)
return -ENOMEM;
}
md->lynx = &cards[cid];
md->cid = cid;
......@@ -700,11 +689,7 @@ static int mem_open(struct inode *inode, struct file *file)
static int mem_release(struct inode *inode, struct file *file)
{
struct memdata *md = (struct memdata *)file->private_data;
kfree(md);
V22_COMPAT_MOD_DEC_USE_COUNT;
kfree(file->private_data);
return 0;
}
......@@ -732,9 +717,8 @@ static unsigned int aux_poll(struct file *file, poll_table *pt)
loff_t mem_llseek(struct file *file, loff_t offs, int orig)
{
loff_t newoffs = -1;
loff_t newoffs;
lock_kernel();
switch (orig) {
case 0:
newoffs = offs;
......@@ -744,14 +728,13 @@ loff_t mem_llseek(struct file *file, loff_t offs, int orig)
break;
case 2:
newoffs = PCILYNX_MAX_MEMORY + 1 + offs;
}
if (newoffs < 0 || newoffs > PCILYNX_MAX_MEMORY + 1) {
unlock_kernel();
break;
default:
return -EINVAL;
}
unlock_kernel();
if (newoffs < 0 || newoffs > PCILYNX_MAX_MEMORY + 1) return -EINVAL;
file->f_pos = newoffs;
return newoffs;
}
......@@ -1229,7 +1212,7 @@ static void remove_card(struct pci_dev *dev)
}
tasklet_kill(&lynx->iso_rcv.tq);
kfree(lynx);
hpsb_unref_host(lynx->host);
}
......
......@@ -21,10 +21,7 @@
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
#include <linux/devfs_fs_kernel.h>
#endif
#include "ieee1394.h"
#include "ieee1394_types.h"
......@@ -919,13 +916,9 @@ static int raw1394_open(struct inode *inode, struct file *file)
return -ENXIO;
}
V22_COMPAT_MOD_INC_USE_COUNT;
fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL);
if (fi == NULL) {
V22_COMPAT_MOD_DEC_USE_COUNT;
if (fi == NULL)
return -ENOMEM;
}
memset(fi, 0, sizeof(struct file_info));
......@@ -988,7 +981,6 @@ static int raw1394_release(struct inode *inode, struct file *file)
kfree(fi);
V22_COMPAT_MOD_DEC_USE_COUNT;
return 0;
}
......@@ -1001,12 +993,12 @@ static struct hpsb_highlevel_ops hl_ops = {
};
static struct file_operations file_ops = {
OWNER_THIS_MODULE
read: raw1394_read,
write: raw1394_write,
poll: raw1394_poll,
open: raw1394_open,
release: raw1394_release,
owner: THIS_MODULE,
read: raw1394_read,
write: raw1394_write,
poll: raw1394_poll,
open: raw1394_open,
release: raw1394_release,
};
static int __init init_raw1394(void)
......
......@@ -27,6 +27,7 @@
* driver. It also registers as a SCSI lower-level driver in order to accept
* SCSI commands for transport using SBP-2.
*
*
* Driver Loading:
*
* Currently, the SBP-2 driver is supported only as a module. Because the
......@@ -45,7 +46,7 @@
*
* Currently, the SBP-2 driver will catch any attached SBP-2 devices during the
* initial scsi bus scan (when the driver is first loaded). To add or remove
* SBP-2 devices after this initial scan (i.e. if you plug-in or un-plug a
* SBP-2 devices "after" this initial scan (i.e. if you plug-in or un-plug a
* device after the SBP-2 driver is loaded), you must either use the scsi procfs
* add-single-device, remove-single-device, or a shell script such as
* rescan-scsi-bus.sh.
......@@ -76,6 +77,19 @@
* fdisk, mkfs, etc.).
*
*
* Module Load Options:
*
* sbp2_max_speed - Force max speed allowed
* (2 = 400mb, 1 = 200mb, 0 = 100mb. default = 2)
* sbp2_serialize_io - Serialize all I/O coming down from the scsi drivers
* (0 = deserialized, 1 = serialized, default = 0)
* sbp2_max_sectors - Change max sectors per I/O supported (default = 255)
* sbp2_max_outstanding_cmds - Change max outstanding concurrent commands (default = 8)
* sbp2_max_cmds_per_lun - Change max concurrent commands per sbp2 device (default = 1)
*
* (e.g. insmod sbp2 sbp2_serialize_io=1)
*
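* For illustration only (editorial addition, not in the original comment):
* several of the options above can be combined on a single insmod line,
* for example
*
*	insmod sbp2 sbp2_max_speed=1 sbp2_max_sectors=128 sbp2_serialize_io=1
*
* which caps the bus speed at 200mb, limits transfers to 128 sectors, and
* serializes I/O for debugging.
*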
*
* Current Support:
*
* The SBP-2 driver is still in an early state, but supports a variety of devices.
......@@ -83,7 +97,8 @@
* performance of more than 25 MBytes/s on individual drives (limit of the media
* transfer rate).
*
* Following are the devices that have been tested successfully:
*
* Following are a sampling of devices that have been tested successfully:
*
* - Western Digital IEEE-1394 hard drives
* - Maxtor IEEE-1394 hard drives
......@@ -102,7 +117,7 @@
* - APDrives IEEE-1394 hard drives
* - Fujitsu IEEE-1394 MO drives
* - Sony IEEE-1394 CD-RW drives
* - Epson IEEE-1394 scanner
* - Epson IEEE-1394 scanners
* - ADS IEEE-1394 memory stick and compact flash readers
* - SBP-2 bridge-based devices (LSI, Oxford Semiconductor, Indigita bridges)
* - Various other standard IEEE-1394 hard drives and enclosures
......@@ -117,10 +132,6 @@
*
* Current Issues:
*
* - Currently, all I/O from the scsi stack is serialized by default, as there
* are some stress issues under investigation with deserialized I/O. To enable
* deserialized I/O for testing, do "insmod sbp2 serialize_io=0"
*
* - Error Handling: SCSI aborts and bus reset requests are handled somewhat
* but the code needs additional debugging.
*
......@@ -246,6 +257,28 @@
* 1394 devices (Peerless, Jazz). Also a bit of clean-up of the
* driver, thanks to H.J.Lu (hjl@lucon.org). Removed mode_sense_hack
* module load option, as it's been fixed in the 2.4 scsi stack.
* 02/10/02 - Added support for max_sectors, minor fix for inquiry command, make
* up sbp2 device type from inquiry response data if not part of
* device's 1394 unit directory. (JSG)
* 02/18/02 - Code clean-up and enhancements: (JSG)
* * Finish cleaning out hacked code for dealing with broken sbp2 devices
* which do not support requests of 128KB or greater. Now use
* max_sectors scsi host entry to limit transfer sizes.
* * Change status fifo address from a single address to a set of addresses,
* with each sbp2 device having its own status fifo address. This makes
* it easier to match the status write to the sbp2 device instance.
* * Minor change to use lun when logging into sbp2 devices. First step in
* supporting multi-lun devices such as CD/DVD changer devices.
* * Added a new module load option for setting max sectors. For use by folk
* who'd like to bump up the max scsi transfer size supported.
* * Enabled deserialized operation by default, allowing for better performance,
* particularly when running with multiple sbp2 devices. For debugging,
* you may enable serialization through use of the sbp2_serialize_io module
* load option (e.g. insmod sbp2 sbp2_serialize_io=1).
* 02/20/02 - Added a couple additional module load options.
* Needed to bump down max commands per lun because of the !%@&*^# QPS CDRW
* drive I have, which doesn't seem to get along with other sbp2 devices
* (or handle linked commands well).
*/
......@@ -301,7 +334,7 @@
*/
/*
* Change max_speed on module load if you have a bad IEEE-1394 controller
* Change sbp2_max_speed on module load if you have a bad IEEE-1394 controller
* that has trouble running 2KB packets at 400mb.
*
* NOTE: On certain OHCI parts I have seen short packets on async transmit
......@@ -309,31 +342,51 @@
* bump down the speed if you are running into problems.
*
* Valid values:
* max_speed = 2 (default: max speed 400mb)
* max_speed = 1 (max speed 200mb)
* max_speed = 0 (max speed 100mb)
* sbp2_max_speed = 2 (default: max speed 400mb)
* sbp2_max_speed = 1 (max speed 200mb)
* sbp2_max_speed = 0 (max speed 100mb)
*/
MODULE_PARM(sbp2_max_speed,"i");
MODULE_PARM_DESC(sbp2_max_speed, "Force max speed (2 = 400mb default, 1 = 200mb, 0 = 100mb)");
static int sbp2_max_speed = SPEED_400;
/*
* Set sbp2_serialize_io to 1 if you'd like only one scsi command sent down to
* us at a time (debugging). This might be necessary for very badly behaved sbp2 devices.
*/
MODULE_PARM(sbp2_serialize_io,"i");
MODULE_PARM_DESC(sbp2_serialize_io, "Serialize all I/O coming down from the scsi drivers (default = 0)");
static int sbp2_serialize_io = 0; /* serialize I/O - available for debugging purposes */
/*
* Bump up sbp2_max_sectors if you'd like to support very large transfers. Please note
* that some older sbp2 bridge chips are broken for transfers greater than or equal to 128KB.
* Default is a value of 255 sectors, or just under 128KB (at 512 byte sector size). I can note
* that the Oxsemi sbp2 chipsets have no problems supporting very large transfer sizes.
*/
MODULE_PARM(max_speed,"i");
MODULE_PARM_DESC(max_speed, "Force down max speed (2 = 400mb default, 1 = 200mb, 0 = 100mb)");
static int max_speed = SPEED_400;
MODULE_PARM(sbp2_max_sectors,"i");
MODULE_PARM_DESC(sbp2_max_sectors, "Change max sectors per I/O supported (default = 255)");
static int sbp2_max_sectors = SBP2_MAX_SECTORS;
/*
* Set serialize_io to 1 if you'd like only one scsi command sent down to
* us at a time (debugging).
* Adjust sbp2_max_outstanding_cmds to tune performance if you have many sbp2 devices attached
* (or if you need to do some debugging).
*/
MODULE_PARM(serialize_io,"i");
MODULE_PARM_DESC(serialize_io, "Serialize all I/O coming down from the scsi drivers (debugging)");
static int serialize_io = 1; /* serialize I/O until stress issues are resolved */
MODULE_PARM(sbp2_max_outstanding_cmds,"i");
MODULE_PARM_DESC(sbp2_max_outstanding_cmds, "Change max outstanding concurrent commands (default = 8)");
static int sbp2_max_outstanding_cmds = SBP2SCSI_MAX_OUTSTANDING_CMDS;
/*
* Set no_large_packets to 1 if you'd like to limit the size of requests
* sent down to us (normally the sbp2 driver will break up any requests to
* any individual devices with 128KB transfer size limits). Sets max s/g
* list elements to 0x1f in size and disables s/g clustering.
* Adjust sbp2_max_cmds_per_lun to tune performance. Enabling more than one concurrent/linked
* command per sbp2 device may allow some performance gains, but some older sbp2 devices have
* firmware bugs resulting in problems when linking commands... so, enable this with care.
* I can note that the Oxsemi OXFW911 sbp2 chipset works very well with large numbers of
* concurrent/linked commands. =)
*/
MODULE_PARM(no_large_packets,"i");
MODULE_PARM_DESC(no_large_packets, "Do not allow large transfers from scsi drivers (debugging)");
static int no_large_packets = 0;
MODULE_PARM(sbp2_max_cmds_per_lun,"i");
MODULE_PARM_DESC(sbp2_max_cmds_per_lun, "Change max concurrent commands per sbp2 device (default = 1)");
static int sbp2_max_cmds_per_lun = SBP2SCSI_MAX_CMDS_PER_LUN;
/*
* Export information about protocols/devices supported by this driver.
......@@ -351,9 +404,14 @@ static struct ieee1394_device_id sbp2_id_table[] = {
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
/*
* Debug levels, configured via kernel config.
* Debug levels, configured via kernel config, or enable here.
*/
/* #define CONFIG_IEEE1394_SBP2_DEBUG_ORBS */
/* #define CONFIG_IEEE1394_SBP2_DEBUG_DMA */
/* #define CONFIG_IEEE1394_SBP2_DEBUG 1 */
/* #define CONFIG_IEEE1394_SBP2_DEBUG 2 */
#ifdef CONFIG_IEEE1394_SBP2_DEBUG_ORBS
#define SBP2_ORB_DEBUG(fmt, args...) HPSB_ERR("sbp2(%s): "fmt, __FUNCTION__, ## args)
static u32 global_outstanding_command_orbs = 0;
......@@ -390,9 +448,9 @@ static u32 global_outstanding_dmas = 0;
#define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
#else
#define SBP2_DEBUG(fmt, args...)
#define SBP2_INFO(fmt, args...)
#define SBP2_NOTICE(fmt, args...)
#define SBP2_WARN(fmt, args...)
#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
#define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
#endif
#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
......@@ -490,7 +548,19 @@ static int sbp2util_create_request_packet_pool(struct sbp2scsi_host_info *hi)
struct hpsb_packet *packet;
int i;
/* Create SBP2_MAX_REQUEST_PACKETS number of request packets. */
hi->request_packet = kmalloc(sizeof(struct sbp2_request_packet) * SBP2_MAX_REQUEST_PACKETS,
GFP_KERNEL);
if (!hi->request_packet) {
SBP2_ERR("sbp2util_create_request_packet_pool - packet allocation failed!");
return(-ENOMEM);
}
memset(hi->request_packet, 0, sizeof(struct sbp2_request_packet) * SBP2_MAX_REQUEST_PACKETS);
/*
* Create a pool of request packets. Just take the max supported
* concurrent commands and multiply by two to be safe...
*/
for (i=0; i<SBP2_MAX_REQUEST_PACKETS; i++) {
/*
......@@ -547,6 +617,7 @@ static void sbp2util_remove_request_packet_pool(struct sbp2scsi_host_info *hi)
}
}
kfree(hi->request_packet);
sbp2_spin_unlock(&hi->sbp2_request_packet_lock, flags);
return;
......@@ -561,7 +632,7 @@ static void sbp2util_remove_request_packet_pool(struct sbp2scsi_host_info *hi)
*/
static struct sbp2_request_packet *
sbp2util_allocate_write_request_packet(struct sbp2scsi_host_info *hi,
nodeid_t node, u64 addr,
struct node_entry *ne, u64 addr,
size_t data_size,
quadlet_t data) {
struct list_head *lh;
......@@ -590,12 +661,11 @@ sbp2util_allocate_write_request_packet(struct sbp2scsi_host_info *hi,
INIT_LIST_HEAD(&packet->list);
sema_init(&packet->state_change, 0);
packet->state = hpsb_unused;
packet->generation = get_hpsb_generation(hi->host);
packet->data_be = 1;
hpsb_node_fill_packet(ne, packet);
packet->host = hi->host;
packet->tlabel = get_tlabel(hi->host, node, 1);
packet->node_id = node;
packet->tlabel = get_tlabel(hi->host, packet->node_id, 1);
if (!data_size) {
fill_async_writequad(packet, addr, data);
......@@ -717,8 +787,8 @@ static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_
}
/*
* This functions finds the sbp2_command for a given outstanding command
* orb. Only looks at the inuse list.
* This function finds the sbp2_command for a given outstanding command
* orb. Only looks at the inuse list.
*/
static struct sbp2_command_info *sbp2util_find_command_for_orb(
struct scsi_id_instance_data *scsi_id, dma_addr_t orb)
......@@ -745,7 +815,7 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb(
}
/*
* This functions finds the sbp2_command for a given outstanding SCpnt.
* This function finds the sbp2_command for a given outstanding SCpnt.
* Only looks at the inuse list.
*/
static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt)
......@@ -788,7 +858,6 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
command = list_entry(lh, struct sbp2_command_info, list);
command->Current_done = Current_done;
command->Current_SCpnt = Current_SCpnt;
command->linked = 0;
list_add_tail(&command->list, &scsi_id->sbp2_command_orb_inuse);
} else {
SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!");
......@@ -878,7 +947,8 @@ int sbp2_init(void)
* Register our sbp2 status address space...
*/
hpsb_register_addrspace(sbp2_hl_handle, &sbp2_ops, SBP2_STATUS_FIFO_ADDRESS,
SBP2_STATUS_FIFO_ADDRESS + sizeof(struct sbp2_status_block));
SBP2_STATUS_FIFO_ADDRESS +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(SBP2SCSI_MAX_SCSI_IDS+1));
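/* Editorial note, not part of the patch: each SCSI id now gets its own
 * status FIFO slot after SBP2_STATUS_FIFO_ADDRESS.  The macros live in
 * sbp2.h (outside this hunk); presumably the mapping is roughly
 *
 *	SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(i) == i * sizeof(struct sbp2_status_block)
 *	SBP2_STATUS_FIFO_OFFSET_TO_ENTRY(o) == o / sizeof(struct sbp2_status_block)
 *
 * which lets sbp2_handle_status_write() recover the scsi_id directly from
 * the address the target wrote to, instead of searching by node id as the
 * old code did. */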
hpsb_register_protocol(&sbp2_driver);
......@@ -936,7 +1006,8 @@ static void sbp2_update(struct unit_directory *ud)
if (sbp2_reconnect_device(hi, scsi_id)) {
/* Ok, reconnect has failed. Perhaps we didn't
/*
* Ok, reconnect has failed. Perhaps we didn't
* reconnect fast enough. Try doing a regular login.
*/
if (sbp2_login_device(hi, scsi_id)) {
......@@ -1008,8 +1079,10 @@ static void sbp2_add_host(struct hpsb_host *host)
/* Register our host with the SCSI stack. */
hi->scsi_host = scsi_register (&scsi_driver_template, sizeof(void *));
if (hi->scsi_host)
if (hi->scsi_host) {
hi->scsi_host->hostdata[0] = (unsigned long)hi;
hi->scsi_host->max_id = SBP2SCSI_MAX_SCSI_IDS;
}
scsi_driver_template.present++;
return;
......@@ -1190,18 +1263,10 @@ static int sbp2_start_device(struct sbp2scsi_host_info *hi, struct unit_director
/*
* Knock the total command orbs down if we are serializing I/O
*/
if (serialize_io) {
if (sbp2_serialize_io) {
scsi_id->sbp2_total_command_orbs = 2; /* one extra for good measure */
}
/*
* Allocate some extra command orb structures for devices with
* 128KB limit.
*/
if (scsi_id->sbp2_firmware_revision == SBP2_128KB_BROKEN_FIRMWARE) {
scsi_id->sbp2_total_command_orbs *= 4;
}
/*
* Find an empty spot to stick our scsi id instance data.
*/
......@@ -1348,14 +1413,19 @@ static int sbp2_login_device(struct sbp2scsi_host_info *hi, struct scsi_id_insta
scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(LOGIN_REQUEST);
scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */
scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(1); /* Exclusive access to device */
scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */
scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */
/* Set the lun if we were able to pull it from the device's unit directory */
if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
}
SBP2_DEBUG("sbp2_login_device: lun_misc initialized");
scsi_id->login_orb->passwd_resp_lengths =
ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
SBP2_DEBUG("sbp2_login_device: passwd_resp_lengths initialized");
scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO;
scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->id);
scsi_id->login_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
SBP2_STATUS_FIFO_ADDRESS_HI);
SBP2_DEBUG("sbp2_login_device: status FIFO initialized");
......@@ -1383,7 +1453,7 @@ static int sbp2_login_device(struct sbp2scsi_host_info *hi, struct scsi_id_insta
sbp2util_cpu_to_be32_buffer(data, 8);
SBP2_DEBUG("sbp2_login_device: prepared to write");
hpsb_write(hi->host, LOCAL_BUS | scsi_id->ne->nodeid, scsi_id->sbp2_management_agent_addr, data, 8);
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
SBP2_DEBUG("sbp2_login_device: written");
/*
......@@ -1472,7 +1542,8 @@ static int sbp2_logout_device(struct sbp2scsi_host_info *hi, struct scsi_id_inst
scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
scsi_id->logout_orb->reserved5 = 0x0;
scsi_id->logout_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO;
scsi_id->logout_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->id);
scsi_id->logout_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
SBP2_STATUS_FIFO_ADDRESS_HI);
......@@ -1488,7 +1559,7 @@ static int sbp2_logout_device(struct sbp2scsi_host_info *hi, struct scsi_id_inst
data[1] = scsi_id->logout_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
hpsb_write(hi->host, LOCAL_BUS | scsi_id->ne->nodeid, scsi_id->sbp2_management_agent_addr, data, 8);
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
/* Wait for device to logout...1 second. */
sleep_on_timeout(&scsi_id->sbp2_login_wait, HZ);
......@@ -1526,7 +1597,8 @@ static int sbp2_reconnect_device(struct sbp2scsi_host_info *hi, struct scsi_id_i
scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
scsi_id->reconnect_orb->reserved5 = 0x0;
scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO;
scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->id);
scsi_id->reconnect_orb->status_FIFO_hi =
(ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI);
......@@ -1547,7 +1619,7 @@ static int sbp2_reconnect_device(struct sbp2scsi_host_info *hi, struct scsi_id_i
data[1] = scsi_id->reconnect_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
hpsb_write(hi->host, LOCAL_BUS | scsi_id->ne->nodeid, scsi_id->sbp2_management_agent_addr, data, 8);
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
/*
* Wait for reconnect status... but, only if the device has not
......@@ -1601,7 +1673,7 @@ static int sbp2_set_busy_timeout(struct sbp2scsi_host_info *hi, struct scsi_id_i
*/
data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
if (hpsb_write(hi->host, LOCAL_BUS | scsi_id->ne->nodeid, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4)) {
if (hpsb_node_write(scsi_id->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4)) {
SBP2_ERR("sbp2_set_busy_timeout error");
}
......@@ -1620,6 +1692,14 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id)
SBP2_DEBUG("sbp2_parse_unit_directory");
/* Initialize some fields, in case an entry does not exist */
scsi_id->sbp2_device_type_and_lun = SBP2_DEVICE_TYPE_LUN_UNINITIALIZED;
scsi_id->sbp2_management_agent_addr = 0x0;
scsi_id->sbp2_command_set_spec_id = 0x0;
scsi_id->sbp2_command_set = 0x0;
scsi_id->sbp2_unit_characteristics = 0x0;
scsi_id->sbp2_firmware_revision = 0x0;
ud = scsi_id->ud;
/* Handle different fields in the unit directory, based on keys */
......@@ -1683,7 +1763,9 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id)
*/
scsi_id->sbp2_firmware_revision
= CONFIG_ROM_VALUE(ud->quadlets[i]);
if (scsi_id->sbp2_firmware_revision ==
SBP2_DEBUG("sbp2_firmware_revision = %x",
(unsigned int) scsi_id->sbp2_firmware_revision);
if ((scsi_id->sbp2_firmware_revision & 0xffff00) ==
SBP2_128KB_BROKEN_FIRMWARE) {
SBP2_WARN("warning: Bridge chipset supports 128KB max transfer size");
}
......@@ -1713,8 +1795,8 @@ static int sbp2_max_speed_and_size(struct sbp2scsi_host_info *hi, struct scsi_id
+ (scsi_id->ne->nodeid & NODE_MASK)];
/* Bump down our speed if the user requested it */
if (scsi_id->speed_code > max_speed) {
scsi_id->speed_code = max_speed;
if (scsi_id->speed_code > sbp2_max_speed) {
scsi_id->speed_code = sbp2_max_speed;
SBP2_ERR("Forcing SBP-2 max speed down to %s",
hpsb_speedto_str[scsi_id->speed_code]);
}
......@@ -1744,7 +1826,7 @@ static int sbp2_agent_reset(struct sbp2scsi_host_info *hi, struct scsi_id_instan
* Ok, let's write to the target's management agent register
*/
agent_reset_request_packet =
sbp2util_allocate_write_request_packet(hi, LOCAL_BUS | scsi_id->ne->nodeid,
sbp2util_allocate_write_request_packet(hi, scsi_id->ne,
scsi_id->sbp2_command_block_agent_addr +
SBP2_AGENT_RESET_OFFSET,
0, ntohl(SBP2_AGENT_RESET_DATA));
......@@ -2034,8 +2116,8 @@ static int sbp2_link_orb_command(struct sbp2scsi_host_info *hi, struct scsi_id_i
struct sbp2_command_orb *command_orb = &command->command_orb;
outstanding_orb_incr;
SBP2_ORB_DEBUG("sending command orb %p, linked = %x, total orbs = %x",
command_orb, command->linked, global_outstanding_command_orbs);
SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x",
command_orb, global_outstanding_command_orbs);
pci_dma_sync_single(hi->host->pdev, command->command_orb_dma,
sizeof(struct sbp2_command_orb),
......@@ -2054,7 +2136,7 @@ static int sbp2_link_orb_command(struct sbp2scsi_host_info *hi, struct scsi_id_i
if (hpsb_node_entry_valid(scsi_id->ne)) {
command_request_packet =
sbp2util_allocate_write_request_packet(hi, LOCAL_BUS | scsi_id->ne->nodeid,
sbp2util_allocate_write_request_packet(hi, scsi_id->ne,
scsi_id->sbp2_command_block_agent_addr +
SBP2_ORB_POINTER_OFFSET, 8, 0);
......@@ -2099,13 +2181,12 @@ static int sbp2_link_orb_command(struct sbp2scsi_host_info *hi, struct scsi_id_i
PCI_DMA_BIDIRECTIONAL);
/*
* Only ring the doorbell if we need to (first parts of
* linked orbs don't need this).
* Ring the doorbell
*/
if (!command->linked && hpsb_node_entry_valid(scsi_id->ne)) {
if (hpsb_node_entry_valid(scsi_id->ne)) {
command_request_packet = sbp2util_allocate_write_request_packet(hi,
LOCAL_BUS | scsi_id->ne->nodeid,
scsi_id->ne,
scsi_id->sbp2_command_block_agent_addr + SBP2_DOORBELL_OFFSET,
0, cpu_to_be32(command->command_orb_dma));
......@@ -2150,24 +2231,6 @@ static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_insta
SBP2_DEBUG("SCSI transfer size = %x", request_bufflen);
SBP2_DEBUG("SCSI s/g elements = %x", (unsigned int)SCpnt->use_sg);
/*
* Check for broken devices that can't handle greater than 128K
* transfers, and deal with them in a hacked ugly way.
*/
if ((scsi_id->sbp2_firmware_revision == SBP2_128KB_BROKEN_FIRMWARE) &&
(SCpnt->request_bufflen > SBP2_BROKEN_FIRMWARE_MAX_TRANSFER) &&
(device_type == TYPE_DISK) &&
(SCpnt->use_sg) &&
(*cmd == READ_6 || *cmd == READ_10 || *cmd == WRITE_6 || *cmd == WRITE_10)) {
/*
* Darn, a broken device. We'll need to split up the
* transfer ourselves.
*/
sbp2_send_split_command(hi, scsi_id, SCpnt, done);
return(0);
}
/*
* Allocate a command orb and s/g structure
*/
......@@ -2179,11 +2242,11 @@ static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_insta
/*
* The scsi stack sends down a request_bufflen which does not match the
* length field in the scsi cdb. This causes some sbp2 devices to
* reject this inquiry command. Fix is to fix request_bufflen to match
* the value in the cdb.
* reject this inquiry command. Hack fix is to set both buff length and
* length field in cdb to 36. This gives best compatibility.
*/
if (*cmd == INQUIRY) {
request_bufflen = cmd[4];
request_bufflen = cmd[4] = 0x24;
}
/*
......@@ -2215,120 +2278,6 @@ static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_insta
return(0);
}
/*
* This function is called for broken sbp2 device, where we have to break
* up large transfers.
*/
static int sbp2_send_split_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
{
unchar *cmd = (unchar *) SCpnt->cmnd;
struct scatterlist *sgpnt = (struct scatterlist *) SCpnt->request_buffer;
struct sbp2_command_info *command;
unsigned int i, block_count, block_address, block_size;
unsigned int current_sg = 0;
unsigned int total_transfer = 0;
unsigned int total_sg = 0;
unchar new_cmd[12];
memset(new_cmd, 0, 12);
memcpy(new_cmd, cmd, COMMAND_SIZE(*cmd));
/*
* Turns command into 10 byte version
*/
sbp2_check_sbp2_command(new_cmd);
/*
* Pull block size, block address, block count from command sent down
*/
block_count = (cmd[7] << 8) | cmd[8];
block_address = (cmd[2] << 24) | (cmd[3] << 16) | (cmd[4] << 8) | cmd[5];
block_size = SCpnt->request_bufflen/block_count;
/*
* Walk the scsi s/g list to determine how much we can transfer in one pop
*/
for (i=0; i<SCpnt->use_sg; i++) {
total_transfer+=sgpnt[i].length;
total_sg++;
if (total_transfer > SBP2_BROKEN_FIRMWARE_MAX_TRANSFER) {
/*
* Back everything up one, so that we're less than 128KB
*/
total_transfer-=sgpnt[i].length;
total_sg--;
i--;
command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done, hi);
if (!command) {
return(-EIO);
}
/*
* This is not the final piece, so mark it as linked
*/
command->linked = 1;
block_count = total_transfer/block_size;
new_cmd[2] = (unchar) (block_address >> 24) & 0xff;
new_cmd[3] = (unchar) (block_address >> 16) & 0xff;
new_cmd[4] = (unchar) (block_address >> 8) & 0xff;
new_cmd[5] = (unchar) block_address & 0xff;
new_cmd[7] = (unchar) (block_count >> 8) & 0xff;
new_cmd[8] = (unchar) block_count & 0xff;
block_address+=block_count;
sbp2_create_command_orb(hi, scsi_id, command, new_cmd, total_sg,
total_transfer, &sgpnt[current_sg],
SCpnt->sc_data_direction);
/*
* Link up the orb, and ring the doorbell if needed
*/
memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
sbp2_link_orb_command(hi, scsi_id, command);
current_sg += total_sg;
total_sg = 0;
total_transfer = 0;
}
}
/*
* Get the last piece...
*/
command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done, hi);
if (!command) {
return(-EIO);
}
block_count = total_transfer/block_size;
new_cmd[2] = (unchar) (block_address >> 24) & 0xff;
new_cmd[3] = (unchar) (block_address >> 16) & 0xff;
new_cmd[4] = (unchar) (block_address >> 8) & 0xff;
new_cmd[5] = (unchar) block_address & 0xff;
new_cmd[7] = (unchar) (block_count >> 8) & 0xff;
new_cmd[8] = (unchar) block_count & 0xff;
sbp2_create_command_orb(hi, scsi_id, command, new_cmd, total_sg,
total_transfer, &sgpnt[current_sg],
SCpnt->sc_data_direction);
/*
* Link up the orb, and ring the doorbell if needed
*/
memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
sbp2_link_orb_command(hi, scsi_id, command);
return(0);
}
/*
* This function deals with command set differences between Linux scsi
......@@ -2468,6 +2417,15 @@ static void sbp2_check_sbp2_response(struct sbp2scsi_host_info *hi,
case INQUIRY:
/*
* If scsi_id->sbp2_device_type_and_lun is uninitialized, then fill
* this information in from the inquiry response data. Lun is set to zero.
*/
if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
SBP2_DEBUG("Creating sbp2_device_type_and_lun from scsi inquiry data");
scsi_id->sbp2_device_type_and_lun = (scsi_buf[0] & 0x1f) << 16;
}
/*
* Make sure data length is ok. Minimum length is 36 bytes
*/
......@@ -2529,7 +2487,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
{
struct sbp2scsi_host_info *hi = NULL;
struct scsi_id_instance_data *scsi_id = NULL;
int i;
u32 id;
unsigned long flags;
Scsi_Cmnd *SCpnt = NULL;
u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
......@@ -2554,17 +2512,11 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
sbp2_spin_lock(&hi->sbp2_command_lock, flags);
/*
* Find our scsi_id structure
* Find our scsi_id structure by looking at the status fifo address written to by
* the sbp2 device.
*/
for (i=0; i<SBP2SCSI_MAX_SCSI_IDS; i++) {
if (hi->scsi_id[i]) {
if ((hi->scsi_id[i]->ne->nodeid & NODE_MASK) == (nodeid & NODE_MASK)) {
scsi_id = hi->scsi_id[i];
SBP2_DEBUG("SBP-2 status write from node %x", scsi_id->ne->nodeid);
break;
}
}
}
id = SBP2_STATUS_FIFO_OFFSET_TO_ENTRY((u32)(addr - SBP2_STATUS_FIFO_ADDRESS));
scsi_id = hi->scsi_id[id];
if (!scsi_id) {
SBP2_ERR("scsi_id is NULL - device is gone?");
......@@ -2605,12 +2557,12 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
SCpnt = command->Current_SCpnt;
sbp2util_mark_command_completed(scsi_id, command);
if (SCpnt && !command->linked) {
if (SCpnt) {
/*
* See if the target stored any scsi status information
*/
if (length > 8) {
if (STATUS_GET_LENGTH(scsi_id->status_block.ORB_offset_hi_misc) > 1) {
/*
* Translate SBP-2 status to SCSI sense data
*/
......@@ -2657,7 +2609,6 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
return(RCODE_COMPLETE);
}
/**************************************
* SCSI interface related section
......@@ -2722,8 +2673,7 @@ static int sbp2scsi_queuecommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
}
/*
* Check to see if there is a command in progress and just return
* busy (to be queued later)
* Check to see if we are in the middle of a bus reset.
*/
if (!hpsb_node_entry_valid(scsi_id->ne)) {
SBP2_ERR("Bus reset in progress - rejecting command");
......@@ -2769,7 +2719,7 @@ static void sbp2scsi_complete_all_commands(struct sbp2scsi_host_info *hi,
sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL);
sbp2util_mark_command_completed(scsi_id, command);
if (command->Current_SCpnt && !command->linked) {
if (command->Current_SCpnt) {
void (*done)(Scsi_Cmnd *) = command->Current_done;
command->Current_SCpnt->result = status << 16;
done (command->Current_SCpnt);
......@@ -2873,14 +2823,22 @@ static void sbp2scsi_complete_command(struct sbp2scsi_host_info *hi, struct scsi
* or hot-plug...
*/
if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) && (SCpnt->sense_buffer[2] == UNIT_ATTENTION)) {
SBP2_INFO("UNIT ATTENTION - return busy");
SBP2_DEBUG("UNIT ATTENTION - return busy");
SCpnt->result = DID_BUS_BUSY << 16;
}
/*
* Tell scsi stack that we're done with this command
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
spin_lock_irq(&io_request_lock);
done (SCpnt);
spin_unlock_irq(&io_request_lock);
#else
spin_lock_irq(&hi->scsi_host->host_lock);
done (SCpnt);
spin_unlock_irq(&hi->scsi_host->host_lock);
#endif
return;
}
......@@ -2903,30 +2861,27 @@ static int sbp2scsi_abort (Scsi_Cmnd *SCpnt)
/*
* Right now, just return any matching command structures
* to the free pool (there may be more than one because of
* broken up/linked commands).
* to the free pool.
*/
sbp2_spin_lock(&hi->sbp2_command_lock, flags);
do {
command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
if (command) {
SBP2_DEBUG("Found command to abort");
pci_dma_sync_single(hi->host->pdev,
command->command_orb_dma,
sizeof(struct sbp2_command_orb),
PCI_DMA_BIDIRECTIONAL);
pci_dma_sync_single(hi->host->pdev,
command->sge_dma,
sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL);
sbp2util_mark_command_completed(scsi_id, command);
if (command->Current_SCpnt && !command->linked) {
void (*done)(Scsi_Cmnd *) = command->Current_done;
command->Current_SCpnt->result = DID_ABORT << 16;
done (command->Current_SCpnt);
}
command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
if (command) {
SBP2_DEBUG("Found command to abort");
pci_dma_sync_single(hi->host->pdev,
command->command_orb_dma,
sizeof(struct sbp2_command_orb),
PCI_DMA_BIDIRECTIONAL);
pci_dma_sync_single(hi->host->pdev,
command->sge_dma,
sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL);
sbp2util_mark_command_completed(scsi_id, command);
if (command->Current_SCpnt) {
void (*done)(Scsi_Cmnd *) = command->Current_done;
command->Current_SCpnt->result = DID_ABORT << 16;
done (command->Current_SCpnt);
}
} while (command);
}
/*
* Initiate a fetch agent reset.
......@@ -2945,12 +2900,13 @@ static int sbp2scsi_abort (Scsi_Cmnd *SCpnt)
static int sbp2scsi_reset (Scsi_Cmnd *SCpnt)
{
struct sbp2scsi_host_info *hi = (struct sbp2scsi_host_info *) SCpnt->host->hostdata[0];
struct scsi_id_instance_data *scsi_id = hi->scsi_id[SCpnt->target];
SBP2_ERR("reset requested");
if (hi) {
if (scsi_id) {
SBP2_ERR("Generating IEEE-1394 bus reset");
hpsb_reset_bus(hi->host, LONG_RESET);
sbp2_agent_reset(hi, scsi_id, SBP2_SEND_NO_WAIT);
}
return(SUCCESS);
......@@ -2990,7 +2946,7 @@ static int sbp2scsi_detect (Scsi_Host_Template *tpnt)
SBP2_DEBUG("sbp2scsi_detect");
/*
* Call sbp2_init to register with the ieee1394 stack. This
* Call sbp2_init to register with the ieee1394 stack. This
* results in a callback to sbp2_add_host for each ieee1394
* host controller currently registered, and for each of those
* we register a scsi host with the scsi stack.
......@@ -3013,8 +2969,19 @@ static const char *sbp2scsi_info (struct Scsi_Host *host)
if (!hi) /* shouldn't happen, but... */
return "IEEE-1394 SBP-2 protocol driver";
sprintf(info, "IEEE-1394 SBP-2 protocol driver\nHost Driver: %s\nSerial I/O: %s",
hi->host->driver->name, serialize_io ? "yes" : "no");
sprintf(info, "IEEE-1394 SBP-2 protocol driver (host: %s)\n"
"SBP-2 module load options:\n"
"- Max speed supported: %s\n"
"- Max sectors per I/O supported: %d\n"
"- Max outstanding commands supported: %d\n"
"- Max outstanding commands per lun supported: %d\n"
"- Serialized I/O (debug): %s",
hi->host->driver->name,
hpsb_speedto_str[sbp2_max_speed],
sbp2_max_sectors,
sbp2_max_outstanding_cmds,
sbp2_max_cmds_per_lun,
sbp2_serialize_io ? "yes" : "no");
return info;
}
......@@ -3025,17 +2992,6 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
MODULE_LICENSE("GPL");
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,3,26)
#define PROC_SCSI_SBP2 PROC_SCSI_NOT_PRESENT /* What should I use? */
static struct proc_dir_entry proc_scsi_sbp2scsi = {
low_ino: PROC_SCSI_SBP2,
namelen: SBP2_DEVICE_NAME_SIZE,
name: SBP2_DEVICE_NAME,
mode: S_IFDIR | S_IRUGO | S_IXUGO,
nlink: 2
};
#endif
/* SCSI host template */
static Scsi_Host_Template scsi_driver_template = {
name: "IEEE-1394 SBP-2 protocol driver",
......@@ -3047,20 +3003,14 @@ static Scsi_Host_Template scsi_driver_template = {
eh_bus_reset_handler: sbp2scsi_reset,
eh_host_reset_handler: sbp2scsi_reset,
bios_param: sbp2scsi_biosparam,
can_queue: SBP2SCSI_MAX_OUTSTANDING_CMDS,
this_id: -1,
sg_tablesize: SBP2_MAX_SG_ELEMENTS,
cmd_per_lun: SBP2SCSI_MAX_CMDS_PER_LUN,
use_clustering: SBP2_CLUSTERING,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
use_new_eh_code: TRUE,
#endif
emulated: 1,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,26)
proc_name: SBP2_DEVICE_NAME,
#else
proc_dir: &proc_scsi_sbp2scsi,
#endif
};
static int sbp2_module_init(void)
......@@ -3068,29 +3018,26 @@ static int sbp2_module_init(void)
SBP2_DEBUG("sbp2_module_init");
/*
* Module load option for force one command at a time
* Module load debug option to force one command at a time (serializing I/O)
*/
if (serialize_io) {
if (sbp2_serialize_io) {
SBP2_ERR("Driver forced to serialize I/O (serialize_io = 1)");
scsi_driver_template.can_queue = 1;
scsi_driver_template.cmd_per_lun = 1;
} else {
scsi_driver_template.can_queue = sbp2_max_outstanding_cmds;
scsi_driver_template.cmd_per_lun = sbp2_max_cmds_per_lun;
}
/*
* Module load option to limit max size of requests from the
* scsi drivers
*/
/*
* Set max sectors (module load option). Default is 255 sectors.
*/
if (no_large_packets) {
SBP2_ERR("Driver forced to limit max transfer size "
"(no_large_packets = 1)");
scsi_driver_template.sg_tablesize = 0x1f;
scsi_driver_template.use_clustering = DISABLE_CLUSTERING;
}
scsi_driver_template.max_sectors = sbp2_max_sectors;
/*
* Ideally we would register our scsi_driver_template with the
* scsi stack and after that register with the ieee1394 stack
* and process the add_host callbacks. However, the detect
* and process the add_host callbacks. However, the detect
* function in the scsi host template requires that we find at
* least one host, so we "nest" the registrations by calling
* sbp2_init from the detect function.
......
......@@ -195,10 +195,28 @@ struct sbp2_status_block {
* Miscellaneous SBP2 related config rom defines
*/
#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL /* for write posting! */
/*
* The status fifo address definition below is used as a status base, with a chunk
* separately assigned for each sbp2 device detected. For example, 0xfffe00000000ULL
* is used for the first sbp2 device detected, 0xfffe00000020ULL for the next sbp2
* device, and so on.
*
* Note: We could use a single status fifo address for all sbp2 devices, and figure
* out which sbp2 device the status belongs to by looking at the source node id of
* the status write... but, using separate addresses for each sbp2 device allows for
* better code and the ability to support multiple luns within a single 1394 node.
*
* Also note that we choose the address range below as it is a region specified for
* write posting, where the ohci controller will automatically send an ack_complete
* when the status is written by the sbp2 device... saving a split transaction. =)
*/
#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL
#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe
#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0
#define SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(entry) ((entry) << 5)
#define SBP2_STATUS_FIFO_OFFSET_TO_ENTRY(offset) ((offset) >> 5)
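Each detected sbp2 device is assigned its own 32-byte chunk of the status FIFO range, so the scsi_id index and the FIFO address differ only by a five-bit shift. A minimal, self-contained sketch of that arithmetic using the macros defined above (the small main() driver is illustrative only and is not part of the driver):

#include <stdio.h>
#include <stdint.h>

#define SBP2_STATUS_FIFO_ADDRESS                 0xfffe00000000ULL
#define SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(entry)  ((entry) << 5)
#define SBP2_STATUS_FIFO_OFFSET_TO_ENTRY(offset) ((offset) >> 5)

int main(void)
{
	/* Each detected sbp2 device gets a 32-byte chunk of the fifo range. */
	for (int id = 0; id < 3; id++) {
		uint64_t addr = SBP2_STATUS_FIFO_ADDRESS +
				SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(id);
		printf("scsi_id %d -> status fifo 0x%012llx\n",
		       id, (unsigned long long)addr);
	}

	/* Reverse mapping, as done in the status write handler:
	 * recover the device index from the address that was written to. */
	uint64_t written = 0xfffe00000040ULL;	/* third device's chunk */
	uint32_t id = SBP2_STATUS_FIFO_OFFSET_TO_ENTRY(
			(uint32_t)(written - SBP2_STATUS_FIFO_ADDRESS));
	printf("status write at 0x%012llx -> scsi_id[%u]\n",
	       (unsigned long long)written, id);
	return 0;
}

The status write handler in sbp2.c performs exactly this reverse mapping: it subtracts SBP2_STATUS_FIFO_ADDRESS from the written address and shifts right by five to index hi->scsi_id[].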
#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1
#define SBP2_CSR_OFFSET_KEY 0x54
#define SBP2_UNIT_SPEC_ID_KEY 0x12
......@@ -209,8 +227,8 @@ struct sbp2_status_block {
#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14
#define SBP2_FIRMWARE_REVISION_KEY 0x3c
#define SBP2_DEVICE_TYPE(q) (((q) >> 16) & 0x1f)
#define SBP2_DEVICE_LUN(q) ((q) & 0xffff)
#define SBP2_DEVICE_TYPE(q) (((q) >> 16) & 0x1f)
#define SBP2_DEVICE_LUN(q) ((q) & 0xffff)
#define SBP2_AGENT_STATE_OFFSET 0x00ULL
#define SBP2_AGENT_RESET_OFFSET 0x04ULL
......@@ -231,10 +249,14 @@ struct sbp2_status_block {
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
#define SBP2_SW_VERSION_ENTRY 0x00010483
/*
* Other misc defines
*/
#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
#define SBP2_BROKEN_FIRMWARE_MAX_TRANSFER 0x20000
#define SBP2_DEVICE_TYPE_LUN_UNINITIALIZED 0xffffffff
/*
* Flags for SBP-2 functions
*/
......@@ -247,9 +269,10 @@ struct sbp2_status_block {
#define SBP2_MAX_SG_ELEMENTS SG_ALL
#define SBP2_CLUSTERING ENABLE_CLUSTERING
#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
#define SBP2SCSI_MAX_SCSI_IDS 8
#define SBP2SCSI_MAX_SCSI_IDS 16 /* Max sbp2 device instances supported */
#define SBP2SCSI_MAX_OUTSTANDING_CMDS 8 /* Max total outstanding sbp2 commands allowed at a time! */
#define SBP2SCSI_MAX_CMDS_PER_LUN 4 /* Max outstanding sbp2 commands per device - tune as needed */
#define SBP2SCSI_MAX_CMDS_PER_LUN 1 /* Max outstanding sbp2 commands per device - tune as needed */
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
#ifndef TYPE_SDAD
#define TYPE_SDAD 0x0e /* simplified direct access device */
......@@ -291,13 +314,8 @@ static unchar sbp2scsi_direction_table[0x100] = {
DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
};
/*
* Number of request packets available for actual sbp2 I/O requests (these are used
* for sending command and agent reset packets).
*/
#define SBP2_MAX_REQUEST_PACKETS SBP2SCSI_MAX_OUTSTANDING_CMDS /* Per host adapter instance */
#define SBP2_MAX_COMMAND_ORBS SBP2SCSI_MAX_CMDS_PER_LUN * 2 /* Per sbp2 device instance */
#define SBP2_MAX_REQUEST_PACKETS (sbp2_max_outstanding_cmds * 2)
#define SBP2_MAX_COMMAND_ORBS (sbp2_max_cmds_per_lun * 2)
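Both the per-host and per-device pool sizes now scale with the module load options instead of being fixed at compile time. A worked example, assuming the parameters default to the SBP2SCSI_MAX_* values above (an assumption; the actual defaults are set where the parameters are declared in sbp2.c):

/*
 * Assuming sbp2_max_outstanding_cmds = 8 and sbp2_max_cmds_per_lun = 1:
 *
 *   SBP2_MAX_REQUEST_PACKETS = 8 * 2 = 16   request packets per host adapter
 *   SBP2_MAX_COMMAND_ORBS    = 1 * 2 =  2   command ORBs per sbp2 device
 */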
/*
* Request packets structure (used for sending command and agent reset packets)
......@@ -327,7 +345,6 @@ struct sbp2_command_info {
dma_addr_t command_orb_dma ____cacheline_aligned;
Scsi_Cmnd *Current_SCpnt;
void (*Current_done)(Scsi_Cmnd *);
unsigned int linked;
/* Also need s/g structure for each sbp2 command */
struct sbp2_unrestricted_page_table scatter_gather_element[SBP2_MAX_SG_ELEMENTS] ____cacheline_aligned;
......@@ -434,7 +451,7 @@ struct sbp2scsi_host_info {
* Here is the pool of request packets. All the hpsb packets (for 1394 bus transactions)
* are allocated at init and simply re-initialized when needed.
*/
struct sbp2_request_packet request_packet[SBP2_MAX_REQUEST_PACKETS];
struct sbp2_request_packet *request_packet;
/*
* SCSI ID instance data (one for each sbp2 device instance possible)
......@@ -453,7 +470,7 @@ struct sbp2scsi_host_info {
static int sbp2util_create_request_packet_pool(struct sbp2scsi_host_info *hi);
static void sbp2util_remove_request_packet_pool(struct sbp2scsi_host_info *hi);
static struct sbp2_request_packet *sbp2util_allocate_write_request_packet(struct sbp2scsi_host_info *hi,
nodeid_t node, u64 addr,
struct node_entry *ne, u64 addr,
size_t data_size,
quadlet_t data);
static void sbp2util_free_request_packet(struct sbp2_request_packet *request_packet);
......@@ -505,8 +522,6 @@ static int sbp2_link_orb_command(struct sbp2scsi_host_info *hi, struct scsi_id_i
struct sbp2_command_info *command);
static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *));
static int sbp2_send_split_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *));
static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data);
static void sbp2_check_sbp2_command(unchar *cmd);
static void sbp2_check_sbp2_response(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
......
......@@ -18,6 +18,9 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* jds -- add private data to file to keep track of iso contexts associated
with each open -- so release won't kill all iso transfers */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
......@@ -31,18 +34,12 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/tqueue.h>
#include <linux/delay.h>
#include <linux/devfs_fs_kernel.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <linux/sched.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/wrapper.h>
#include <linux/vmalloc.h>
......@@ -70,6 +67,14 @@
#define vmalloc_32(x) vmalloc(x)
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,3))
#define remap_page_range_1394(vma, start, addr, size, prot) \
remap_page_range(start, addr, size, prot)
#else
#define remap_page_range_1394(vma, start, addr, size, prot) \
remap_page_range(vma, start, addr, size, prot)
#endif
struct it_dma_prg {
struct dma_cmd begin;
quadlet_t data[4];
......@@ -107,6 +112,8 @@ struct dma_iso_ctx {
spinlock_t lock;
unsigned int syt_offset;
int flags;
struct list_head link;
};
struct video_card {
......@@ -114,9 +121,12 @@ struct video_card {
struct list_head list;
int id;
devfs_handle_t devfs;
};
struct dma_iso_ctx **ir_context;
struct dma_iso_ctx **it_context;
struct file_ctx {
struct video_card *video;
struct list_head context_list;
struct dma_iso_ctx *current_ctx;
};
......@@ -158,35 +168,86 @@ static struct hpsb_highlevel *hl_handle = NULL;
/* Memory management functions */
/*******************************/
static inline unsigned long kvirt_to_bus(unsigned long adr)
#define MDEBUG(x) do { } while(0) /* Debug memory management */
/* [DaveM] I've recoded most of this so that:
* 1) It's easier to tell what is happening
* 2) It's more portable, especially for translating things
* out of vmalloc mapped areas in the kernel.
* 3) Less unnecessary translations happen.
*
* The code used to assume that the kernel vmalloc mappings
* existed in the page tables of every process, this is simply
* not guaranteed. We now use pgd_offset_k which is the
* defined way to get at the kernel page tables.
*/
/* Given PGD from the address space's page table, return the kernel
* virtual mapping of the physical memory mapped at ADR.
*/
static inline unsigned long uvirt_to_kva(pgd_t *pgd, unsigned long adr)
{
unsigned long ret = 0UL;
pmd_t *pmd;
pte_t *ptep, pte;
if (!pgd_none(*pgd)) {
pmd = pmd_offset(pgd, adr);
if (!pmd_none(*pmd)) {
ptep = pte_offset_kernel(pmd, adr);
pte = *ptep;
if(pte_present(pte)) {
ret = (unsigned long)
page_address(pte_page(pte));
ret |= (adr & (PAGE_SIZE - 1));
}
}
}
MDEBUG(printk("uv2kva(%lx-->%lx)", adr, ret));
return ret;
}
static inline unsigned long uvirt_to_bus(unsigned long adr)
{
unsigned long kva, ret;
kva = (unsigned long) page_address(vmalloc_to_page((void *)adr));
kva |= adr & (PAGE_SIZE-1); /* restore the offset */
kva = uvirt_to_kva(pgd_offset(current->mm, adr), adr);
ret = virt_to_bus((void *)kva);
MDEBUG(printk("uv2b(%lx-->%lx)", adr, ret));
return ret;
}
static inline unsigned long kvirt_to_bus(unsigned long adr)
{
unsigned long va, kva, ret;
va = VMALLOC_VMADDR(adr);
kva = uvirt_to_kva(pgd_offset_k(va), va);
ret = virt_to_bus((void *)kva);
MDEBUG(printk("kv2b(%lx-->%lx)", adr, ret));
return ret;
}
/* Here we want the physical address of the memory.
* This is used when initializing the contents of the area.
* This is used when initializing the contents of the
* area and marking the pages as reserved.
*/
static inline unsigned long kvirt_to_pa(unsigned long adr)
{
unsigned long kva, ret;
unsigned long va, kva, ret;
kva = (unsigned long) page_address(vmalloc_to_page((void *)adr));
kva |= adr & (PAGE_SIZE-1); /* restore the offset */
va = VMALLOC_VMADDR(adr);
kva = uvirt_to_kva(pgd_offset_k(va), va);
ret = __pa(kva);
MDEBUG(printk("kv2pa(%lx-->%lx)", adr, ret));
return ret;
}
static void * rvmalloc(unsigned long size)
{
void * mem;
unsigned long adr;
size=PAGE_ALIGN(size);
unsigned long adr, page;
mem=vmalloc_32(size);
if (mem)
{
......@@ -195,7 +256,8 @@ static void * rvmalloc(unsigned long size)
adr=(unsigned long) mem;
while (size > 0)
{
mem_map_reserve(vmalloc_to_page((void *)adr));
page = kvirt_to_pa(adr);
mem_map_reserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
......@@ -205,14 +267,15 @@ static void * rvmalloc(unsigned long size)
static void rvfree(void * mem, unsigned long size)
{
unsigned long adr;
unsigned long adr, page;
if (mem)
{
adr=(unsigned long) mem;
while ((long) size > 0)
while (size > 0)
{
mem_map_unreserve(vmalloc_to_page((void *)adr));
page = kvirt_to_pa(adr);
mem_map_unreserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
......@@ -221,52 +284,48 @@ static void rvfree(void * mem, unsigned long size)
}
/* End of code taken from bttv.c */
static int free_dma_iso_ctx(struct dma_iso_ctx **d)
static int free_dma_iso_ctx(struct dma_iso_ctx *d)
{
int i;
struct ti_ohci *ohci;
unsigned long *usage;
if ((*d)==NULL) return -1;
ohci = (struct ti_ohci *)(*d)->ohci;
DBGMSG(d->ohci->id, "Freeing dma_iso_ctx %d", d->ctx);
DBGMSG(ohci->id, "Freeing dma_iso_ctx %d", (*d)->ctx);
ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
ohci1394_unhook_irq(d->ohci, irq_handler, d);
ohci1394_stop_context(ohci, (*d)->ctrlClear, NULL);
if (d->buf)
rvfree((void *)d->buf, d->num_desc * d->buf_size);
if ((*d)->buf) rvfree((void *)(*d)->buf,
(*d)->num_desc * (*d)->buf_size);
if ((*d)->ir_prg) {
for (i=0;i<(*d)->num_desc;i++)
if ((*d)->ir_prg[i]) kfree((*d)->ir_prg[i]);
kfree((*d)->ir_prg);
if (d->ir_prg) {
for (i=0;i<d->num_desc;i++)
if (d->ir_prg[i]) kfree(d->ir_prg[i]);
kfree(d->ir_prg);
}
if ((*d)->it_prg) {
for (i=0;i<(*d)->num_desc;i++)
if ((*d)->it_prg[i]) kfree((*d)->it_prg[i]);
kfree((*d)->it_prg);
if (d->it_prg) {
for (i=0;i<d->num_desc;i++)
if (d->it_prg[i]) kfree(d->it_prg[i]);
kfree(d->it_prg);
}
if ((*d)->buffer_status)
kfree((*d)->buffer_status);
if ((*d)->buffer_time)
kfree((*d)->buffer_time);
if ((*d)->last_used_cmd)
kfree((*d)->last_used_cmd);
if ((*d)->next_buffer)
kfree((*d)->next_buffer);
usage = ((*d)->type == ISO_RECEIVE) ? &ohci->ir_ctx_usage :
&ohci->it_ctx_usage;
if (d->buffer_status)
kfree(d->buffer_status);
if (d->buffer_time)
kfree(d->buffer_time);
if (d->last_used_cmd)
kfree(d->last_used_cmd);
if (d->next_buffer)
kfree(d->next_buffer);
usage = (d->type == ISO_RECEIVE) ? &d->ohci->ir_ctx_usage :
&d->ohci->it_ctx_usage;
/* clear the ISO context usage bit */
clear_bit((*d)->ctx, usage);
kfree(*d);
*d = NULL;
clear_bit(d->ctx, usage);
list_del(&d->link);
kfree(d);
return 0;
}
......@@ -318,11 +377,17 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
d->ir_prg = NULL;
init_waitqueue_head(&d->waitq);
if (ohci1394_hook_irq(ohci, irq_handler, d) != 0) {
PRINT(KERN_ERR, ohci->id, "ohci1394_hook_irq() failed");
free_dma_iso_ctx(d);
return NULL;
}
d->buf = rvmalloc(d->num_desc * d->buf_size);
if (d->buf == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate dma buffer");
free_dma_iso_ctx(&d);
free_dma_iso_ctx(d);
return NULL;
}
memset(d->buf, 0, d->num_desc * d->buf_size);
......@@ -339,7 +404,7 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
if (d->ir_prg == NULL) {
PRINT(KERN_ERR, ohci->id,
"Failed to allocate dma ir prg");
free_dma_iso_ctx(&d);
free_dma_iso_ctx(d);
return NULL;
}
memset(d->ir_prg, 0, d->num_desc * sizeof(struct dma_cmd *));
......@@ -355,7 +420,7 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
if (d->ir_prg[i] == NULL) {
PRINT(KERN_ERR, ohci->id,
"Failed to allocate dma ir prg");
free_dma_iso_ctx(&d);
free_dma_iso_ctx(d);
return NULL;
}
}
......@@ -371,7 +436,7 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
if (d->it_prg == NULL) {
PRINT(KERN_ERR, ohci->id,
"Failed to allocate dma it prg");
free_dma_iso_ctx(&d);
free_dma_iso_ctx(d);
return NULL;
}
memset(d->it_prg, 0, d->num_desc*sizeof(struct it_dma_prg *));
......@@ -383,7 +448,7 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
"Packet size %d (page_size: %ld) "
"not yet supported\n",
packet_size, PAGE_SIZE);
free_dma_iso_ctx(&d);
free_dma_iso_ctx(d);
return NULL;
}
......@@ -402,7 +467,7 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
if (d->it_prg[i] == NULL) {
PRINT(KERN_ERR, ohci->id,
"Failed to allocate dma it prg");
free_dma_iso_ctx(&d);
free_dma_iso_ctx(d);
return NULL;
}
}
......@@ -419,22 +484,22 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
if (d->buffer_status == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate buffer_status");
free_dma_iso_ctx(&d);
free_dma_iso_ctx(d);
return NULL;
}
if (d->buffer_time == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate buffer_time");
free_dma_iso_ctx(&d);
free_dma_iso_ctx(d);
return NULL;
}
if (d->last_used_cmd == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate last_used_cmd");
free_dma_iso_ctx(&d);
free_dma_iso_ctx(d);
return NULL;
}
if (d->next_buffer == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate next_buffer");
free_dma_iso_ctx(&d);
free_dma_iso_ctx(d);
return NULL;
}
memset(d->buffer_status, 0, d->num_desc * sizeof(unsigned int));
......@@ -536,36 +601,17 @@ static void initialize_dma_ir_ctx(struct dma_iso_ctx *d, int tag, int flags)
}
/* find which context is listening to this channel */
static struct dma_iso_ctx **
ir_ctx_listening(struct video_card *video, int channel)
{
int i;
struct ti_ohci *ohci = video->ohci;
for (i = 0; i < ohci->nb_iso_rcv_ctx-1; i++)
if (video->ir_context[i] &&
video->ir_context[i]->channel == channel)
return &video->ir_context[i];
PRINT(KERN_ERR, ohci->id, "No iso context is listening to channel %d",
channel);
return NULL;
}
static struct dma_iso_ctx **
it_ctx_talking(struct video_card *video, int channel)
static struct dma_iso_ctx *
find_ctx(struct list_head *list, int type, int channel)
{
int i;
struct ti_ohci *ohci = video->ohci;
struct list_head *lh;
for (i = 0; i < ohci->nb_iso_xmit_ctx; i++)
if (video->it_context[i] &&
video->it_context[i]->channel==channel)
return &video->ir_context[i];
PRINT(KERN_ERR, ohci->id, "No iso context is talking to channel %d",
channel);
list_for_each(lh, list) {
struct dma_iso_ctx *ctx;
ctx = list_entry(lh, struct dma_iso_ctx, link);
if (ctx->type == type && ctx->channel == channel)
return ctx;
}
return NULL;
}
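find_ctx() walks the per-open context_list introduced by this change (see the jds note at the top of video1394.c) and matches on context type and channel, so one open file can no longer free another's contexts. A minimal, self-contained sketch of that intrusive-list lookup pattern, using simplified stand-ins for the kernel's list_head / list_for_each / list_entry (demo_ctx and find_ctx_demo are hypothetical names, not driver code):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for the kernel's struct list_head. */
struct list_node { struct list_node *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_ctx {
	int type;			/* ISO_RECEIVE or ISO_TRANSMIT */
	int channel;
	struct list_node link;		/* chained off the per-open file_ctx */
};

enum { ISO_RECEIVE = 0, ISO_TRANSMIT = 1 };

static struct demo_ctx *find_ctx_demo(struct list_node *head, int type, int channel)
{
	/* Walk the circular list and recover the enclosing context. */
	for (struct list_node *n = head->next; n != head; n = n->next) {
		struct demo_ctx *c = container_of(n, struct demo_ctx, link);
		if (c->type == type && c->channel == channel)
			return c;
	}
	return NULL;
}

int main(void)
{
	struct list_node head = { &head };		/* empty circular list */
	struct demo_ctx rx = { ISO_RECEIVE, 63, { &head } };
	head.next = &rx.link;				/* one listening context */

	struct demo_ctx *hit = find_ctx_demo(&head, ISO_RECEIVE, 63);
	printf("channel 63: %s\n", hit ? "found receive context" : "not found");
	return 0;
}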
......@@ -585,7 +631,11 @@ int wakeup_dma_ir_ctx(struct ti_ohci *ohci, struct dma_iso_ctx *d)
if (d->ir_prg[i][d->nb_cmd-1].status & 0xFFFF0000) {
reset_ir_status(d, i);
d->buffer_status[i] = VIDEO1394_BUFFER_READY;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)
get_fast_time(&d->buffer_time[i]);
#else
do_gettimeofday(&d->buffer_time[i]);
#endif
}
}
spin_unlock(&d->lock);
......@@ -790,13 +840,14 @@ static void initialize_dma_it_ctx(struct dma_iso_ctx *d, int sync_tag,
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1<<d->ctx);
}
static int do_iso_mmap(struct vm_area_struct *vma, struct ti_ohci *ohci, struct dma_iso_ctx *d,
const char *adr, unsigned long size)
static int do_iso_mmap(struct ti_ohci *ohci, struct dma_iso_ctx *d,
struct vm_area_struct *vma)
{
unsigned long start=(unsigned long) adr;
unsigned long page,pos;
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end - vma->vm_start;
unsigned long page, pos;
if (size>d->num_desc * d->buf_size) {
if (size > d->num_desc * d->buf_size) {
PRINT(KERN_ERR, ohci->id,
"iso context %d buf size is different from mmap size",
d->ctx);
......@@ -808,14 +859,14 @@ static int do_iso_mmap(struct vm_area_struct *vma, struct ti_ohci *ohci, struct
return -EINVAL;
}
pos=(unsigned long) d->buf;
pos = (unsigned long) d->buf;
while (size > 0) {
page = kvirt_to_pa(pos);
if (remap_page_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
if (remap_page_range_1394(vma, start, page, PAGE_SIZE, PAGE_SHARED))
return -EAGAIN;
start+=PAGE_SIZE;
pos+=PAGE_SIZE;
size-=PAGE_SIZE;
start += PAGE_SIZE;
pos += PAGE_SIZE;
size -= PAGE_SIZE;
}
return 0;
}
......@@ -823,30 +874,10 @@ static int do_iso_mmap(struct vm_area_struct *vma, struct ti_ohci *ohci, struct
static int video1394_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct video_card *video = NULL;
struct ti_ohci *ohci = NULL;
struct file_ctx *ctx = (struct file_ctx *)file->private_data;
struct video_card *video = ctx->video;
struct ti_ohci *ohci = video->ohci;
unsigned long flags;
struct list_head *lh;
spin_lock_irqsave(&video1394_cards_lock, flags);
if (!list_empty(&video1394_cards)) {
struct video_card *p;
list_for_each(lh, &video1394_cards) {
p = list_entry(lh, struct video_card, list);
if (p->id == ieee1394_file_to_instance(file)) {
video = p;
ohci = video->ohci;
break;
}
}
}
spin_unlock_irqrestore(&video1394_cards_lock, flags);
if (video == NULL) {
PRINT_G(KERN_ERR, "%s: Unknown video card for minor %d",
__FUNCTION__, ieee1394_file_to_instance(file));
return -EFAULT;
}
switch(cmd)
{
......@@ -855,6 +886,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
{
struct video1394_mmap v;
u64 mask;
struct dma_iso_ctx *d;
int i;
if(copy_from_user(&v, (void *)arg, sizeof(v)))
......@@ -889,7 +921,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
}
ohci->ISO_channel_usage |= mask;
if (v.buf_size<=0) {
PRINT(KERN_ERR, ohci->id,
"Invalid %d length buffer requested",v.buf_size);
......@@ -910,67 +942,47 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
}
if (cmd == VIDEO1394_LISTEN_CHANNEL) {
/* find a free iso receive context */
for (i=0;i<ohci->nb_iso_rcv_ctx-1;i++)
if (video->ir_context[i]==NULL) break;
if (i==(ohci->nb_iso_rcv_ctx-1)) {
PRINT(KERN_ERR, ohci->id,
"No iso context available");
return -EFAULT;
}
d = alloc_dma_iso_ctx(ohci, ISO_RECEIVE,
v.nb_buffers, v.buf_size,
v.channel, 0);
video->ir_context[i] =
alloc_dma_iso_ctx(ohci, ISO_RECEIVE,
v.nb_buffers, v.buf_size,
v.channel, 0);
if (video->ir_context[i] == NULL) {
if (d == NULL) {
PRINT(KERN_ERR, ohci->id,
"Couldn't allocate ir context");
return -EFAULT;
}
initialize_dma_ir_ctx(video->ir_context[i],
v.sync_tag, v.flags);
initialize_dma_ir_ctx(d, v.sync_tag, v.flags);
video->current_ctx = video->ir_context[i];
ctx->current_ctx = d;
v.buf_size = video->ir_context[i]->buf_size;
v.buf_size = d->buf_size;
list_add_tail(&d->link, &ctx->context_list);
PRINT(KERN_INFO, ohci->id,
"iso context %d listen on channel %d",
video->current_ctx->ctx, v.channel);
d->ctx, v.channel);
}
else {
/* find a free iso transmit context */
for (i=0;i<ohci->nb_iso_xmit_ctx;i++)
if (video->it_context[i]==NULL) break;
if (i==ohci->nb_iso_xmit_ctx) {
PRINT(KERN_ERR, ohci->id,
"No iso context available");
return -EFAULT;
}
video->it_context[i] =
alloc_dma_iso_ctx(ohci, ISO_TRANSMIT,
v.nb_buffers, v.buf_size,
v.channel, v.packet_size);
d = alloc_dma_iso_ctx(ohci, ISO_TRANSMIT,
v.nb_buffers, v.buf_size,
v.channel, v.packet_size);
if (video->it_context[i] == NULL) {
if (d == NULL) {
PRINT(KERN_ERR, ohci->id,
"Couldn't allocate it context");
return -EFAULT;
}
initialize_dma_it_ctx(video->it_context[i],
v.sync_tag, v.syt_offset, v.flags);
initialize_dma_it_ctx(d, v.sync_tag,
v.syt_offset, v.flags);
video->current_ctx = video->it_context[i];
ctx->current_ctx = d;
v.buf_size = video->it_context[i]->buf_size;
v.buf_size = d->buf_size;
list_add_tail(&d->link, &ctx->context_list);
PRINT(KERN_INFO, ohci->id,
"Iso context %d talk on channel %d", i,
"Iso context %d talk on channel %d", d->ctx,
v.channel);
}
......@@ -984,6 +996,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
{
int channel;
u64 mask;
struct dma_iso_ctx *d;
if(copy_from_user(&channel, (void *)arg, sizeof(int)))
return -EFAULT;
......@@ -999,41 +1012,31 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
"Channel %d is not being used", channel);
return -EFAULT;
}
/* Mark this channel as unused */
ohci->ISO_channel_usage &= ~mask;
if (cmd == VIDEO1394_UNLISTEN_CHANNEL) {
struct dma_iso_ctx **d;
d = ir_ctx_listening(video, channel);
if (d == NULL) return -EFAULT;
PRINT(KERN_INFO, ohci->id,
"Iso context %d stop listening on channel %d",
(*d)->ctx, channel);
free_dma_iso_ctx(d);
}
else {
struct dma_iso_ctx **d;
d = it_ctx_talking(video, channel);
if (d == NULL) return -EFAULT;
PRINT(KERN_INFO, ohci->id,
"Iso context %d stop talking on channel %d",
(*d)->ctx, channel);
free_dma_iso_ctx(d);
if (cmd == VIDEO1394_UNLISTEN_CHANNEL)
d = find_ctx(&ctx->context_list, ISO_RECEIVE, channel);
else
d = find_ctx(&ctx->context_list, ISO_TRANSMIT, channel);
}
if (d == NULL) return -EFAULT;
PRINT(KERN_INFO, ohci->id, "Iso context %d "
"stop talking on channel %d", d->ctx, channel);
free_dma_iso_ctx(d);
return 0;
}
case VIDEO1394_LISTEN_QUEUE_BUFFER:
{
struct video1394_wait v;
struct dma_iso_ctx *d, **dd;
struct dma_iso_ctx *d;
if(copy_from_user(&v, (void *)arg, sizeof(v)))
return -EFAULT;
dd = ir_ctx_listening(video, v.channel);
if (dd == NULL) return -EFAULT;
d = *dd;
d = find_ctx(&ctx->context_list, ISO_RECEIVE, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
......@@ -1089,15 +1092,13 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
case VIDEO1394_LISTEN_POLL_BUFFER:
{
struct video1394_wait v;
struct dma_iso_ctx *d, **dd;
struct dma_iso_ctx *d;
int i;
if(copy_from_user(&v, (void *)arg, sizeof(v)))
return -EFAULT;
dd = ir_ctx_listening(video, v.channel);
if (dd==NULL) return -EFAULT;
d = *dd;
d = find_ctx(&ctx->context_list, ISO_RECEIVE, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
......@@ -1173,14 +1174,12 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
{
struct video1394_wait v;
struct video1394_queue_variable qv;
struct dma_iso_ctx *d, **dd;
struct dma_iso_ctx *d;
if(copy_from_user(&v, (void *)arg, sizeof(v)))
return -EFAULT;
dd = it_ctx_talking(video, v.channel);
if (dd == NULL) return -EFAULT;
d = *dd;
d = find_ctx(&ctx->context_list, ISO_TRANSMIT, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
......@@ -1264,14 +1263,12 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
case VIDEO1394_TALK_WAIT_BUFFER:
{
struct video1394_wait v;
struct dma_iso_ctx *d, **dd;
struct dma_iso_ctx *d;
if(copy_from_user(&v, (void *)arg, sizeof(v)))
return -EFAULT;
dd = it_ctx_talking(video, v.channel);
if (dd == NULL) return -EFAULT;
d = *dd;
d = find_ctx(&ctx->context_list, ISO_TRANSMIT, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
......@@ -1321,40 +1318,18 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
int video1394_mmap(struct file *file, struct vm_area_struct *vma)
{
struct video_card *video = NULL;
struct ti_ohci *ohci;
struct file_ctx *ctx = (struct file_ctx *)file->private_data;
struct video_card *video = ctx->video;
struct ti_ohci *ohci = video->ohci;
int res = -EINVAL;
unsigned long flags;
struct list_head *lh;
spin_lock_irqsave(&video1394_cards_lock, flags);
if (!list_empty(&video1394_cards)) {
struct video_card *p;
list_for_each(lh, &video1394_cards) {
p = list_entry(lh, struct video_card, list);
if (p->id == ieee1394_file_to_instance(file)) {
video = p;
break;
}
}
}
spin_unlock_irqrestore(&video1394_cards_lock, flags);
if (video == NULL) {
PRINT_G(KERN_ERR, "%s: Unknown video card for minor %d",
__FUNCTION__, ieee1394_file_to_instance(file));
return -EFAULT;
}
lock_kernel();
ohci = video->ohci;
if (video->current_ctx == NULL) {
if (ctx->current_ctx == NULL) {
PRINT(KERN_ERR, ohci->id, "Current iso context not set");
} else
res = do_iso_mmap(vma, ohci, video->current_ctx,
(char *)vma->vm_start,
(unsigned long)(vma->vm_end-vma->vm_start));
res = do_iso_mmap(ohci, ctx->current_ctx, vma);
unlock_kernel();
return res;
}
......@@ -1365,16 +1340,14 @@ static int video1394_open(struct inode *inode, struct file *file)
unsigned long flags;
struct video_card *video = NULL;
struct list_head *lh;
struct file_ctx *ctx;
spin_lock_irqsave(&video1394_cards_lock, flags);
if (!list_empty(&video1394_cards)) {
struct video_card *p;
list_for_each(lh, &video1394_cards) {
p = list_entry(lh, struct video_card, list);
if (p->id == i) {
video = p;
break;
}
list_for_each(lh, &video1394_cards) {
struct video_card *p = list_entry(lh, struct video_card, list);
if (p->id == i) {
video = p;
break;
}
}
spin_unlock_irqrestore(&video1394_cards_lock, flags);
......@@ -1382,76 +1355,50 @@ static int video1394_open(struct inode *inode, struct file *file)
if (video == NULL)
return -EIO;
V22_COMPAT_MOD_INC_USE_COUNT;
ctx = kmalloc(sizeof(struct file_ctx), GFP_KERNEL);
if (ctx == NULL) {
PRINT(KERN_ERR, video->ohci->id, "Cannot malloc file_ctx");
return -ENOMEM;
}
memset(ctx, 0, sizeof(struct file_ctx));
ctx->video = video;
INIT_LIST_HEAD(&ctx->context_list);
ctx->current_ctx = NULL;
file->private_data = ctx;
return 0;
}
static int video1394_release(struct inode *inode, struct file *file)
{
struct video_card *video = NULL;
struct ti_ohci *ohci;
struct file_ctx *ctx = (struct file_ctx *)file->private_data;
struct video_card *video = ctx->video;
struct ti_ohci *ohci = video->ohci;
struct list_head *lh, *next;
u64 mask;
int i;
unsigned long flags;
struct list_head *lh;
spin_lock_irqsave(&video1394_cards_lock, flags);
if (!list_empty(&video1394_cards)) {
struct video_card *p;
list_for_each(lh, &video1394_cards) {
p = list_entry(lh, struct video_card, list);
if (p->id == ieee1394_file_to_instance(file)) {
video = p;
break;
}
}
}
spin_unlock_irqrestore(&video1394_cards_lock, flags);
if (video == NULL) {
PRINT_G(KERN_ERR, "%s: Unknown device for minor %d",
__FUNCTION__, ieee1394_file_to_instance(file));
return 1;
lock_kernel();
list_for_each_safe(lh, next, &ctx->context_list) {
struct dma_iso_ctx *d;
d = list_entry(lh, struct dma_iso_ctx, link);
mask = (u64) 1 << d->channel;
if (!(ohci->ISO_channel_usage & mask))
PRINT(KERN_ERR, ohci->id, "On release: Channel %d "
"is not being used", d->channel);
else
ohci->ISO_channel_usage &= ~mask;
PRINT(KERN_INFO, ohci->id, "On release: Iso %s context "
"%d stop listening on channel %d",
d->type == ISO_RECEIVE ? "receive" : "transmit",
d->ctx, d->channel);
free_dma_iso_ctx(d);
}
ohci = video->ohci;
lock_kernel();
for (i=0;i<ohci->nb_iso_rcv_ctx-1;i++)
if (video->ir_context[i]) {
mask = (u64)0x1<<video->ir_context[i]->channel;
if (!(ohci->ISO_channel_usage & mask))
PRINT(KERN_ERR, ohci->id,
"On release: Channel %d is not being used",
video->ir_context[i]->channel);
else
ohci->ISO_channel_usage &= ~mask;
PRINT(KERN_INFO, ohci->id,
"Iso receive context %d stop listening "
"on channel %d", video->ir_context[i]->ctx,
video->ir_context[i]->channel);
free_dma_iso_ctx(&video->ir_context[i]);
}
kfree(ctx);
file->private_data = NULL;
for (i=0;i<ohci->nb_iso_xmit_ctx;i++)
if (video->it_context[i]) {
mask = (u64)0x1<<video->it_context[i]->channel;
if (!(ohci->ISO_channel_usage & mask))
PRINT(KERN_ERR, ohci->id,
"Channel %d is not being used",
video->it_context[i]->channel);
else
ohci->ISO_channel_usage &= ~mask;
PRINT(KERN_INFO, ohci->id,
"Iso transmit context %d stop talking "
"on channel %d", video->it_context[i]->ctx,
video->it_context[i]->channel);
free_dma_iso_ctx(&video->it_context[i]);
}
V22_COMPAT_MOD_DEC_USE_COUNT;
unlock_kernel();
return 0;
}
......@@ -1459,32 +1406,20 @@ static int video1394_release(struct inode *inode, struct file *file)
static void irq_handler(int card, quadlet_t isoRecvIntEvent,
quadlet_t isoXmitIntEvent, void *data)
{
int i;
struct video_card *video = (struct video_card*) data;
struct dma_iso_ctx *d = (struct dma_iso_ctx *) data;
if (video == NULL) {
PRINT_G(KERN_ERR, "%s: Unknown card number %d",
__FUNCTION__, card);
return;
}
DBGMSG(card, "Iso event Recv: %08x Xmit: %08x",
isoRecvIntEvent, isoXmitIntEvent);
for (i = 0; i < video->ohci->nb_iso_rcv_ctx-1; i++)
if (video->ir_context[i] != NULL &&
isoRecvIntEvent & (1<<(video->ir_context[i]->ctx)))
wakeup_dma_ir_ctx(video->ohci, video->ir_context[i]);
for (i = 0; i < video->ohci->nb_iso_xmit_ctx; i++)
if (video->it_context[i] != NULL &&
isoXmitIntEvent & (1<<(video->it_context[i]->ctx)))
wakeup_dma_it_ctx(video->ohci, video->it_context[i]);
if (d->type == ISO_RECEIVE && isoRecvIntEvent & (1 << d->ctx))
wakeup_dma_ir_ctx(d->ohci, d);
if (d->type == ISO_TRANSMIT && isoXmitIntEvent & (1 << d->ctx))
wakeup_dma_it_ctx(d->ohci, d);
}
static struct file_operations video1394_fops=
{
OWNER_THIS_MODULE
owner: THIS_MODULE,
ioctl: video1394_ioctl,
mmap: video1394_mmap,
open: video1394_open,
......@@ -1493,10 +1428,12 @@ static struct file_operations video1394_fops=
static int video1394_init(struct ti_ohci *ohci)
{
struct video_card *video = kmalloc(sizeof(struct video_card), GFP_KERNEL);
struct video_card *video;
unsigned long flags;
char name[16];
int minor;
video = kmalloc(sizeof(struct video_card), GFP_KERNEL);
if (video == NULL) {
PRINT(KERN_ERR, ohci->id, "Cannot allocate video_card");
return -1;
......@@ -1509,43 +1446,14 @@ static int video1394_init(struct ti_ohci *ohci)
list_add_tail(&video->list, &video1394_cards);
spin_unlock_irqrestore(&video1394_cards_lock, flags);
if (ohci1394_hook_irq(ohci, irq_handler, (void*) video) != 0) {
PRINT(KERN_ERR, ohci->id, "ohci1394_hook_irq() failed");
return -1;
}
video->id = ohci->id;
video->ohci = ohci;
/* Iso receive dma contexts */
video->ir_context = (struct dma_iso_ctx **)
kmalloc((ohci->nb_iso_rcv_ctx-1)*
sizeof(struct dma_iso_ctx *), GFP_KERNEL);
if (video->ir_context)
memset(video->ir_context, 0,
(ohci->nb_iso_rcv_ctx-1)*sizeof(struct dma_iso_ctx *));
else {
PRINT(KERN_ERR, ohci->id, "Cannot allocate ir_context");
return -1;
}
/* Iso transmit dma contexts */
video->it_context = (struct dma_iso_ctx **)
kmalloc(ohci->nb_iso_xmit_ctx *
sizeof(struct dma_iso_ctx *), GFP_KERNEL);
if (video->it_context)
memset(video->it_context, 0,
ohci->nb_iso_xmit_ctx * sizeof(struct dma_iso_ctx *));
else {
PRINT(KERN_ERR, ohci->id, "Cannot allocate it_context");
return -1;
}
sprintf(name, "%d", video->id);
minor = IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + video->id;
video->devfs = devfs_register(devfs_handle, name,
DEVFS_FL_AUTO_OWNER,
IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_VIDEO1394*16+video->id,
IEEE1394_MAJOR, minor,
S_IFCHR | S_IRUSR | S_IWUSR,
&video1394_fops, NULL);
......@@ -1555,27 +1463,7 @@ static int video1394_init(struct ti_ohci *ohci)
/* Must be called under spinlock */
static void remove_card(struct video_card *video)
{
int i;
ohci1394_unhook_irq(video->ohci, irq_handler, (void*) video);
devfs_unregister(video->devfs);
/* Free the iso receive contexts */
if (video->ir_context) {
for (i=0;i<video->ohci->nb_iso_rcv_ctx-1;i++) {
free_dma_iso_ctx(&video->ir_context[i]);
}
kfree(video->ir_context);
}
/* Free the iso transmit contexts */
if (video->it_context) {
for (i=0;i<video->ohci->nb_iso_xmit_ctx;i++) {
free_dma_iso_ctx(&video->it_context[i]);
}
kfree(video->it_context);
}
list_del(&video->list);
kfree(video);
......