Commit d7dfb07d authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'firewire-updates-6.12' of...

Merge tag 'firewire-updates-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394

Pull firewire updates from Takashi Sakamoto:
 "In the FireWire subsystem, tasklets have been used as the bottom half
  of 1394 OHCI hardIRQ. In recent kernel updates, BH workqueues have
  become available, and some developers have proposed replacing the
  tasklet with a BH workqueue.

  As a first step towards dropping tasklet use, the 1394 OHCI
  isochronous context can use regular workqueues. In this context, the
  batch of packets is processed in the specific queue, thus the timing
  jitter caused by task scheduling is not so critical.

   Additionally, DMA transmission can be scheduled on a per-packet basis,
   therefore the context can sleep between transmission operations.
   Furthermore, in-kernel protocol implementation involves some CPU-bound
   tasks, which can sometimes consume CPU time for quite a while.
   These characteristics suggest that normal workqueues are suitable,
   though BH workqueues are not.

   The replacement with a workqueue allows unit drivers to process the
   content of packets in non-atomic context. It brings some relief to
   some drivers in the sound subsystem, since a spin-lock is no longer
   mandatory during isochronous packet processing.

  Summary:

   - Replace tasklet with workqueue for isochronous context

   - Replace IDR with XArray

   - Utilize guard macro where possible

   - Print deprecation warning when enabling debug parameter of
     firewire-ohci module

   - Switch to nonatomic PCM operation"

* tag 'firewire-updates-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394: (55 commits)
  firewire: core: rename cause flag of tracepoints event
  firewire: core: update documentation of kernel APIs for flushing completions
  firewire: core: add helper function to retire descriptors
  Revert "firewire: core: move workqueue handler from 1394 OHCI driver to core function"
  Revert "firewire: core: use mutex to coordinate concurrent calls to flush completions"
  firewire: core: use mutex to coordinate concurrent calls to flush completions
  firewire: core: move workqueue handler from 1394 OHCI driver to core function
  firewire: core: fulfill documentation of fw_iso_context_flush_completions()
  firewire: core: expose kernel API to schedule work item to process isochronous context
  firewire: core: use WARN_ON_ONCE() to avoid superfluous dumps
  ALSA: firewire: use nonatomic PCM operation
  firewire: core: non-atomic memory allocation for isochronous event to user client
  firewire: ohci: operate IT/IR events in sleepable work process instead of tasklet softIRQ
  firewire: core: add local API to queue work item to workqueue specific to isochronous contexts
  firewire: core: allocate workqueue to handle isochronous contexts in card
  firewire: ohci: obsolete direct usage of printk_ratelimit()
  firewire: ohci: deprecate debug parameter
  firewire: core: update fw_device outside of device_find_child()
  firewire: ohci: fix error path to detect initiated reset in TI TSB41BA3D phy
  firewire: core/ohci: minor refactoring for computation of configuration ROM size
  ...
parents 3a378723 f1cba521
......@@ -43,6 +43,8 @@ Firewire core transaction interfaces
Firewire Isochronous I/O interfaces
===================================
.. kernel-doc:: include/linux/firewire.h
:functions: fw_iso_context_schedule_flush_completions
.. kernel-doc:: drivers/firewire/core-iso.c
:export:
......@@ -168,7 +168,6 @@ static size_t required_space(struct fw_descriptor *desc)
int fw_core_add_descriptor(struct fw_descriptor *desc)
{
size_t i;
int ret;
/*
* Check descriptor is valid; the length of all blocks in the
......@@ -182,29 +181,25 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
if (i != desc->length)
return -EINVAL;
mutex_lock(&card_mutex);
guard(mutex)(&card_mutex);
if (config_rom_length + required_space(desc) > 256)
return -EBUSY;
if (config_rom_length + required_space(desc) > 256) {
ret = -EBUSY;
} else {
list_add_tail(&desc->link, &descriptor_list);
config_rom_length += required_space(desc);
descriptor_count++;
if (desc->immediate > 0)
descriptor_count++;
update_config_roms();
ret = 0;
}
mutex_unlock(&card_mutex);
return ret;
return 0;
}
EXPORT_SYMBOL(fw_core_add_descriptor);
void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
mutex_lock(&card_mutex);
guard(mutex)(&card_mutex);
list_del(&desc->link);
config_rom_length -= required_space(desc);
......@@ -212,8 +207,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
if (desc->immediate > 0)
descriptor_count--;
update_config_roms();
mutex_unlock(&card_mutex);
}
EXPORT_SYMBOL(fw_core_remove_descriptor);
......@@ -381,11 +374,11 @@ static void bm_work(struct work_struct *work)
bm_id = be32_to_cpu(transaction_data[0]);
spin_lock_irq(&card->lock);
scoped_guard(spinlock_irq, &card->lock) {
if (rcode == RCODE_COMPLETE && generation == card->generation)
card->bm_node_id =
bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
spin_unlock_irq(&card->lock);
}
if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
/* Somebody else is BM. Only act as IRM. */
......@@ -578,25 +571,47 @@ void fw_card_initialize(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_initialize);
int fw_card_add(struct fw_card *card,
u32 max_receive, u32 link_speed, u64 guid)
int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
unsigned int supported_isoc_contexts)
{
struct workqueue_struct *isoc_wq;
int ret;
// This workqueue should be:
// * != WQ_BH Sleepable.
// * == WQ_UNBOUND Any core can process data for isoc context. The
// implementation of a unit protocol could consume the core
// for a long time.
// * != WQ_MEM_RECLAIM Not used for any backend of block device.
// * == WQ_FREEZABLE Isochronous communication is at regular interval in real
// time, thus should be drained if possible at freeze phase.
// * == WQ_HIGHPRI High priority to process semi-realtime timestamped data.
// * == WQ_SYSFS Parameters are available via sysfs.
// * max_active == n_it + n_ir A hardIRQ could notify events for multiple isochronous
// contexts if they are scheduled to the same cycle.
isoc_wq = alloc_workqueue("firewire-isoc-card%u",
WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
supported_isoc_contexts, card->index);
if (!isoc_wq)
return -ENOMEM;
card->max_receive = max_receive;
card->link_speed = link_speed;
card->guid = guid;
mutex_lock(&card_mutex);
guard(mutex)(&card_mutex);
generate_config_rom(card, tmp_config_rom);
ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
if (ret == 0)
list_add_tail(&card->link, &card_list);
if (ret < 0) {
destroy_workqueue(isoc_wq);
return ret;
}
mutex_unlock(&card_mutex);
card->isoc_wq = isoc_wq;
list_add_tail(&card->link, &card_list);
return ret;
return 0;
}
EXPORT_SYMBOL(fw_card_add);
......@@ -714,29 +729,31 @@ EXPORT_SYMBOL_GPL(fw_card_release);
void fw_core_remove_card(struct fw_card *card)
{
struct fw_card_driver dummy_driver = dummy_driver_template;
unsigned long flags;
might_sleep();
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fw_schedule_bus_reset(card, false, true);
mutex_lock(&card_mutex);
scoped_guard(mutex, &card_mutex)
list_del_init(&card->link);
mutex_unlock(&card_mutex);
/* Switch off most of the card driver interface. */
dummy_driver.free_iso_context = card->driver->free_iso_context;
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
drain_workqueue(card->isoc_wq);
spin_lock_irqsave(&card->lock, flags);
scoped_guard(spinlock_irqsave, &card->lock)
fw_destroy_nodes(card);
spin_unlock_irqrestore(&card->lock, flags);
/* Wait for all users, especially device workqueue jobs, to finish. */
fw_card_put(card);
wait_for_completion(&card->done);
destroy_workqueue(card->isoc_wq);
WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);
......
This diff is collapsed.
This diff is collapsed.
......@@ -209,23 +209,63 @@ void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);
/**
* fw_iso_context_flush_completions() - process isochronous context in current process context.
* @ctx: the isochronous context
*
* Process the isochronous context in the current process context. The registered callback function
* is called when a queued packet buffer with the interrupt flag is completed, either after
* transmission in the IT context or after being filled in the IR context. Additionally, the
* callback function is also called for the packet buffer completed at last. Furthermore, the
* callback function is called as well when the header buffer in the context becomes full. If it is
* required to process the context asynchronously, fw_iso_context_schedule_flush_completions() is
* available instead.
*
* Context: Process context. May sleep due to disable_work_sync().
*/
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
int err;
trace_isoc_outbound_flush_completions(ctx);
trace_isoc_inbound_single_flush_completions(ctx);
trace_isoc_inbound_multiple_flush_completions(ctx);
return ctx->card->driver->flush_iso_completions(ctx);
might_sleep();
// Avoid deadlock due to a programming mistake.
if (WARN_ON_ONCE(current_work() == &ctx->work))
return 0;
disable_work_sync(&ctx->work);
err = ctx->card->driver->flush_iso_completions(ctx);
enable_work(&ctx->work);
return err;
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
int err;
trace_isoc_outbound_stop(ctx);
trace_isoc_inbound_single_stop(ctx);
trace_isoc_inbound_multiple_stop(ctx);
return ctx->card->driver->stop_iso(ctx);
might_sleep();
// Avoid deadlock due to a programming mistake.
if (WARN_ON_ONCE(current_work() == &ctx->work))
return 0;
err = ctx->card->driver->stop_iso(ctx);
cancel_work_sync(&ctx->work);
return err;
}
EXPORT_SYMBOL(fw_iso_context_stop);
......@@ -375,9 +415,8 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
int irm_id, ret, c = -EINVAL;
spin_lock_irq(&card->lock);
scoped_guard(spinlock_irq, &card->lock)
irm_id = card->irm_node->node_id;
spin_unlock_irq(&card->lock);
if (channels_hi)
c = manage_channel(card, irm_id, generation, channels_hi,
......
......@@ -39,7 +39,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
node->initiated_reset = phy_packet_self_id_zero_get_initiated_reset(sid);
node->port_count = port_count;
refcount_set(&node->ref_count, 1);
kref_init(&node->kref);
INIT_LIST_HEAD(&node->link);
return node;
......@@ -455,11 +455,10 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
int self_id_count, u32 *self_ids, bool bm_abdicate)
{
struct fw_node *local_node;
unsigned long flags;
trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
spin_lock_irqsave(&card->lock, flags);
guard(spinlock_irqsave)(&card->lock);
/*
* If the selfID buffer is not the immediate successor of the
......@@ -500,7 +499,5 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
} else {
update_tree(card, local_node);
}
spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
......@@ -13,7 +13,6 @@
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
......@@ -49,35 +48,31 @@ static int close_transaction(struct fw_transaction *transaction, struct fw_card
u32 response_tstamp)
{
struct fw_transaction *t = NULL, *iter;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
scoped_guard(spinlock_irqsave, &card->lock) {
list_for_each_entry(iter, &card->transaction_list, link) {
if (iter == transaction) {
if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
if (try_cancel_split_timeout(iter)) {
list_del_init(&iter->link);
card->tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
}
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
}
if (!t)
return -ENOENT;
if (t) {
if (!t->with_tstamp) {
t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
} else {
t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp,
NULL, 0, t->callback_data);
}
return 0;
t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, NULL, 0,
t->callback_data);
}
timed_out:
return -ENOENT;
return 0;
}
/*
......@@ -121,16 +116,13 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
{
struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
struct fw_card *card = t->card;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
if (list_empty(&t->link)) {
spin_unlock_irqrestore(&card->lock, flags);
scoped_guard(spinlock_irqsave, &card->lock) {
if (list_empty(&t->link))
return;
}
list_del(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
spin_unlock_irqrestore(&card->lock, flags);
}
if (!t->with_tstamp) {
t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
......@@ -143,20 +135,14 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
static void start_split_transaction_timeout(struct fw_transaction *t,
struct fw_card *card)
{
unsigned long flags;
guard(spinlock_irqsave)(&card->lock);
spin_lock_irqsave(&card->lock, flags);
if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
spin_unlock_irqrestore(&card->lock, flags);
if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
return;
}
t->is_split_transaction = true;
mod_timer(&t->split_timeout_timer,
jiffies + card->split_timeout_jiffies);
spin_unlock_irqrestore(&card->lock, flags);
}
static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);
......@@ -464,7 +450,6 @@ static void transmit_phy_packet_callback(struct fw_packet *packet,
static struct fw_packet phy_config_packet = {
.header_length = 12,
.header[0] = TCODE_LINK_INTERNAL << 4,
.payload_length = 0,
.speed = SCODE_100,
.callback = transmit_phy_packet_callback,
......@@ -495,8 +480,9 @@ void fw_send_phy_config(struct fw_card *card,
phy_packet_phy_config_set_gap_count(&data, gap_count);
phy_packet_phy_config_set_gap_count_optimization(&data, true);
mutex_lock(&phy_config_mutex);
guard(mutex)(&phy_config_mutex);
async_header_set_tcode(phy_config_packet.header, TCODE_LINK_INTERNAL);
phy_config_packet.header[1] = data;
phy_config_packet.header[2] = ~data;
phy_config_packet.generation = generation;
......@@ -508,8 +494,6 @@ void fw_send_phy_config(struct fw_card *card,
card->driver->send_request(card, &phy_config_packet);
wait_for_completion_timeout(&phy_config_done, timeout);
mutex_unlock(&phy_config_mutex);
}
static struct fw_address_handler *lookup_overlapping_address_handler(
......@@ -598,7 +582,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
handler->length == 0)
return -EINVAL;
spin_lock(&address_handler_list_lock);
guard(spinlock)(&address_handler_list_lock);
handler->offset = region->start;
while (handler->offset + handler->length <= region->end) {
......@@ -617,8 +601,6 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
}
}
spin_unlock(&address_handler_list_lock);
return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);
......@@ -634,9 +616,9 @@ EXPORT_SYMBOL(fw_core_add_address_handler);
*/
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
spin_lock(&address_handler_list_lock);
scoped_guard(spinlock, &address_handler_list_lock)
list_del_rcu(&handler->link);
spin_unlock(&address_handler_list_lock);
synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
......@@ -927,16 +909,14 @@ static void handle_exclusive_region_request(struct fw_card *card,
if (tcode == TCODE_LOCK_REQUEST)
tcode = 0x10 + async_header_get_extended_tcode(p->header);
rcu_read_lock();
handler = lookup_enclosing_address_handler(&address_handler_list,
offset, request->length);
scoped_guard(rcu) {
handler = lookup_enclosing_address_handler(&address_handler_list, offset,
request->length);
if (handler)
handler->address_callback(card, request,
tcode, destination, source,
p->generation, offset,
request->data, request->length,
handler->callback_data);
rcu_read_unlock();
handler->address_callback(card, request, tcode, destination, source,
p->generation, offset, request->data,
request->length, handler->callback_data);
}
if (!handler)
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
......@@ -969,17 +949,14 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}
rcu_read_lock();
scoped_guard(rcu) {
list_for_each_entry_rcu(handler, &address_handler_list, link) {
if (is_enclosing_handler(handler, offset, request->length))
handler->address_callback(card, request, tcode,
destination, source,
p->generation, offset,
request->data,
request->length,
handler->callback_data);
handler->address_callback(card, request, tcode, destination, source,
p->generation, offset, request->data,
request->length, handler->callback_data);
}
}
rcu_read_unlock();
fw_send_response(card, request, RCODE_COMPLETE);
}
......@@ -1024,7 +1001,6 @@ EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
struct fw_transaction *t = NULL, *iter;
unsigned long flags;
u32 *data;
size_t data_length;
int tcode, tlabel, source, rcode;
......@@ -1063,26 +1039,23 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
spin_lock_irqsave(&card->lock, flags);
scoped_guard(spinlock_irqsave, &card->lock) {
list_for_each_entry(iter, &card->transaction_list, link) {
if (iter->node_id == source && iter->tlabel == tlabel) {
if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
if (try_cancel_split_timeout(iter)) {
list_del_init(&iter->link);
card->tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
}
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
}
trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
p->timestamp, p->header, data, data_length / 4);
if (!t) {
timed_out:
fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
return;
......@@ -1186,7 +1159,6 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
int reg = offset & ~CSR_REGISTER_BASE;
__be32 *data = payload;
int rcode = RCODE_COMPLETE;
unsigned long flags;
switch (reg) {
case CSR_PRIORITY_BUDGET:
......@@ -1228,10 +1200,10 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
if (tcode == TCODE_READ_QUADLET_REQUEST) {
*data = cpu_to_be32(card->split_timeout_hi);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
spin_lock_irqsave(&card->lock, flags);
guard(spinlock_irqsave)(&card->lock);
card->split_timeout_hi = be32_to_cpu(*data) & 7;
update_split_timeout(card);
spin_unlock_irqrestore(&card->lock, flags);
} else {
rcode = RCODE_TYPE_ERROR;
}
......@@ -1241,11 +1213,10 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
if (tcode == TCODE_READ_QUADLET_REQUEST) {
*data = cpu_to_be32(card->split_timeout_lo);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
spin_lock_irqsave(&card->lock, flags);
card->split_timeout_lo =
be32_to_cpu(*data) & 0xfff80000;
guard(spinlock_irqsave)(&card->lock);
card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
update_split_timeout(card);
spin_unlock_irqrestore(&card->lock, flags);
} else {
rcode = RCODE_TYPE_ERROR;
}
......@@ -1387,7 +1358,7 @@ static void __exit fw_core_cleanup(void)
unregister_chrdev(fw_cdev_major, "firewire");
bus_unregister(&fw_bus_type);
destroy_workqueue(fw_workqueue);
idr_destroy(&fw_device_idr);
xa_destroy(&fw_device_xa);
}
module_init(fw_core_init);
......
......@@ -7,7 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/xarray.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
......@@ -115,8 +115,8 @@ struct fw_card_driver {
void fw_card_initialize(struct fw_card *card,
const struct fw_card_driver *driver, struct device *device);
int fw_card_add(struct fw_card *card,
u32 max_receive, u32 link_speed, u64 guid);
int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
unsigned int supported_isoc_contexts);
void fw_core_remove_card(struct fw_card *card);
int fw_compute_block_crc(__be32 *block);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
......@@ -133,7 +133,7 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);
/* -device */
extern struct rw_semaphore fw_device_rwsem;
extern struct idr fw_device_idr;
extern struct xarray fw_device_xa;
extern int fw_cdev_major;
static inline struct fw_device *fw_device_get(struct fw_device *device)
......@@ -159,6 +159,11 @@ int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction);
static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_func_t func)
{
INIT_WORK(&ctx->work, func);
}
/* -topology */
......@@ -183,7 +188,8 @@ struct fw_node {
* local node to this node. */
u8 max_depth:4; /* Maximum depth to any leaf node */
u8 max_hops:4; /* Max hops in this sub tree */
refcount_t ref_count;
struct kref kref;
/* For serializing node topology into a list. */
struct list_head link;
......@@ -196,17 +202,23 @@ struct fw_node {
static inline struct fw_node *fw_node_get(struct fw_node *node)
{
refcount_inc(&node->ref_count);
kref_get(&node->kref);
return node;
}
static inline void fw_node_put(struct fw_node *node)
static void release_node(struct kref *kref)
{
if (refcount_dec_and_test(&node->ref_count))
struct fw_node *node = container_of(kref, struct fw_node, kref);
kfree(node);
}
static inline void fw_node_put(struct fw_node *node)
{
kref_put(&node->kref, release_node);
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);
......
......@@ -40,9 +40,75 @@ static void test_self_id_receive_buffer_deserialization(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0xf38b, timestamp);
}
// Round-trip test for the AT DMA descriptor field accessors: deserialize a
// reference wire image, assert every field, then serialize the fields back
// and compare against the reference.
static void test_at_data_serdes(struct kunit *test)
{
	static const __le32 expected[] = {
		cpu_to_le32(0x00020e80),
		cpu_to_le32(0xffc2ffff),
		cpu_to_le32(0xe0000000),
	};
	__le32 quadlets[] = {0, 0, 0};
	bool has_src_bus_id = ohci1394_at_data_get_src_bus_id(expected);
	unsigned int speed = ohci1394_at_data_get_speed(expected);
	unsigned int tlabel = ohci1394_at_data_get_tlabel(expected);
	unsigned int retry = ohci1394_at_data_get_retry(expected);
	unsigned int tcode = ohci1394_at_data_get_tcode(expected);
	unsigned int destination_id = ohci1394_at_data_get_destination_id(expected);
	u64 destination_offset = ohci1394_at_data_get_destination_offset(expected);

	KUNIT_EXPECT_FALSE(test, has_src_bus_id);
	KUNIT_EXPECT_EQ(test, 0x02, speed);
	KUNIT_EXPECT_EQ(test, 0x03, tlabel);
	KUNIT_EXPECT_EQ(test, 0x02, retry);
	KUNIT_EXPECT_EQ(test, 0x08, tcode);
	// Pin the quadlet-1/2 fields explicitly as well; previously they were
	// only checked implicitly via the serialization round-trip below.
	KUNIT_EXPECT_EQ(test, 0xffc2, destination_id);
	KUNIT_EXPECT_EQ(test, 0x0000ffffe0000000, destination_offset);

	ohci1394_at_data_set_src_bus_id(quadlets, has_src_bus_id);
	ohci1394_at_data_set_speed(quadlets, speed);
	ohci1394_at_data_set_tlabel(quadlets, tlabel);
	ohci1394_at_data_set_retry(quadlets, retry);
	ohci1394_at_data_set_tcode(quadlets, tcode);
	ohci1394_at_data_set_destination_id(quadlets, destination_id);
	ohci1394_at_data_set_destination_offset(quadlets, destination_offset);

	KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
}
// Round-trip test for the IT DMA descriptor field accessors.
static void test_it_data_serdes(struct kunit *test)
{
	// Reference wire image of the first two quadlets of an IT DMA descriptor.
	static const __le32 reference[] = {
		cpu_to_le32(0x000349a7),
		cpu_to_le32(0x02300000),
	};
	__le32 serialized[] = {0, 0};
	unsigned int scode = ohci1394_it_data_get_speed(reference);
	unsigned int tag = ohci1394_it_data_get_tag(reference);
	unsigned int channel = ohci1394_it_data_get_channel(reference);
	unsigned int tcode = ohci1394_it_data_get_tcode(reference);
	unsigned int sync = ohci1394_it_data_get_sync(reference);
	unsigned int data_length = ohci1394_it_data_get_data_length(reference);

	// Each field must deserialize to the value encoded in the reference image.
	KUNIT_EXPECT_EQ(test, 0x03, scode);
	KUNIT_EXPECT_EQ(test, 0x01, tag);
	KUNIT_EXPECT_EQ(test, 0x09, channel);
	KUNIT_EXPECT_EQ(test, 0x0a, tcode);
	KUNIT_EXPECT_EQ(test, 0x7, sync);
	KUNIT_EXPECT_EQ(test, 0x0230, data_length);

	// Serializing the deserialized fields must reproduce the reference image.
	ohci1394_it_data_set_speed(serialized, scode);
	ohci1394_it_data_set_tag(serialized, tag);
	ohci1394_it_data_set_channel(serialized, channel);
	ohci1394_it_data_set_tcode(serialized, tcode);
	ohci1394_it_data_set_sync(serialized, sync);
	ohci1394_it_data_set_data_length(serialized, data_length);

	KUNIT_EXPECT_MEMEQ(test, serialized, reference, sizeof(reference));
}
// Registry of KUnit cases covering serialization/deserialization helpers for
// 1394 OHCI registers and DMA descriptors; terminated by the empty sentinel.
static struct kunit_case ohci_serdes_test_cases[] = {
KUNIT_CASE(test_self_id_count_register_deserialization),
KUNIT_CASE(test_self_id_receive_buffer_deserialization),
KUNIT_CASE(test_at_data_serdes),
KUNIT_CASE(test_it_data_serdes),
{}
};
......
This diff is collapsed.
......@@ -153,7 +153,205 @@
#define OHCI1394_evt_unknown 0xe
#define OHCI1394_evt_flushed 0xf
#define OHCI1394_phy_tcode 0xe
// Asynchronous Transmit DMA.
//
// The content of first two quadlets of data for AT DMA is different from the header for IEEE 1394
// asynchronous packet.
#define OHCI1394_AT_DATA_Q0_srcBusID_MASK 0x00800000
#define OHCI1394_AT_DATA_Q0_srcBusID_SHIFT 23
#define OHCI1394_AT_DATA_Q0_spd_MASK 0x00070000
#define OHCI1394_AT_DATA_Q0_spd_SHIFT 16
#define OHCI1394_AT_DATA_Q0_tLabel_MASK 0x0000fc00
#define OHCI1394_AT_DATA_Q0_tLabel_SHIFT 10
#define OHCI1394_AT_DATA_Q0_rt_MASK 0x00000300
#define OHCI1394_AT_DATA_Q0_rt_SHIFT 8
#define OHCI1394_AT_DATA_Q0_tCode_MASK 0x000000f0
#define OHCI1394_AT_DATA_Q0_tCode_SHIFT 4
#define OHCI1394_AT_DATA_Q1_destinationId_MASK 0xffff0000
#define OHCI1394_AT_DATA_Q1_destinationId_SHIFT 16
#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK 0x0000ffff
#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT 0
#define OHCI1394_AT_DATA_Q1_rCode_MASK 0x0000f000
#define OHCI1394_AT_DATA_Q1_rCode_SHIFT 12
// srcBusID flag in quadlet 0 of the AT DMA descriptor.
static inline bool ohci1394_at_data_get_src_bus_id(const __le32 *data)
{
	// Convert from little-endian before masking so the accessor is also
	// correct on big-endian hosts, consistent with the sibling getters.
	return !!((le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_srcBusID_MASK) >> OHCI1394_AT_DATA_Q0_srcBusID_SHIFT);
}
// Store the srcBusID flag into quadlet 0, preserving the other fields.
static inline void ohci1394_at_data_set_src_bus_id(__le32 *data, bool src_bus_id)
{
	u32 q0 = le32_to_cpu(data[0]);

	q0 &= ~OHCI1394_AT_DATA_Q0_srcBusID_MASK;
	q0 |= (src_bus_id << OHCI1394_AT_DATA_Q0_srcBusID_SHIFT) & OHCI1394_AT_DATA_Q0_srcBusID_MASK;
	data[0] = cpu_to_le32(q0);
}
static inline unsigned int ohci1394_at_data_get_speed(const __le32 *data)
{
return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_spd_MASK) >> OHCI1394_AT_DATA_Q0_spd_SHIFT;
}
// Store the spd (speed code) field into quadlet 0, preserving the other fields.
static inline void ohci1394_at_data_set_speed(__le32 *data, unsigned int scode)
{
	u32 q0 = le32_to_cpu(data[0]);

	q0 &= ~OHCI1394_AT_DATA_Q0_spd_MASK;
	q0 |= (scode << OHCI1394_AT_DATA_Q0_spd_SHIFT) & OHCI1394_AT_DATA_Q0_spd_MASK;
	data[0] = cpu_to_le32(q0);
}
static inline unsigned int ohci1394_at_data_get_tlabel(const __le32 *data)
{
return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tLabel_MASK) >> OHCI1394_AT_DATA_Q0_tLabel_SHIFT;
}
// Store the tLabel field into quadlet 0, preserving the other fields.
static inline void ohci1394_at_data_set_tlabel(__le32 *data, unsigned int tlabel)
{
	u32 q0 = le32_to_cpu(data[0]);

	q0 &= ~OHCI1394_AT_DATA_Q0_tLabel_MASK;
	q0 |= (tlabel << OHCI1394_AT_DATA_Q0_tLabel_SHIFT) & OHCI1394_AT_DATA_Q0_tLabel_MASK;
	data[0] = cpu_to_le32(q0);
}
static inline unsigned int ohci1394_at_data_get_retry(const __le32 *data)
{
return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_rt_MASK) >> OHCI1394_AT_DATA_Q0_rt_SHIFT;
}
// Store the rt (retry code) field into quadlet 0, preserving the other fields.
static inline void ohci1394_at_data_set_retry(__le32 *data, unsigned int retry)
{
	u32 q0 = le32_to_cpu(data[0]);

	q0 &= ~OHCI1394_AT_DATA_Q0_rt_MASK;
	q0 |= (retry << OHCI1394_AT_DATA_Q0_rt_SHIFT) & OHCI1394_AT_DATA_Q0_rt_MASK;
	data[0] = cpu_to_le32(q0);
}
static inline unsigned int ohci1394_at_data_get_tcode(const __le32 *data)
{
return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tCode_MASK) >> OHCI1394_AT_DATA_Q0_tCode_SHIFT;
}
// Store the tCode field into quadlet 0, preserving the other fields.
static inline void ohci1394_at_data_set_tcode(__le32 *data, unsigned int tcode)
{
	u32 q0 = le32_to_cpu(data[0]);

	q0 &= ~OHCI1394_AT_DATA_Q0_tCode_MASK;
	q0 |= (tcode << OHCI1394_AT_DATA_Q0_tCode_SHIFT) & OHCI1394_AT_DATA_Q0_tCode_MASK;
	data[0] = cpu_to_le32(q0);
}
static inline unsigned int ohci1394_at_data_get_destination_id(const __le32 *data)
{
return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationId_MASK) >> OHCI1394_AT_DATA_Q1_destinationId_SHIFT;
}
// Store the destinationId field into quadlet 1, preserving the other fields.
static inline void ohci1394_at_data_set_destination_id(__le32 *data, unsigned int destination_id)
{
	u32 q1 = le32_to_cpu(data[1]);

	q1 &= ~OHCI1394_AT_DATA_Q1_destinationId_MASK;
	q1 |= (destination_id << OHCI1394_AT_DATA_Q1_destinationId_SHIFT) & OHCI1394_AT_DATA_Q1_destinationId_MASK;
	data[1] = cpu_to_le32(q1);
}
// The 48-bit destination offset is split across the descriptor: the upper
// 16 bits live in quadlet 1 (destinationOffsetHigh) and the lower 32 bits
// occupy all of quadlet 2.
static inline u64 ohci1394_at_data_get_destination_offset(const __le32 *data)
{
	u64 hi = (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK) >> OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT;
	u64 lo = le32_to_cpu(data[2]);

	return (hi << 32) | lo;
}
// Split the 48-bit offset: upper 16 bits go into quadlet 1
// (destinationOffsetHigh), lower 32 bits fill quadlet 2.
static inline void ohci1394_at_data_set_destination_offset(__le32 *data, u64 offset)
{
	u32 hi = (u32)(offset >> 32);
	u32 q1 = le32_to_cpu(data[1]);

	q1 &= ~OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK;
	q1 |= (hi << OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK;
	data[1] = cpu_to_le32(q1);
	data[2] = cpu_to_le32((u32)offset);
}
static inline unsigned int ohci1394_at_data_get_rcode(const __le32 *data)
{
return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_rCode_MASK) >> OHCI1394_AT_DATA_Q1_rCode_SHIFT;
}
// Store the rCode field into quadlet 1, preserving the other fields.
static inline void ohci1394_at_data_set_rcode(__le32 *data, unsigned int rcode)
{
	u32 q1 = le32_to_cpu(data[1]);

	q1 &= ~OHCI1394_AT_DATA_Q1_rCode_MASK;
	q1 |= (rcode << OHCI1394_AT_DATA_Q1_rCode_SHIFT) & OHCI1394_AT_DATA_Q1_rCode_MASK;
	data[1] = cpu_to_le32(q1);
}
// Isochronous Transmit DMA.
//
// The content of first two quadlets of data for IT DMA is different from the header for IEEE 1394
// isochronous packet.
#define OHCI1394_IT_DATA_Q0_spd_MASK 0x00070000
#define OHCI1394_IT_DATA_Q0_spd_SHIFT 16
#define OHCI1394_IT_DATA_Q0_tag_MASK 0x0000c000
#define OHCI1394_IT_DATA_Q0_tag_SHIFT 14
#define OHCI1394_IT_DATA_Q0_chanNum_MASK 0x00003f00
#define OHCI1394_IT_DATA_Q0_chanNum_SHIFT 8
#define OHCI1394_IT_DATA_Q0_tcode_MASK 0x000000f0
#define OHCI1394_IT_DATA_Q0_tcode_SHIFT 4
#define OHCI1394_IT_DATA_Q0_sy_MASK 0x0000000f
#define OHCI1394_IT_DATA_Q0_sy_SHIFT 0
#define OHCI1394_IT_DATA_Q1_dataLength_MASK 0xffff0000
#define OHCI1394_IT_DATA_Q1_dataLength_SHIFT 16
static inline unsigned int ohci1394_it_data_get_speed(const __le32 *data)
{
return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_spd_MASK) >> OHCI1394_IT_DATA_Q0_spd_SHIFT;
}
// Store the spd field into quadlet 0, preserving the other fields.
static inline void ohci1394_it_data_set_speed(__le32 *data, unsigned int scode)
{
	u32 q0 = le32_to_cpu(data[0]);

	q0 &= ~OHCI1394_IT_DATA_Q0_spd_MASK;
	q0 |= (scode << OHCI1394_IT_DATA_Q0_spd_SHIFT) & OHCI1394_IT_DATA_Q0_spd_MASK;
	data[0] = cpu_to_le32(q0);
}
static inline unsigned int ohci1394_it_data_get_tag(const __le32 *data)
{
return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tag_MASK) >> OHCI1394_IT_DATA_Q0_tag_SHIFT;
}
// Store the tag field into quadlet 0, preserving the other fields.
static inline void ohci1394_it_data_set_tag(__le32 *data, unsigned int tag)
{
	u32 q0 = le32_to_cpu(data[0]);

	q0 &= ~OHCI1394_IT_DATA_Q0_tag_MASK;
	q0 |= (tag << OHCI1394_IT_DATA_Q0_tag_SHIFT) & OHCI1394_IT_DATA_Q0_tag_MASK;
	data[0] = cpu_to_le32(q0);
}
static inline unsigned int ohci1394_it_data_get_channel(const __le32 *data)
{
return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_chanNum_MASK) >> OHCI1394_IT_DATA_Q0_chanNum_SHIFT;
}
// Store the chanNum field into quadlet 0, preserving the other fields.
static inline void ohci1394_it_data_set_channel(__le32 *data, unsigned int channel)
{
	u32 q0 = le32_to_cpu(data[0]);

	q0 &= ~OHCI1394_IT_DATA_Q0_chanNum_MASK;
	q0 |= (channel << OHCI1394_IT_DATA_Q0_chanNum_SHIFT) & OHCI1394_IT_DATA_Q0_chanNum_MASK;
	data[0] = cpu_to_le32(q0);
}
static inline unsigned int ohci1394_it_data_get_tcode(const __le32 *data)
{
return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tcode_MASK) >> OHCI1394_IT_DATA_Q0_tcode_SHIFT;
}
// Store the given transaction code into the tcode field of the first quadlet.
static inline void ohci1394_it_data_set_tcode(__le32 *data, unsigned int tcode)
{
	unsigned int quadlet = le32_to_cpu(data[0]);

	quadlet &= ~OHCI1394_IT_DATA_Q0_tcode_MASK;
	quadlet |= (tcode << OHCI1394_IT_DATA_Q0_tcode_SHIFT) & OHCI1394_IT_DATA_Q0_tcode_MASK;
	data[0] = cpu_to_le32(quadlet);
}
// Extract the synchronization (sy) field from the first quadlet of IT packet data.
static inline unsigned int ohci1394_it_data_get_sync(const __le32 *data)
{
	unsigned int quadlet = le32_to_cpu(data[0]);

	return (quadlet & OHCI1394_IT_DATA_Q0_sy_MASK) >> OHCI1394_IT_DATA_Q0_sy_SHIFT;
}
// Store the given synchronization code into the sy field of the first quadlet.
static inline void ohci1394_it_data_set_sync(__le32 *data, unsigned int sync)
{
	unsigned int quadlet = le32_to_cpu(data[0]);

	quadlet &= ~OHCI1394_IT_DATA_Q0_sy_MASK;
	quadlet |= (sync << OHCI1394_IT_DATA_Q0_sy_SHIFT) & OHCI1394_IT_DATA_Q0_sy_MASK;
	data[0] = cpu_to_le32(quadlet);
}
// Extract the dataLength field from the second quadlet of IT packet data.
static inline unsigned int ohci1394_it_data_get_data_length(const __le32 *data)
{
	unsigned int quadlet = le32_to_cpu(data[1]);

	return (quadlet & OHCI1394_IT_DATA_Q1_dataLength_MASK) >> OHCI1394_IT_DATA_Q1_dataLength_SHIFT;
}
// Store the given length into the dataLength field of the second quadlet.
static inline void ohci1394_it_data_set_data_length(__le32 *data, unsigned int data_length)
{
	unsigned int quadlet = le32_to_cpu(data[1]);

	quadlet &= ~OHCI1394_IT_DATA_Q1_dataLength_MASK;
	quadlet |= (data_length << OHCI1394_IT_DATA_Q1_dataLength_SHIFT) & OHCI1394_IT_DATA_Q1_dataLength_MASK;
	data[1] = cpu_to_le32(quadlet);
}
// Self-ID DMA.
......
......@@ -134,6 +134,8 @@ struct fw_card {
__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
__be32 maint_utility_register;
struct workqueue_struct *isoc_wq;
};
static inline struct fw_card *fw_card_get(struct fw_card *card)
......@@ -509,6 +511,7 @@ union fw_iso_callback {
struct fw_iso_context {
struct fw_card *card;
struct work_struct work;
int type;
int channel;
int speed;
......@@ -528,6 +531,25 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
unsigned long payload);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
/**
* fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context.
* @ctx: the isochronous context
*
* Schedule a work item on workqueue to process the isochronous context. The registered callback
* function is called by the worker when a queued packet buffer with the interrupt flag is
* completed, either after transmission in the IT context or after being filled in the IR context.
 * The callback function is also called when the header buffer in the context becomes full. If it
 * is required to process the context in the current context, fw_iso_context_flush_completions() is
 * available instead.
*
* Context: Any context.
*/
static inline void fw_iso_context_schedule_flush_completions(struct fw_iso_context *ctx)
{
	// Dispatch the context's work item onto the per-card isochronous workqueue.
	struct workqueue_struct *wq = ctx->card->isoc_wq;

	queue_work(wq, &ctx->work);
}
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
......
......@@ -830,13 +830,13 @@ TRACE_EVENT_CONDITION(isoc_inbound_multiple_queue,
#ifndef show_cause
enum fw_iso_context_completions_cause {
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH = 0,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW,
};
#define show_cause(cause) \
__print_symbolic(cause, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH, "FLUSH" }, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ, "IRQ" }, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT, "INTERRUPT" }, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW, "HEADER_OVERFLOW" } \
)
#endif
......
......@@ -615,6 +615,22 @@ static void update_pcm_pointers(struct amdtp_stream *s,
// The program in user process should periodically check the status of intermediate
// buffer associated to PCM substream to process PCM frames in the buffer, instead
// of receiving notification of period elapsed by poll wait.
//
// Use another work item for period elapsed event to prevent the following AB/BA
// deadlock:
//
// thread 1 thread 2
// ================================= =================================
// A.work item (process) pcm ioctl (process)
// v v
// process_rx_packets() B.PCM stream lock
// process_tx_packets() v
// v callbacks in snd_pcm_ops
// update_pcm_pointers() v
// snd_pcm_elapsed() fw_iso_context_flush_completions()
// snd_pcm_stream_lock_irqsave() disable_work_sync()
// v v
// wait until release of B wait until A exits
if (!pcm->runtime->no_period_wakeup)
queue_work(system_highpri_wq, &s->period_work);
}
......@@ -1055,8 +1071,15 @@ static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *de
static inline void cancel_stream(struct amdtp_stream *s)
{
struct work_struct *work = current_work();
s->packet_index = -1;
if (in_softirq())
// Detect work items for any isochronous context. The work item for pcm_period_work()
// should be avoided since the call of snd_pcm_period_elapsed() can reach via
// snd_pcm_ops.pointer() under acquiring PCM stream(group) lock and causes a deadlock at
// snd_pcm_stop_xrun().
if (work && work != &s->period_work)
amdtp_stream_pcm_abort(s);
WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
......@@ -1856,12 +1879,9 @@ unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
struct amdtp_stream *irq_target = d->irq_target;
if (irq_target && amdtp_stream_running(irq_target)) {
// use wq to prevent AB/BA deadlock competition for
// substream lock:
// fw_iso_context_flush_completions() acquires
// lock by ohci_flush_iso_completions(),
// amdtp-stream process_rx_packets() attempts to
// acquire same lock by snd_pcm_elapsed()
// The work item to call snd_pcm_period_elapsed() can reach here by the call of
// snd_pcm_ops.pointer(), however fewer packets would be available then. Therefore
// the following call is just for user process contexts.
if (current_work() != &s->period_work)
fw_iso_context_flush_completions(irq_target->context);
}
......
......@@ -367,6 +367,7 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
goto end;
pcm->private_data = bebob;
pcm->nonatomic = true;
snprintf(pcm->name, sizeof(pcm->name),
"%s PCM", bebob->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
......
......@@ -441,6 +441,7 @@ int snd_dice_create_pcm(struct snd_dice *dice)
if (err < 0)
return err;
pcm->private_data = dice;
pcm->nonatomic = true;
strcpy(pcm->name, dice->card->shortname);
if (capture > 0)
......
......@@ -350,6 +350,7 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
return err;
pcm->private_data = dg00x;
pcm->nonatomic = true;
snprintf(pcm->name, sizeof(pcm->name),
"%s PCM", dg00x->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
......
......@@ -390,6 +390,7 @@ int snd_ff_create_pcm_devices(struct snd_ff *ff)
return err;
pcm->private_data = ff;
pcm->nonatomic = true;
snprintf(pcm->name, sizeof(pcm->name),
"%s PCM", ff->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_playback_ops);
......
......@@ -397,6 +397,7 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
goto end;
pcm->private_data = efw;
pcm->nonatomic = true;
snprintf(pcm->name, sizeof(pcm->name), "%s PCM", efw->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
......
......@@ -454,6 +454,7 @@ static int isight_create_pcm(struct isight *isight)
if (err < 0)
return err;
pcm->private_data = isight;
pcm->nonatomic = true;
strcpy(pcm->name, "iSight");
isight->pcm = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
isight->pcm->ops = &ops;
......
......@@ -360,6 +360,7 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
if (err < 0)
return err;
pcm->private_data = motu;
pcm->nonatomic = true;
strcpy(pcm->name, motu->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
......
......@@ -440,6 +440,7 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
return err;
pcm->private_data = oxfw;
pcm->nonatomic = true;
strcpy(pcm->name, oxfw->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
if (cap > 0)
......
......@@ -279,6 +279,7 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
return err;
pcm->private_data = tscm;
pcm->nonatomic = true;
snprintf(pcm->name, sizeof(pcm->name),
"%s PCM", tscm->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment