Commit 8eb891fc authored by Linus Torvalds

Revert "USB: EHCI cpufreq fix"

This reverts commit 196705c9.  Daniel Exner reported that it causes a
regression, and Arjan van de Ven points out that we already have
infrastructure in place for setting limits on acceptable DMA latency, which
would be the more correct fix for the problem seen on some Broadcom EHCI
controllers.
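
For context, the infrastructure Arjan refers to is the acceptable-latency
interface of that kernel era (include/linux/latency.h).  A minimal sketch of
how a host controller driver might use it instead of pausing split-interrupt
QHs across cpufreq transitions follows; the wrapper functions, the identifier
string, and the 100 usec figure are illustrative assumptions, not taken from
this commit:

#include <linux/latency.h>

/* Illustrative sketch: cap transition latency so the controller's reads of
 * main memory are not stalled for longer than part of a microframe. */
static void ehci_limit_transition_latency(void)
{
	set_acceptable_latency("ehci-hcd", 100);	/* microseconds */
}

static void ehci_drop_transition_latency(void)
{
	remove_acceptable_latency("ehci-hcd");
}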

Fixed up trivial conflicts due to the changes to support big-endian host
controller descriptors in drivers/usb/host/{ehci-sched.c,ehci.h}.
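
Those conflicts stem from the __hc32 descriptor type and its helpers: where
the reverted patch used plain le32 accessors, the big-endian-capable tree
routes descriptor fields through cpu_to_hc32()/hc32_to_cpup(), which pick the
byte order per controller.  A small sketch of the pattern (the wrapper
function here is illustrative, not from the tree):

/* QH descriptor words are __hc32; always convert through the per-controller
 * helpers rather than the plain le32 ones. */
static inline void qh_mark_head(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	qh->hw_info1 |= cpu_to_hc32(ehci, QH_HEAD);
}
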
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 848c4dd5
@@ -275,58 +275,6 @@ static void ehci_work(struct ehci_hcd *ehci);
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_CPU_FREQ

#include <linux/cpufreq.h>

static void ehci_cpufreq_pause (struct ehci_hcd *ehci)
{
	unsigned long	flags;

	spin_lock_irqsave(&ehci->lock, flags);
	if (!ehci->cpufreq_changing++)
		qh_inactivate_split_intr_qhs(ehci);
	spin_unlock_irqrestore(&ehci->lock, flags);
}

static void ehci_cpufreq_unpause (struct ehci_hcd *ehci)
{
	unsigned long	flags;

	spin_lock_irqsave(&ehci->lock, flags);
	if (!--ehci->cpufreq_changing)
		qh_reactivate_split_intr_qhs(ehci);
	spin_unlock_irqrestore(&ehci->lock, flags);
}

/*
 * ehci_cpufreq_notifier is needed to avoid MMF errors that occur when
 * EHCI controllers that don't cache many uframes get delayed trying to
 * read main memory during CPU frequency transitions.  This can cause
 * split interrupt transactions to not be completed in the required uframe.
 * This has been observed on the Broadcom/ServerWorks HT1000 controller.
 */
static int ehci_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct ehci_hcd *ehci = container_of(nb, struct ehci_hcd,
					     cpufreq_transition);

	switch (val) {
	case CPUFREQ_PRECHANGE:
		ehci_cpufreq_pause(ehci);
		break;
	case CPUFREQ_POSTCHANGE:
		ehci_cpufreq_unpause(ehci);
		break;
	}
	return 0;
}

#endif
/*-------------------------------------------------------------------------*/
static void ehci_watchdog (unsigned long param)
{
	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
@@ -460,10 +408,6 @@ static void ehci_stop (struct usb_hcd *hcd)
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
	spin_unlock_irq(&ehci->lock);
#ifdef CONFIG_CPU_FREQ
	cpufreq_unregister_notifier(&ehci->cpufreq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);
#endif
	/* let companion controllers work when we aren't */
	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
@@ -569,17 +513,6 @@ static int ehci_init(struct usb_hcd *hcd)
	}

	ehci->command = temp;
#ifdef CONFIG_CPU_FREQ
	INIT_LIST_HEAD(&ehci->split_intr_qhs);
	/*
	 * If the EHCI controller caches enough uframes, this probably
	 * isn't needed unless there are so many low/full speed devices
	 * that the controller can't cache it all.
	 */
	ehci->cpufreq_transition.notifier_call = ehci_cpufreq_notifier;
	cpufreq_register_notifier(&ehci->cpufreq_transition,
				  CPUFREQ_TRANSITION_NOTIFIER);
#endif
	return 0;
}
@@ -94,9 +94,6 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
	qh->qh_dma = dma;
	// INIT_LIST_HEAD (&qh->qh_list);
	INIT_LIST_HEAD (&qh->qtd_list);
#ifdef CONFIG_CPU_FREQ
	INIT_LIST_HEAD (&qh->split_intr_qhs);
#endif
	/* dummy td enables safe urb queuing */
	qh->dummy = ehci_qtd_alloc (ehci, flags);
@@ -312,10 +312,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
		struct urb	*urb;
		u32		token = 0;
		/* ignore QHs that are currently inactive */
		if (qh->hw_info1 & __constant_cpu_to_le32(QH_INACTIVATE))
			break;
		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;
@@ -479,109 +479,6 @@ static int disable_periodic (struct ehci_hcd *ehci)
}

/*-------------------------------------------------------------------------*/
#ifdef CONFIG_CPU_FREQ

static int safe_to_modify_i (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int now; /* current (frame * 8) + uframe */
	int prev_start, next_start; /* uframes from/to split start */
	int start_uframe = ffs(le32_to_cpup (&qh->hw_info2) & QH_SMASK);
	int end_uframe = fls((le32_to_cpup (&qh->hw_info2) & QH_CMASK) >> 8);
	int split_duration = end_uframe - start_uframe;

	now = readl(&ehci->regs->frame_index) % (ehci->periodic_size << 3);

	next_start = ((1024 << 3) + (qh->start << 3) + start_uframe - now)
			% (qh->period << 3);
	prev_start = (qh->period << 3) - next_start;

	/*
	 * Make sure there will be at least one uframe when qh is safe.
	 */
	if ((qh->period << 3) <= (ehci->i_thresh + 2 + split_duration))
		/* never safe */
		return -EINVAL;

	/*
	 * Wait 1 uframe after transaction should have started, to make
	 * sure controller has time to write back overlay, so we can
	 * check QTD_STS_STS to see if transaction is in progress.
	 */
	if ((next_start > ehci->i_thresh) && (prev_start > 1))
		/* safe to set "i" bit if split isn't in progress */
		return (qh->hw_token & STATUS_BIT(ehci)) ? 0 : 1;
	else
		return 0;
}

/* Set inactivate bit for all the split interrupt QHs. */
static void qh_inactivate_split_intr_qhs (struct ehci_hcd *ehci)
{
	struct ehci_qh	*qh;
	int		not_done, safe;
	u32		inactivate = INACTIVATE_BIT(ehci);
	u32		active = ACTIVE_BIT(ehci);

	do {
		not_done = 0;
		list_for_each_entry(qh, &ehci->split_intr_qhs,
				split_intr_qhs) {
			if (qh->hw_info1 & inactivate)
				/* already off */
				continue;
			/*
			 * To avoid setting "I" after the start split happens,
			 * don't set it if the QH might be cached in the
			 * controller.  Some HCs (Broadcom/ServerWorks HT1000)
			 * will stop in the middle of a split transaction when
			 * the "I" bit is set.
			 */
			safe = safe_to_modify_i(ehci, qh);
			if (safe == 0) {
				not_done = 1;
			} else if (safe > 0) {
				qh->was_active = qh->hw_token & active;
				qh->hw_info1 |= inactivate;
			}
		}
	} while (not_done);
	wmb();
}

static void qh_reactivate_split_intr_qhs (struct ehci_hcd *ehci)
{
	struct ehci_qh	*qh;
	u32		token;
	int		not_done, safe;
	u32		inactivate = INACTIVATE_BIT(ehci);
	u32		active = ACTIVE_BIT(ehci);
	u32		halt = HALT_BIT(ehci);

	do {
		not_done = 0;
		list_for_each_entry(qh, &ehci->split_intr_qhs, split_intr_qhs) {
			if (!(qh->hw_info1 & inactivate)) /* already on */
				continue;
			/*
			 * Don't reactivate if cached, or controller might
			 * overwrite overlay after we modify it!
			 */
			safe = safe_to_modify_i(ehci, qh);
			if (safe == 0) {
				not_done = 1;
			} else if (safe > 0) {
				/* See EHCI 1.0 section 4.15.2.4. */
				token = qh->hw_token;
				qh->hw_token = (token | halt) & ~active;
				wmb();
				qh->hw_info1 &= ~inactivate;
				wmb();
				qh->hw_token = (token & ~halt) | qh->was_active;
			}
		}
	} while (not_done);
}
#endif
/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
@@ -599,17 +496,6 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
		period, hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);
#ifdef CONFIG_CPU_FREQ
	/*
	 * If low/full speed interrupt QHs are inactive (because of
	 * cpufreq changing processor speeds), start QH with I flag set--
	 * it will automatically be cleared when cpufreq is done.
	 */
	if (ehci->cpufreq_changing)
		if (!(qh->hw_info1 & (cpu_to_le32(1 << 13))))
			qh->hw_info1 |= INACTIVATE_BIT(ehci);
#endif
	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;
@@ -658,12 +544,6 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
			? ((qh->usecs + qh->c_usecs) / qh->period)
			: (qh->usecs * 8);
#ifdef CONFIG_CPU_FREQ
	/* add qh to list of low/full speed interrupt QHs, if applicable */
	if (!(qh->hw_info1 & (cpu_to_le32(1 << 13)))) {
		list_add(&qh->split_intr_qhs, &ehci->split_intr_qhs);
	}
#endif
	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		return enable_periodic (ehci);
@@ -683,13 +563,6 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
	// THEN
	//   qh->hw_info1 |= __constant_cpu_to_hc32(1 << 7 /* "ignore" */);
#ifdef CONFIG_CPU_FREQ
	/* remove qh from list of low/full speed interrupt QHs */
	if (!(qh->hw_info1 & (cpu_to_le32(1 << 13)))) {
		list_del_init(&qh->split_intr_qhs);
	}
#endif
	/* high bandwidth, or otherwise part of every microframe */
	if ((period = qh->period) == 0)
		period = 1;
@@ -71,12 +71,6 @@ struct ehci_hcd {		/* one per controller */
	__u32			hcs_params;	/* cached register copy */
	spinlock_t		lock;
#ifdef CONFIG_CPU_FREQ
	struct notifier_block	cpufreq_transition;
	int			cpufreq_changing;
	struct list_head	split_intr_qhs;
#endif
	/* async schedule support */
	struct ehci_qh		*async;
	struct ehci_qh		*reclaim;
@@ -439,10 +433,6 @@ struct ehci_qh {
	__hc32			hw_next;	/* see EHCI 3.6.1 */
	__hc32			hw_info1;	/* see EHCI 3.6.2 */
#define	QH_HEAD		0x00008000
#define QH_INACTIVATE 0x00000080
#define INACTIVATE_BIT(ehci) cpu_to_hc32(ehci, QH_INACTIVATE)
	__hc32			hw_info2;	/* see EHCI 3.6.2 */
#define	QH_SMASK	0x000000ff
#define	QH_CMASK	0x0000ff00
@@ -492,10 +482,6 @@ struct ehci_qh {
	unsigned short		start;		/* where polling starts */
#define NO_FRAME ((unsigned short)~0)		/* pick new start */
	struct usb_device	*dev;		/* access to TT */
#ifdef CONFIG_CPU_FREQ
	struct list_head	split_intr_qhs;	/* list of split qhs */
	__le32			was_active;	/* active bit before "i" set */
#endif
} __attribute__ ((aligned (32)));

/*-------------------------------------------------------------------------*/