Commit a5a4f468 authored by Hayes Wang, committed by David S. Miller

r8152: replace lockflags with flags

Rename the local variable lockflags to flags in the spin_lock_irqsave()/spin_unlock_irqrestore() call sites.
Signed-off-by: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dff4e8ad
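The rename follows the usual kernel convention of calling the saved interrupt state "flags" when it is handed to spin_lock_irqsave()/spin_unlock_irqrestore(). A minimal sketch of that idiom is shown below; it is illustrative only and not part of this patch, and the demo_lock/demo_done/demo_entry/demo_queue names are made up:

/* Illustrative sketch, not from r8152.c: the spin_lock_irqsave() idiom
 * using the conventional variable name "flags" that this patch adopts.
 * All identifiers below (demo_*) are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
static LIST_HEAD(demo_done);		/* hypothetical completion list */

struct demo_entry {
	struct list_head list;
};

static void demo_queue(struct demo_entry *e)
{
	unsigned long flags;	/* saved IRQ state; named "flags" by convention */

	spin_lock_irqsave(&demo_lock, flags);	/* disable local IRQs and take the lock */
	list_add_tail(&e->list, &demo_done);
	spin_unlock_irqrestore(&demo_lock, flags);	/* restore the saved IRQ state */
}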
@@ -769,7 +769,7 @@ static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
 static void read_bulk_callback(struct urb *urb)
 {
 	struct net_device *netdev;
-	unsigned long lockflags;
+	unsigned long flags;
 	int status = urb->status;
 	struct rx_agg *agg;
 	struct r8152 *tp;
@@ -798,9 +798,9 @@ static void read_bulk_callback(struct urb *urb)
 		if (urb->actual_length < ETH_ZLEN)
 			break;
 
-		spin_lock_irqsave(&tp->rx_lock, lockflags);
+		spin_lock_irqsave(&tp->rx_lock, flags);
 		list_add_tail(&agg->list, &tp->rx_done);
-		spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+		spin_unlock_irqrestore(&tp->rx_lock, flags);
 		tasklet_schedule(&tp->tl);
 		return;
 	case -ESHUTDOWN:
@@ -821,9 +821,9 @@ static void read_bulk_callback(struct urb *urb)
 	if (result == -ENODEV) {
 		netif_device_detach(tp->netdev);
 	} else if (result) {
-		spin_lock_irqsave(&tp->rx_lock, lockflags);
+		spin_lock_irqsave(&tp->rx_lock, flags);
 		list_add_tail(&agg->list, &tp->rx_done);
-		spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+		spin_unlock_irqrestore(&tp->rx_lock, flags);
 		tasklet_schedule(&tp->tl);
 	}
 }
@@ -831,7 +831,7 @@ static void read_bulk_callback(struct urb *urb)
 static void write_bulk_callback(struct urb *urb)
 {
 	struct net_device_stats *stats;
-	unsigned long lockflags;
+	unsigned long flags;
 	struct tx_agg *agg;
 	struct r8152 *tp;
 	int status = urb->status;
@@ -853,9 +853,9 @@ static void write_bulk_callback(struct urb *urb)
 		stats->tx_bytes += agg->skb_len;
 	}
 
-	spin_lock_irqsave(&tp->tx_lock, lockflags);
+	spin_lock_irqsave(&tp->tx_lock, flags);
 	list_add_tail(&agg->list, &tp->tx_free);
-	spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+	spin_unlock_irqrestore(&tp->tx_lock, flags);
 
 	if (!netif_carrier_ok(tp->netdev))
 		return;
@@ -1119,7 +1119,7 @@ static void rx_bottom(struct r8152 *tp)
 	struct net_device *netdev;
 	struct rx_agg *agg;
 	struct rx_desc *rx_desc;
-	unsigned long lockflags;
+	unsigned long flags;
 	struct list_head *cursor, *next;
 	struct sk_buff *skb;
 	struct urb *urb;
@@ -1132,16 +1132,16 @@ static void rx_bottom(struct r8152 *tp)
 
 	stats = rtl8152_get_stats(netdev);
 
-	spin_lock_irqsave(&tp->rx_lock, lockflags);
+	spin_lock_irqsave(&tp->rx_lock, flags);
 	list_for_each_safe(cursor, next, &tp->rx_done) {
 		list_del_init(cursor);
-		spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+		spin_unlock_irqrestore(&tp->rx_lock, flags);
 
 		agg = list_entry(cursor, struct rx_agg, list);
 		urb = agg->urb;
 		if (urb->actual_length < ETH_ZLEN) {
 			ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
-			spin_lock_irqsave(&tp->rx_lock, lockflags);
+			spin_lock_irqsave(&tp->rx_lock, flags);
 			if (ret && ret != -ENODEV) {
 				list_add_tail(&agg->list, next);
 				tasklet_schedule(&tp->tl);
@@ -1182,13 +1182,13 @@ static void rx_bottom(struct r8152 *tp)
 		}
 
 		ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
-		spin_lock_irqsave(&tp->rx_lock, lockflags);
+		spin_lock_irqsave(&tp->rx_lock, flags);
 		if (ret && ret != -ENODEV) {
 			list_add_tail(&agg->list, next);
 			tasklet_schedule(&tp->tl);
 		}
 	}
-	spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+	spin_unlock_irqrestore(&tp->rx_lock, flags);
 }
 
 static void tx_bottom(struct r8152 *tp)
@@ -1196,7 +1196,7 @@ static void tx_bottom(struct r8152 *tp)
 	struct net_device_stats *stats;
 	struct net_device *netdev;
 	struct tx_agg *agg;
-	unsigned long lockflags;
+	unsigned long flags;
 	u32 remain, total;
 	u8 *tx_data;
 	int res;
@@ -1205,7 +1205,7 @@ static void tx_bottom(struct r8152 *tp)
 
 next_agg:
 	agg = NULL;
-	spin_lock_irqsave(&tp->tx_lock, lockflags);
+	spin_lock_irqsave(&tp->tx_lock, flags);
 	if (!skb_queue_empty(&tp->tx_queue) && !list_empty(&tp->tx_free)) {
 		struct list_head *cursor;
 
@@ -1213,7 +1213,7 @@ static void tx_bottom(struct r8152 *tp)
 		list_del_init(cursor);
 		agg = list_entry(cursor, struct tx_agg, list);
 	}
-	spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+	spin_unlock_irqrestore(&tp->tx_lock, flags);
 
 	if (!agg)
 		return;
@@ -1268,9 +1268,9 @@ static void tx_bottom(struct r8152 *tp)
 			netif_warn(tp, tx_err, netdev,
 				   "failed tx_urb %d\n", res);
 			stats->tx_dropped += agg->skb_num;
-			spin_lock_irqsave(&tp->tx_lock, lockflags);
+			spin_lock_irqsave(&tp->tx_lock, flags);
 			list_add_tail(&agg->list, &tp->tx_free);
-			spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+			spin_unlock_irqrestore(&tp->tx_lock, flags);
 		}
 		return;
 	}
@@ -1373,7 +1373,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
 {
 	struct r8152 *tp = netdev_priv(netdev);
 	struct net_device_stats *stats = rtl8152_get_stats(netdev);
-	unsigned long lockflags;
+	unsigned long flags;
 	struct tx_agg *agg = NULL;
 	struct tx_desc *tx_desc;
 	unsigned int len;
@@ -1382,7 +1382,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
 
 	skb_tx_timestamp(skb);
 
-	spin_lock_irqsave(&tp->tx_lock, lockflags);
+	spin_lock_irqsave(&tp->tx_lock, flags);
 	if (!list_empty(&tp->tx_free) && skb_queue_empty(&tp->tx_queue)) {
 		struct list_head *cursor;
 
@@ -1390,7 +1390,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
 		list_del_init(cursor);
 		agg = list_entry(cursor, struct tx_agg, list);
 	}
-	spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+	spin_unlock_irqrestore(&tp->tx_lock, flags);
 
 	if (!agg) {
 		skb_queue_tail(&tp->tx_queue, skb);
@@ -1419,9 +1419,9 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
 			netif_warn(tp, tx_err, netdev,
 				   "failed tx_urb %d\n", res);
 			stats->tx_dropped++;
-			spin_lock_irqsave(&tp->tx_lock, lockflags);
+			spin_lock_irqsave(&tp->tx_lock, flags);
 			list_add_tail(&agg->list, &tp->tx_free);
-			spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+			spin_unlock_irqrestore(&tp->tx_lock, flags);
 		}
 	}