Commit 36fd09dd authored by Lorenzo Bianconi, committed by Felix Fietkau

mt76: move mt76{0,2} mcu shared code into mt76x02_mcu.c

Move the MCU code shared between mt76x0 and mt76x2 into a common file
and remove the duplicated code.

Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 79394f40
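In caller terms, the refactor means both mt76x0 and mt76x2 now reach the MCU through the shared mt76x02_* helpers, which take a struct mt76_dev pointer and an explicit wait_resp flag instead of per-driver duplicates. The sketch below is illustrative only and not part of the patch; example_mcu_bringup is a hypothetical caller, while mt76x02_mcu_function_select(), mt76x02_mcu_set_radio_state(), Q_SELECT and the wait_resp argument are taken from the diff that follows.

/* Hypothetical usage sketch (not from the patch): a minimal bring-up
 * sequence built on the shared helpers introduced below.
 */
static int example_mcu_bringup(struct mt76_dev *mdev)
{
	int err;

	/* select the MCU command queue; don't wait for a response */
	err = mt76x02_mcu_function_select(mdev, Q_SELECT, 1, false);
	if (err < 0)
		return err;

	/* power the radio on and wait for the MCU acknowledgement */
	return mt76x02_mcu_set_radio_state(mdev, true, true);
}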
@@ -15,7 +15,7 @@ mt76-usb-y := usb.o usb_trace.o usb_mcu.o
CFLAGS_trace.o := -I$(src)
CFLAGS_usb_trace.o := -I$(src)
-mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o
+mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o
mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
...
@@ -41,22 +41,6 @@ static inline void skb_put_le32(struct sk_buff *skb, u32 val)
put_unaligned_le32(val, skb_put(skb, 4));
}
int mt76x0_mcu_function_select(struct mt76x0_dev *dev,
enum mcu_function func, u32 val)
{
struct sk_buff *skb;
struct {
__le32 id;
__le32 value;
} __packed __aligned(4) msg = {
.id = cpu_to_le32(func),
.value = cpu_to_le32(val),
};
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, func == 5);
}
int
mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val)
{
@@ -227,5 +211,5 @@ int mt76x0_mcu_init(struct mt76x0_dev *dev)
int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev)
{
-return mt76x0_mcu_function_select(dev, Q_SELECT, 1);
+return mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false);
}
@@ -47,7 +47,4 @@ int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev);
int
mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val);
int
mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val);
#endif
@@ -641,7 +641,7 @@ mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
return ;
}
-mt76x0_mcu_function_select(dev, BW_SETTING, bw);
+mt76x02_mcu_function_select(&dev->mt76, BW_SETTING, bw, false);
}
static void
...
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include "mt76.h"
#include "mt76x02_mcu.h"
#include "mt76x02_dma.h"
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
{
struct sk_buff *skb;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return NULL;
memcpy(skb_put(skb, len), data, len);
return skb;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);
static struct sk_buff *
mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
{
unsigned long timeout;
if (!time_is_after_jiffies(expires))
return NULL;
timeout = expires - jiffies;
wait_event_timeout(dev->mmio.mcu.wait,
!skb_queue_empty(&dev->mmio.mcu.res_q),
timeout);
return skb_dequeue(&dev->mmio.mcu.res_q);
}
static int
mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, int cmd, int seq)
{
struct mt76_queue *q = &dev->q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
u32 tx_info;
tx_info = MT_MCU_MSG_TYPE_CMD |
FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev->dev, addr))
return -ENOMEM;
buf.addr = addr;
buf.len = skb->len;
spin_lock_bh(&q->lock);
dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
dev->queue_ops->kick(dev, q);
spin_unlock_bh(&q->lock);
return 0;
}
int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
unsigned long expires = jiffies + HZ;
int ret;
u8 seq;
if (!skb)
return -EINVAL;
mutex_lock(&dev->mmio.mcu.mutex);
seq = ++dev->mmio.mcu.msg_seq & 0xf;
if (!seq)
seq = ++dev->mmio.mcu.msg_seq & 0xf;
ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
if (ret)
goto out;
while (wait_resp) {
u32 *rxfce;
bool check_seq = false;
skb = mt76x02_mcu_get_response(dev, expires);
if (!skb) {
dev_err(dev->dev,
"MCU message %d (seq %d) timed out\n", cmd,
seq);
ret = -ETIMEDOUT;
break;
}
rxfce = (u32 *) skb->cb;
if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
check_seq = true;
dev_kfree_skb(skb);
if (check_seq)
break;
}
out:
mutex_unlock(&dev->mmio.mcu.mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
int mt76x02_mcu_function_select(struct mt76_dev *dev,
enum mcu_function func,
u32 val, bool wait_resp)
{
struct sk_buff *skb;
struct {
__le32 id;
__le32 value;
} __packed __aligned(4) msg = {
.id = cpu_to_le32(func),
.value = cpu_to_le32(val),
};
skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP,
wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
bool wait_resp)
{
struct sk_buff *skb;
struct {
__le32 mode;
__le32 level;
} __packed __aligned(4) msg = {
.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
.level = cpu_to_le32(0),
};
skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP,
wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
int mt76x02_mcu_cleanup(struct mt76_dev *dev)
{
struct sk_buff *skb;
dev->bus->wr(dev, MT_MCU_INT_LEVEL, 1);
usleep_range(20000, 30000);
while ((skb = skb_dequeue(&dev->mmio.mcu.res_q)) != NULL)
dev_kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);
@@ -85,4 +85,14 @@ struct mt76x02_patch_header {
u8 pad[2];
};
int mt76x02_mcu_cleanup(struct mt76_dev *dev);
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len);
int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp);
int mt76x02_mcu_function_select(struct mt76_dev *dev,
enum mcu_function func,
u32 val, bool wait_resp);
int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
bool wait_resp);
#endif /* __MT76x02_MCU_H */
@@ -188,18 +188,14 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
int mt76x2_mcu_init(struct mt76x2_dev *dev);
int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
u8 bw_index, bool scan);
int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on);
int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
u8 channel);
int mt76x2_mcu_cleanup(struct mt76x2_dev *dev);
int mt76x2_dma_init(struct mt76x2_dev *dev);
void mt76x2_dma_cleanup(struct mt76x2_dev *dev);
void mt76x2_cleanup(struct mt76x2_dev *dev);
int mt76x2_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, int cmd, int seq);
void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
...
@@ -17,36 +17,6 @@
#include "mt76x2.h"
#include "mt76x02_dma.h"
int
mt76x2_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, int cmd, int seq)
{
struct mt76_queue *q = &dev->q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
u32 tx_info;
tx_info = MT_MCU_MSG_TYPE_CMD |
FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev->dev, addr))
return -ENOMEM;
buf.addr = addr;
buf.len = skb->len;
spin_lock_bh(&q->lock);
dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
dev->queue_ops->kick(dev, q);
spin_unlock_bh(&q->lock);
return 0;
}
static int
mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
int idx, int n_desc)
...
@@ -402,7 +402,7 @@ void mt76x2_stop_hardware(struct mt76x2_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
-mt76x2_mcu_set_radio_state(dev, false);
+mt76x02_mcu_set_radio_state(&dev->mt76, false, true);
mt76x2_mac_stop(dev, false);
}
@@ -412,7 +412,7 @@ void mt76x2_cleanup(struct mt76x2_dev *dev)
tasklet_disable(&dev->pre_tbtt_tasklet);
mt76x2_stop_hardware(dev);
mt76x2_dma_cleanup(dev);
-mt76x2_mcu_cleanup(dev);
+mt76x02_mcu_cleanup(&dev->mt76);
}
struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
...
@@ -23,83 +23,6 @@
#include "mt76x2_eeprom.h"
#include "mt76x02_dma.h"
static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
{
struct sk_buff *skb;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return NULL;
memcpy(skb_put(skb, len), data, len);
return skb;
}
static struct sk_buff *
mt76x2_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
{
unsigned long timeout;
if (!time_is_after_jiffies(expires))
return NULL;
timeout = expires - jiffies;
wait_event_timeout(dev->mmio.mcu.wait,
!skb_queue_empty(&dev->mmio.mcu.res_q),
timeout);
return skb_dequeue(&dev->mmio.mcu.res_q);
}
static int
mt76x2_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
unsigned long expires = jiffies + HZ;
int ret;
u8 seq;
if (!skb)
return -EINVAL;
mutex_lock(&dev->mmio.mcu.mutex);
seq = ++dev->mmio.mcu.msg_seq & 0xf;
if (!seq)
seq = ++dev->mmio.mcu.msg_seq & 0xf;
ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
if (ret)
goto out;
while (wait_resp) {
u32 *rxfce;
bool check_seq = false;
skb = mt76x2_mcu_get_response(dev, expires);
if (!skb) {
dev_err(dev->dev,
"MCU message %d (seq %d) timed out\n", cmd,
seq);
ret = -ETIMEDOUT;
break;
}
rxfce = (u32 *) skb->cb;
if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
check_seq = true;
dev_kfree_skb(skb);
if (check_seq)
break;
}
out:
mutex_unlock(&dev->mmio.mcu.mutex);
return ret;
}
static int
mt76pci_load_rom_patch(struct mt76x2_dev *dev)
{
@@ -242,23 +165,6 @@ mt76pci_load_firmware(struct mt76x2_dev *dev)
return -ENOENT;
}
static int
mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
u32 val)
{
struct sk_buff *skb;
struct {
__le32 id;
__le32 value;
} __packed __aligned(4) msg = {
.id = cpu_to_le32(func),
.value = cpu_to_le32(val),
};
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, true);
}
int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
u8 channel)
{
@@ -319,21 +225,6 @@ int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
}
int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
{
struct sk_buff *skb;
struct {
__le32 mode;
__le32 level;
} __packed __aligned(4) msg = {
.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
.level = cpu_to_le32(0),
};
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, true);
}
int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
u32 param)
{
@@ -399,8 +290,8 @@ int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
int mt76x2_mcu_init(struct mt76x2_dev *dev)
{
static const struct mt76_mcu_ops mt76x2_mcu_ops = {
-.mcu_msg_alloc = mt76x2_mcu_msg_alloc,
-.mcu_send_msg = mt76x2_mcu_msg_send,
+.mcu_msg_alloc = mt76x02_mcu_msg_alloc,
+.mcu_send_msg = mt76x02_mcu_msg_send,
};
int ret;
@@ -414,19 +305,6 @@ int mt76x2_mcu_init(struct mt76x2_dev *dev)
if (ret)
return ret;
-mt76x2_mcu_function_select(dev, Q_SELECT, 1);
+mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, true);
return 0;
}
int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
{
struct sk_buff *skb;
mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
usleep_range(20000, 30000);
while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
dev_kfree_skb(skb);
return 0;
}
@@ -489,7 +489,7 @@ int mt76x2_phy_start(struct mt76x2_dev *dev)
{
int ret;
-ret = mt76x2_mcu_set_radio_state(dev, true);
+ret = mt76x02_mcu_set_radio_state(&dev->mt76, true, true);
if (ret)
return ret;
...
@@ -60,7 +60,6 @@ int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
bool force);
int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
bool ext, int rssi, u32 false_cca);
int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val);
int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type,
u8 temp_level, u8 channel);
int mt76x2u_mcu_init(struct mt76x2_dev *dev);
...
@@ -310,7 +310,7 @@ void mt76x2u_stop_hw(struct mt76x2_dev *dev)
void mt76x2u_cleanup(struct mt76x2_dev *dev)
{
-mt76x2u_mcu_set_radio_state(dev, false);
+mt76x02_mcu_set_radio_state(&dev->mt76, false, false);
mt76x2u_stop_hw(dev);
mt76u_queues_deinit(&dev->mt76);
mt76u_mcu_deinit(&dev->mt76);
...
@@ -29,40 +29,6 @@
#define MT76U_MCU_DLM_OFFSET 0x110000
#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
static int
mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
u32 val)
{
struct {
__le32 id;
__le32 value;
} __packed __aligned(4) msg = {
.id = cpu_to_le32(func),
.value = cpu_to_le32(val),
};
struct sk_buff *skb;
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP,
func != Q_SELECT);
}
int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
{
struct {
__le32 mode;
__le32 level;
} __packed __aligned(4) msg = {
.mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
.level = cpu_to_le32(0),
};
struct sk_buff *skb;
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP,
false);
}
int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
u8 channel)
{
@@ -426,9 +392,10 @@ int mt76x2u_mcu_init(struct mt76x2_dev *dev)
{
int err;
-err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1);
+err = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false);
if (err < 0)
return err;
-return mt76x2u_mcu_set_radio_state(dev, true);
+return mt76x02_mcu_set_radio_state(&dev->mt76, true, false);
}