Commit fb8d1d7e authored by David S. Miller

Merge branch 'net-aquantia-PTP-support-for-AQC-devices'

Igor Russkikh says:

====================
net: aquantia: PTP support for AQC devices

This patchset introduces PTP feature support in Aquantia AQC atlantic driver.

This implementation is a joint effort of Aquantia developers:
Egor is the main designer and driver/firmware architect for PTP;
Sergey and Dmitry are included as co-developers.
Dmitry also helped me with the overall patchset preparation.

The feature was verified on AQC hardware with the testptp tool, linuxptp,
gptp and with a Motu hardware unit.

Version 3 updates:
- Review comments applied: error handling, various fixes

Version 2 updates:
- Fixing issues from Andrew's review: replacing the "self" variable name
  with "ptp", making ptp_clk_offset a field in the ptp instance.
  The devm_kzalloc advice is not applicable here, because the ptp object is
  created/destroyed on each network device close/open and should not be
  tied to the device lifecycle.
- Rearranging commit authorship, adding Egor as the main maintainer of the ptp module
- Fixing kbuild 32-bit division issues
====================
Acked-by: Richard Cochran <richardcochran@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 337d866a 4ef511bc
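For reference, a minimal userspace sketch of how the control path added in aq_main.c below is exercised: it enables hardware TX/RX timestamps through the standard SIOCSHWTSTAMP ioctl, which the new aq_ndev_ioctl()/aq_ndev_hwtstamp_set() handlers service. The interface name "eth0" and the minimal error handling are assumptions for illustration only, not part of the patchset.

/* Hedged sketch: enable HW timestamping on an interface.
 * Assumes the interface is named "eth0"; the ioctl and structures are the
 * standard Linux SIOCSHWTSTAMP API that this patchset wires up.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
    struct hwtstamp_config cfg = {
        .tx_type   = HWTSTAMP_TX_ON,
        .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
    };
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return 1;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */
    ifr.ifr_data = (char *)&cfg;

    /* Routed to aq_ndev_hwtstamp_set() via the new ndo_do_ioctl hook */
    if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
        perror("SIOCSHWTSTAMP");
    else
        printf("rx_filter after set: %d\n", cfg.rx_filter);

    close(fd);
    return 0;
}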
......@@ -1190,6 +1190,13 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/aquantia/atlantic/
F: Documentation/networking/device_drivers/aquantia/atlantic.txt
AQUANTIA ETHERNET DRIVER PTP SUBSYSTEM
M: Egor Pomozov <epomozov@marvell.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.aquantia.com
F: drivers/net/ethernet/aquantia/atlantic/aq_ptp*
ARC FRAMEBUFFER DRIVER
M: Jaya Kumar <jayalk@intworks.biz>
S: Maintained
......
......@@ -24,6 +24,8 @@ atlantic-objs := aq_main.o \
aq_ethtool.o \
aq_drvinfo.o \
aq_filters.o \
aq_ptp.o \
aq_phy.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_cfg.h: Definition of configuration parameters and constants. */
......@@ -27,7 +27,7 @@
#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
#define AQ_CFG_IRQ_MASK 0x1FFU
#define AQ_CFG_IRQ_MASK 0x3FFU
#define AQ_CFG_VECS_MAX 8U
#define AQ_CFG_TCS_MAX 8U
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ethtool.c: Definition of ethtool related functions. */
......@@ -9,8 +9,11 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_ptp.h"
#include "aq_filters.h"
#include <linux/ptp_clock_kernel.h>
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
{
......@@ -377,6 +380,35 @@ static int aq_ethtool_set_wol(struct net_device *ndev,
return err;
}
static int aq_ethtool_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
ethtool_op_get_ts_info(ndev, info);
if (!aq_nic->aq_ptp)
return 0;
info->so_timestamping |=
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
info->phc_index = ptp_clock_index(aq_ptp_get_ptp_clock(aq_nic->aq_ptp));
return 0;
}
static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
{
u32 rate = 0;
......@@ -604,4 +636,5 @@ const struct ethtool_ops aq_ethtool_ops = {
.set_link_ksettings = aq_ethtool_set_link_ksettings,
.get_coalesce = aq_ethtool_get_coalesce,
.set_coalesce = aq_ethtool_set_coalesce,
.get_ts_info = aq_ethtool_get_ts_info,
};
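A short sketch of how the new .get_ts_info callback above is consumed from userspace: ethtool -T <iface> issues ETHTOOL_GET_TS_INFO over SIOCETHTOOL, which lands in aq_ethtool_get_ts_info() and reports the timestamping capabilities and the PHC index. The interface name below is an assumption for illustration.

/* Hedged sketch: query timestamping capabilities like `ethtool -T` does. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
    struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return 1;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */
    ifr.ifr_data = (char *)&info;

    if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
        printf("phc_index: %d\n", info.phc_index);  /* PHC is /dev/ptpN */
    else
        perror("SIOCETHTOOL");

    close(fd);
    return 0;
}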
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2014-2017 aQuantia Corporation. */
/* Copyright (C) 2014-2019 aQuantia Corporation. */
/* File aq_filters.c: RX filters related functions. */
......@@ -89,12 +89,14 @@ static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
u32 last_location = AQ_RX_LAST_LOC_FL3L4 -
aq_nic->aq_hw_rx_fltrs.fl3l4.reserved_count;
if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
fsp->location > AQ_RX_LAST_LOC_FL3L4) {
fsp->location > last_location) {
netdev_err(aq_nic->ndev,
"ethtool: location must be in range [%d, %d]",
AQ_RX_FIRST_LOC_FL3L4,
AQ_RX_LAST_LOC_FL3L4);
AQ_RX_FIRST_LOC_FL3L4, last_location);
return -EINVAL;
}
if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
......@@ -124,12 +126,15 @@ aq_check_approve_fl2(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
u32 last_location = AQ_RX_LAST_LOC_FETHERT -
aq_nic->aq_hw_rx_fltrs.fet_reserved_count;
if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
fsp->location > AQ_RX_LAST_LOC_FETHERT) {
fsp->location > last_location) {
netdev_err(aq_nic->ndev,
"ethtool: location must be in range [%d, %d]",
AQ_RX_FIRST_LOC_FETHERT,
AQ_RX_LAST_LOC_FETHERT);
last_location);
return -EINVAL;
}
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
......@@ -15,6 +15,9 @@
#include "aq_rss.h"
#include "hw_atl/hw_atl_utils.h"
#define AQ_HW_MAC_COUNTER_HZ 312500000ll
#define AQ_HW_PHY_COUNTER_HZ 160000000ll
#define AQ_RX_FIRST_LOC_FVLANID 0U
#define AQ_RX_LAST_LOC_FVLANID 15U
#define AQ_RX_FIRST_LOC_FETHERT 16U
......@@ -94,6 +97,7 @@ struct aq_stats_s {
#define AQ_HW_FLAG_STOPPING 0x00000008U
#define AQ_HW_FLAG_RESETTING 0x00000010U
#define AQ_HW_FLAG_CLOSING 0x00000020U
#define AQ_HW_PTP_AVAILABLE 0x01000000U
#define AQ_HW_LINK_DOWN 0x04000000U
#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_HW_FLAG_ERR_HW 0x80000000U
......@@ -135,6 +139,8 @@ struct aq_hw_s {
u32 rpc_addr;
u32 rpc_tid;
struct hw_atl_utils_fw_rpc rpc;
s64 ptp_clk_offset;
u16 phy_id;
};
struct aq_ring_s;
......@@ -235,6 +241,40 @@ struct aq_hw_ops {
int (*hw_set_offload)(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
int (*hw_tx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
int (*hw_rx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
int (*hw_ring_hwts_rx_fill)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
int (*hw_ring_hwts_rx_receive)(struct aq_hw_s *self,
struct aq_ring_s *ring);
void (*hw_get_ptp_ts)(struct aq_hw_s *self, u64 *stamp);
int (*hw_adj_clock_freq)(struct aq_hw_s *self, s32 delta);
int (*hw_adj_sys_clock)(struct aq_hw_s *self, s64 delta);
int (*hw_set_sys_clock)(struct aq_hw_s *self, u64 time, u64 ts);
int (*hw_ts_to_sys_clock)(struct aq_hw_s *self, u64 ts, u64 *time);
int (*hw_gpio_pulse)(struct aq_hw_s *self, u32 index, u64 start,
u32 period);
int (*hw_extts_gpio_enable)(struct aq_hw_s *self, u32 index,
u32 enable);
int (*hw_get_sync_ts)(struct aq_hw_s *self, u64 *ts);
u16 (*rx_extract_ts)(struct aq_hw_s *self, u8 *p, unsigned int len,
u64 *timestamp);
int (*extract_hwts)(struct aq_hw_s *self, u8 *p, unsigned int len,
u64 *timestamp);
int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
};
......@@ -267,6 +307,12 @@ struct aq_fw_ops {
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
u8 *mac);
int (*send_fw_request)(struct aq_hw_s *self,
const struct hw_fw_request_iface *fw_req,
size_t size);
void (*enable_ptp)(struct aq_hw_s *self, int enable);
int (*set_eee_rate)(struct aq_hw_s *self, u32 speed);
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_main.c: Main file for aQuantia Linux driver. */
......@@ -10,10 +10,13 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
#include "aq_ptp.h"
#include "aq_filters.h"
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AQ_CFG_DRV_VERSION);
......@@ -93,6 +96,24 @@ static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) {
/* Hardware adds the timestamp for PTPv2 802.1AS
* and PTPv2 IPv4 UDP.
* We have to push even general messages (UDP port 320) to the ptp
* queue explicitly. This is a limitation of the current firmware
* and hardware PTP design of the chip. Otherwise the ptp stream
* will fail to sync
*/
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
unlikely((ip_hdr(skb)->version == 4) &&
(ip_hdr(skb)->protocol == IPPROTO_UDP) &&
((udp_hdr(skb)->dest == htons(319)) ||
(udp_hdr(skb)->dest == htons(320)))) ||
unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588)))
return aq_ptp_xmit(aq_nic, skb);
}
skb_tx_timestamp(skb);
return aq_nic_xmit(aq_nic, skb);
}
......@@ -197,6 +218,87 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
(void)aq_nic_set_multicast_list(aq_nic, ndev);
}
static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
struct hwtstamp_config *config)
{
if (config->flags)
return -EINVAL;
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
default:
return -ERANGE;
}
switch (config->rx_filter) {
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_NONE:
break;
default:
return -ERANGE;
}
return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
}
static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
{
struct hwtstamp_config config;
int ret_val;
if (!aq_nic->aq_ptp)
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
if (ret_val)
return ret_val;
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
{
struct hwtstamp_config config;
if (!aq_nic->aq_ptp)
return -EOPNOTSUPP;
aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config);
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct aq_nic_s *aq_nic = netdev_priv(netdev);
switch (cmd) {
case SIOCSHWTSTAMP:
return aq_ndev_hwtstamp_set(aq_nic, ifr);
case SIOCGHWTSTAMP:
return aq_ndev_hwtstamp_get(aq_nic, ifr);
}
return -EOPNOTSUPP;
}
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
u16 vid)
{
......@@ -234,6 +336,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
.ndo_do_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_nic.c: Definition of common code for NIC. */
......@@ -12,6 +12,9 @@
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
#include "aq_filters.h"
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
......@@ -145,6 +148,13 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
self->aq_hw->aq_link_status.mbps);
aq_nic_update_interrupt_moderation_settings(self);
if (self->aq_ptp) {
aq_ptp_clock_init(self);
aq_ptp_tm_offset_set(self,
self->aq_hw->aq_link_status.mbps);
aq_ptp_link_change(self);
}
/* Driver has to update flow control settings on RX block
* on any link event.
* We should query FW whether it negotiated FC.
......@@ -192,6 +202,8 @@ static void aq_nic_service_task(struct work_struct *work)
service_task);
int err;
aq_ptp_service_task(self);
if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
return;
......@@ -327,10 +339,27 @@ int aq_nic_init(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
err = aq_phy_init(self->aq_hw);
}
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
err = aq_ptp_init(self, self->irqvecs - 1);
if (err < 0)
goto err_exit;
err = aq_ptp_ring_alloc(self);
if (err < 0)
goto err_exit;
err = aq_ptp_ring_init(self);
if (err < 0)
goto err_exit;
netif_carrier_off(self->ndev);
err_exit:
......@@ -361,6 +390,10 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
err = aq_ptp_ring_start(self);
if (err < 0)
goto err_exit;
err = self->aq_hw_ops->hw_start(self->aq_hw);
if (err < 0)
goto err_exit;
......@@ -388,6 +421,10 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
err = aq_ptp_irq_alloc(self);
if (err < 0)
goto err_exit;
if (self->aq_nic_cfg.link_irq_vec) {
int irqvec = pci_irq_vector(self->pdev,
self->aq_nic_cfg.link_irq_vec);
......@@ -420,8 +457,7 @@ int aq_nic_start(struct aq_nic_s *self)
return err;
}
static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
struct sk_buff *skb,
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
unsigned int ret = 0U;
......@@ -953,10 +989,14 @@ int aq_nic_stop(struct aq_nic_s *self)
else
aq_pci_func_free_irqs(self);
aq_ptp_irq_free(self);
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_stop(aq_vec);
aq_ptp_ring_stop(self);
return self->aq_hw_ops->hw_stop(self->aq_hw);
}
......@@ -972,6 +1012,11 @@ void aq_nic_deinit(struct aq_nic_s *self)
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_deinit(aq_vec);
aq_ptp_unregister(self);
aq_ptp_ring_deinit(self);
aq_ptp_ring_free(self);
aq_ptp_free(self);
if (likely(self->aq_fw_ops->deinit)) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
......@@ -1068,3 +1113,46 @@ void aq_nic_shutdown(struct aq_nic_s *self)
err_exit:
rtnl_unlock();
}
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
{
u8 location = 0xFF;
u32 fltr_cnt;
u32 n_bit;
switch (type) {
case aq_rx_filter_ethertype:
location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
self->aq_hw_rx_fltrs.fet_reserved_count;
self->aq_hw_rx_fltrs.fet_reserved_count++;
break;
case aq_rx_filter_l3l4:
fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;
self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
location = n_bit;
break;
default:
break;
}
return location;
}
void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
u32 location)
{
switch (type) {
case aq_rx_filter_ethertype:
self->aq_hw_rx_fltrs.fet_reserved_count--;
break;
case aq_rx_filter_l3l4:
self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
break;
default:
break;
}
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_nic.h: Declaration of common code for NIC. */
......@@ -17,6 +17,8 @@ struct aq_ring_s;
struct aq_hw_ops;
struct aq_fw_s;
struct aq_vec_s;
struct aq_ptp_s;
enum aq_rx_filter_type;
struct aq_nic_cfg_s {
const struct aq_hw_caps_s *aq_hw_caps;
......@@ -53,6 +55,7 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_STOPPING 0x00000008U
#define AQ_NIC_FLAG_RESETTING 0x00000010U
#define AQ_NIC_FLAG_CLOSING 0x00000020U
#define AQ_NIC_PTP_DPATH_UP 0x02000000U
#define AQ_NIC_LINK_DOWN 0x04000000U
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U
......@@ -70,6 +73,7 @@ struct aq_hw_rx_fl3l4 {
u8 active_ipv4;
u8 active_ipv6:2;
u8 is_ipv6;
u8 reserved_count;
};
struct aq_hw_rx_fltrs_s {
......@@ -77,6 +81,8 @@ struct aq_hw_rx_fltrs_s {
u16 active_filters;
struct aq_hw_rx_fl2 fl2;
struct aq_hw_rx_fl3l4 fl3l4;
/* filter ether type */
u8 fet_reserved_count;
};
struct aq_nic_s {
......@@ -108,6 +114,8 @@ struct aq_nic_s {
u32 irqvecs;
/* mutex to serialize FW interface access operations */
struct mutex fwreq_mutex;
/* PTP support */
struct aq_ptp_s *aq_ptp;
struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
......@@ -126,6 +134,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
......@@ -148,5 +158,7 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self);
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
void aq_nic_shutdown(struct aq_nic_s *self);
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
u32 location);
#endif /* AQ_NIC_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_pci_func.c: Definition of PCI functions. */
......@@ -269,6 +269,9 @@ static int aq_pci_probe(struct pci_dev *pdev,
numvecs = min((u8)AQ_CFG_VECS_DEF,
aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
numvecs = min(numvecs, num_online_cpus());
/* Request IRQ vector for PTP */
numvecs += 1;
numvecs += AQ_HW_SERVICE_IRQS;
/* enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
......
// SPDX-License-Identifier: GPL-2.0-only
/* aQuantia Corporation Network Driver
* Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
*/
#include "aq_phy.h"
bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
{
int err = 0;
u32 val;
err = readx_poll_timeout_atomic(hw_atl_mdio_busy_get, aq_hw,
val, val == 0U, 10U, 100000U);
if (err < 0)
return false;
return true;
}
u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr)
{
u16 phy_addr = aq_hw->phy_id << 5 | mmd;
/* Set Address register. */
hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
HW_ATL_MDIO_ADDRESS_SHIFT);
/* Send Address command. */
hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
(3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
aq_mdio_busy_wait(aq_hw);
/* Send Read command. */
hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
(1 << HW_ATL_MDIO_OP_MODE_SHIFT) |
((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
/* Read result. */
aq_mdio_busy_wait(aq_hw);
return (u16)hw_atl_glb_mdio_iface5_get(aq_hw);
}
void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data)
{
u16 phy_addr = aq_hw->phy_id << 5 | mmd;
/* Set Address register. */
hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
HW_ATL_MDIO_ADDRESS_SHIFT);
/* Send Address command. */
hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
(3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
aq_mdio_busy_wait(aq_hw);
hw_atl_glb_mdio_iface3_set(aq_hw, (data & HW_ATL_MDIO_WRITE_DATA_MSK) <<
HW_ATL_MDIO_WRITE_DATA_SHIFT);
/* Send Write command. */
hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
(2 << HW_ATL_MDIO_OP_MODE_SHIFT) |
((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
aq_mdio_busy_wait(aq_hw);
}
u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address)
{
int err = 0;
u32 val;
err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
val, val == 1U, 10U, 100000U);
if (err < 0) {
err = 0xffff;
goto err_exit;
}
err = aq_mdio_read_word(aq_hw, mmd, address);
hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
err_exit:
return err;
}
void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data)
{
int err = 0;
u32 val;
err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
val, val == 1U, 10U, 100000U);
if (err < 0)
return;
aq_mdio_write_word(aq_hw, mmd, address, data);
hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
}
bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw)
{
u16 val;
for (aq_hw->phy_id = 0; aq_hw->phy_id < HW_ATL_PHY_ID_MAX;
++aq_hw->phy_id) {
/* PMA Standard Device Identifier 2: Address 1.3 */
val = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
if (val != 0xffff)
return true;
}
return false;
}
bool aq_phy_init(struct aq_hw_s *aq_hw)
{
u32 dev_id;
if (aq_hw->phy_id == HW_ATL_PHY_ID_MAX)
if (!aq_phy_init_phy_id(aq_hw))
return false;
/* PMA Standard Device Identifier:
* Address 1.2 = MSW,
* Address 1.3 = LSW
*/
dev_id = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 2);
dev_id <<= 16;
dev_id |= aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
if (dev_id == 0xffffffff) {
aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
return false;
}
return true;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/* aQuantia Corporation Network Driver
* Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
*/
#ifndef AQ_PHY_H
#define AQ_PHY_H
#include <linux/mdio.h>
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_llh_internal.h"
#include "aq_hw_utils.h"
#include "aq_hw.h"
#define HW_ATL_PHY_ID_MAX 32U
bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw);
u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr);
void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data);
u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address);
void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data);
bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw);
bool aq_phy_init(struct aq_hw_s *aq_hw);
#endif /* AQ_PHY_H */
// SPDX-License-Identifier: GPL-2.0-only
/* Aquantia Corporation Network Driver
* Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
*/
/* File aq_ptp.c:
* Definition of functions for Linux PTP support.
*/
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include "aq_nic.h"
#include "aq_ptp.h"
#include "aq_ring.h"
#include "aq_phy.h"
#include "aq_filters.h"
#define AQ_PTP_TX_TIMEOUT (HZ * 10)
#define POLL_SYNC_TIMER_MS 15
enum ptp_speed_offsets {
ptp_offset_idx_10 = 0,
ptp_offset_idx_100,
ptp_offset_idx_1000,
ptp_offset_idx_2500,
ptp_offset_idx_5000,
ptp_offset_idx_10000,
};
struct ptp_skb_ring {
struct sk_buff **buff;
spinlock_t lock;
unsigned int size;
unsigned int head;
unsigned int tail;
};
struct ptp_tx_timeout {
spinlock_t lock;
bool active;
unsigned long tx_start;
};
struct aq_ptp_s {
struct aq_nic_s *aq_nic;
struct hwtstamp_config hwtstamp_config;
spinlock_t ptp_lock;
spinlock_t ptp_ring_lock;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_info;
atomic_t offset_egress;
atomic_t offset_ingress;
struct aq_ring_param_s ptp_ring_param;
struct ptp_tx_timeout ptp_tx_timeout;
unsigned int idx_vector;
struct napi_struct napi;
struct aq_ring_s ptp_tx;
struct aq_ring_s ptp_rx;
struct aq_ring_s hwts_rx;
struct ptp_skb_ring skb_ring;
struct aq_rx_filter_l3l4 udp_filter;
struct aq_rx_filter_l2 eth_type_filter;
struct delayed_work poll_sync;
u32 poll_timeout_ms;
bool extts_pin_enabled;
u64 last_sync1588_ts;
};
struct ptp_tm_offset {
unsigned int mbps;
int egress;
int ingress;
};
static struct ptp_tm_offset ptp_offset[6];
void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
int i, egress, ingress;
if (!aq_ptp)
return;
egress = 0;
ingress = 0;
for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
if (mbps == ptp_offset[i].mbps) {
egress = ptp_offset[i].egress;
ingress = ptp_offset[i].ingress;
break;
}
}
atomic_set(&aq_ptp->offset_egress, egress);
atomic_set(&aq_ptp->offset_ingress, ingress);
}
static int __aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
{
unsigned int next_head = (ring->head + 1) % ring->size;
if (next_head == ring->tail)
return -ENOMEM;
ring->buff[ring->head] = skb_get(skb);
ring->head = next_head;
return 0;
}
static int aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&ring->lock, flags);
ret = __aq_ptp_skb_put(ring, skb);
spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
static struct sk_buff *__aq_ptp_skb_get(struct ptp_skb_ring *ring)
{
struct sk_buff *skb;
if (ring->tail == ring->head)
return NULL;
skb = ring->buff[ring->tail];
ring->tail = (ring->tail + 1) % ring->size;
return skb;
}
static struct sk_buff *aq_ptp_skb_get(struct ptp_skb_ring *ring)
{
unsigned long flags;
struct sk_buff *skb;
spin_lock_irqsave(&ring->lock, flags);
skb = __aq_ptp_skb_get(ring);
spin_unlock_irqrestore(&ring->lock, flags);
return skb;
}
static unsigned int aq_ptp_skb_buf_len(struct ptp_skb_ring *ring)
{
unsigned long flags;
unsigned int len;
spin_lock_irqsave(&ring->lock, flags);
len = (ring->head >= ring->tail) ?
ring->head - ring->tail :
ring->size - ring->tail + ring->head;
spin_unlock_irqrestore(&ring->lock, flags);
return len;
}
static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
{
struct sk_buff **buff = kmalloc(sizeof(*buff) * size, GFP_KERNEL);
if (!buff)
return -ENOMEM;
spin_lock_init(&ring->lock);
ring->buff = buff;
ring->size = size;
ring->head = 0;
ring->tail = 0;
return 0;
}
static void aq_ptp_skb_ring_clean(struct ptp_skb_ring *ring)
{
struct sk_buff *skb;
while ((skb = aq_ptp_skb_get(ring)) != NULL)
dev_kfree_skb_any(skb);
}
static void aq_ptp_skb_ring_release(struct ptp_skb_ring *ring)
{
if (ring->buff) {
aq_ptp_skb_ring_clean(ring);
kfree(ring->buff);
ring->buff = NULL;
}
}
static void aq_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout)
{
spin_lock_init(&timeout->lock);
timeout->active = false;
}
static void aq_ptp_tx_timeout_start(struct aq_ptp_s *aq_ptp)
{
struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
unsigned long flags;
spin_lock_irqsave(&timeout->lock, flags);
timeout->active = true;
timeout->tx_start = jiffies;
spin_unlock_irqrestore(&timeout->lock, flags);
}
static void aq_ptp_tx_timeout_update(struct aq_ptp_s *aq_ptp)
{
if (!aq_ptp_skb_buf_len(&aq_ptp->skb_ring)) {
struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
unsigned long flags;
spin_lock_irqsave(&timeout->lock, flags);
timeout->active = false;
spin_unlock_irqrestore(&timeout->lock, flags);
}
}
static void aq_ptp_tx_timeout_check(struct aq_ptp_s *aq_ptp)
{
struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
unsigned long flags;
bool timeout_flag;
timeout_flag = false;
spin_lock_irqsave(&timeout->lock, flags);
if (timeout->active) {
timeout_flag = time_is_before_jiffies(timeout->tx_start +
AQ_PTP_TX_TIMEOUT);
/* reset active flag if timeout detected */
if (timeout_flag)
timeout->active = false;
}
spin_unlock_irqrestore(&timeout->lock, flags);
if (timeout_flag) {
aq_ptp_skb_ring_clean(&aq_ptp->skb_ring);
netdev_err(aq_ptp->aq_nic->ndev,
"PTP Timeout. Clearing Tx Timestamp SKBs\n");
}
}
/* aq_ptp_adjfine
* @ptp: the ptp clock structure
* @scaled_ppm: scaled parts per million adjustment from base
*
* adjust the frequency of the ptp cycle counter by the
* indicated scaled_ppm from the base frequency.
*/
static int aq_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
mutex_lock(&aq_nic->fwreq_mutex);
aq_nic->aq_hw_ops->hw_adj_clock_freq(aq_nic->aq_hw,
scaled_ppm_to_ppb(scaled_ppm));
mutex_unlock(&aq_nic->fwreq_mutex);
return 0;
}
/* aq_ptp_adjtime
* @ptp: the ptp clock structure
* @delta: offset to adjust the cycle counter by
*
* adjust the timer by resetting the timecounter structure.
*/
static int aq_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
unsigned long flags;
spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, delta);
spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
return 0;
}
/* aq_ptp_gettime
* @ptp: the ptp clock structure
* @ts: timespec structure to hold the current time value
*
* read the timecounter and return the correct value in ns,
* after converting it into a struct timespec.
*/
static int aq_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
unsigned long flags;
u64 ns;
spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &ns);
spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
*ts = ns_to_timespec64(ns);
return 0;
}
/* aq_ptp_settime
* @ptp: the ptp clock structure
* @ts: the timespec containing the new time for the cycle counter
*
* reset the timecounter to use a new base value instead of the kernel
* wall timer value.
*/
static int aq_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
unsigned long flags;
u64 ns = timespec64_to_ns(ts);
u64 now;
spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &now);
aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, (s64)ns - (s64)now);
spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
return 0;
}
static void aq_ptp_convert_to_hwtstamp(struct aq_ptp_s *aq_ptp,
struct skb_shared_hwtstamps *hwtstamp,
u64 timestamp)
{
memset(hwtstamp, 0, sizeof(*hwtstamp));
hwtstamp->hwtstamp = ns_to_ktime(timestamp);
}
static int aq_ptp_hw_pin_conf(struct aq_nic_s *aq_nic, u32 pin_index, u64 start,
u64 period)
{
if (period)
netdev_dbg(aq_nic->ndev,
"Enable GPIO %d pulsing, start time %llu, period %u\n",
pin_index, start, (u32)period);
else
netdev_dbg(aq_nic->ndev,
"Disable GPIO %d pulsing, start time %llu, period %u\n",
pin_index, start, (u32)period);
/* Notify hardware of the request to begin sending pulses.
* If period is ZERO then pulsing is disabled.
*/
mutex_lock(&aq_nic->fwreq_mutex);
aq_nic->aq_hw_ops->hw_gpio_pulse(aq_nic->aq_hw, pin_index,
start, (u32)period);
mutex_unlock(&aq_nic->fwreq_mutex);
return 0;
}
static int aq_ptp_perout_pin_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
struct ptp_clock_time *t = &rq->perout.period;
struct ptp_clock_time *s = &rq->perout.start;
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
u64 start, period;
u32 pin_index = rq->perout.index;
/* verify the request channel is there */
if (pin_index >= ptp->n_per_out)
return -EINVAL;
/* we cannot support periods greater
* than 4 seconds due to reg limit
*/
if (t->sec > 4 || t->sec < 0)
return -ERANGE;
/* convert to unsigned 64b ns,
* verify we can put it in a 32b register
*/
period = on ? t->sec * NSEC_PER_SEC + t->nsec : 0;
/* verify the value is in range supported by hardware */
if (period > U32_MAX)
return -ERANGE;
/* convert to unsigned 64b ns */
/* TODO convert to AQ time */
start = on ? s->sec * NSEC_PER_SEC + s->nsec : 0;
aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
return 0;
}
static int aq_ptp_pps_pin_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
u64 start, period;
u32 pin_index = 0;
u32 rest = 0;
/* verify the request channel is there */
if (pin_index >= ptp->n_per_out)
return -EINVAL;
aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &start);
div_u64_rem(start, NSEC_PER_SEC, &rest);
period = on ? NSEC_PER_SEC : 0; /* PPS - pulse per second */
start = on ? start - rest + NSEC_PER_SEC *
(rest > 990000000LL ? 2 : 1) : 0;
aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
return 0;
}
static void aq_ptp_extts_pin_ctrl(struct aq_ptp_s *aq_ptp)
{
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
u32 enable = aq_ptp->extts_pin_enabled;
if (aq_nic->aq_hw_ops->hw_extts_gpio_enable)
aq_nic->aq_hw_ops->hw_extts_gpio_enable(aq_nic->aq_hw, 0,
enable);
}
static int aq_ptp_extts_pin_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
u32 pin_index = rq->extts.index;
if (pin_index >= ptp->n_ext_ts)
return -EINVAL;
aq_ptp->extts_pin_enabled = !!on;
if (on) {
aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
cancel_delayed_work_sync(&aq_ptp->poll_sync);
schedule_delayed_work(&aq_ptp->poll_sync,
msecs_to_jiffies(aq_ptp->poll_timeout_ms));
}
aq_ptp_extts_pin_ctrl(aq_ptp);
return 0;
}
/* aq_ptp_gpio_feature_enable
* @ptp: the ptp clock structure
* @rq: the requested feature to change
* @on: whether to enable or disable the feature
*/
static int aq_ptp_gpio_feature_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
return aq_ptp_extts_pin_configure(ptp, rq, on);
case PTP_CLK_REQ_PEROUT:
return aq_ptp_perout_pin_configure(ptp, rq, on);
case PTP_CLK_REQ_PPS:
return aq_ptp_pps_pin_configure(ptp, rq, on);
default:
return -EOPNOTSUPP;
}
return 0;
}
/* aq_ptp_verify
* @ptp: the ptp clock structure
* @pin: index of the pin in question
* @func: the desired function to use
* @chan: the function channel index to use
*/
static int aq_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
/* verify the requested pin is there */
if (!ptp->pin_config || pin >= ptp->n_pins)
return -EINVAL;
/* enforce locked channels, no changing them */
if (chan != ptp->pin_config[pin].chan)
return -EINVAL;
/* we want to keep the functions locked as well */
if (func != ptp->pin_config[pin].func)
return -EINVAL;
return 0;
}
/* aq_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @aq_nic: the private NIC adapter structure
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the hwtstamps structure which
* is passed up the network stack
*/
void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
struct sk_buff *skb = aq_ptp_skb_get(&aq_ptp->skb_ring);
struct skb_shared_hwtstamps hwtstamp;
if (!skb) {
netdev_err(aq_nic->ndev, "have timestamp but tx_queus empty\n");
return;
}
timestamp += atomic_read(&aq_ptp->offset_egress);
aq_ptp_convert_to_hwtstamp(aq_ptp, &hwtstamp, timestamp);
skb_tstamp_tx(skb, &hwtstamp);
dev_kfree_skb_any(skb);
aq_ptp_tx_timeout_update(aq_ptp);
}
/* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
* @aq_ptp: pointer to the ptp structure
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the hwtstamps structure which
* is passed up the network stack
*/
static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
u64 timestamp)
{
timestamp -= atomic_read(&aq_ptp->offset_ingress);
aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
}
void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
struct hwtstamp_config *config)
{
*config = aq_ptp->hwtstamp_config;
}
static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
{
aq_ptp->udp_filter.cmd = HW_ATL_RX_ENABLE_FLTR_L3L4 |
HW_ATL_RX_ENABLE_CMP_PROT_L4 |
HW_ATL_RX_UDP |
HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT |
HW_ATL_RX_ENABLE_QUEUE_L3L4 |
aq_ptp->ptp_rx.idx << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
aq_ptp->udp_filter.p_dst = PTP_EV_PORT;
aq_ptp->eth_type_filter.ethertype = ETH_P_1588;
aq_ptp->eth_type_filter.queue = aq_ptp->ptp_rx.idx;
}
int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
struct hwtstamp_config *config)
{
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
const struct aq_hw_ops *hw_ops;
int err = 0;
hw_ops = aq_nic->aq_hw_ops;
if (config->tx_type == HWTSTAMP_TX_ON ||
config->rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) {
aq_ptp_prepare_filters(aq_ptp);
if (hw_ops->hw_filter_l3l4_set) {
err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
&aq_ptp->udp_filter);
}
if (!err && hw_ops->hw_filter_l2_set) {
err = hw_ops->hw_filter_l2_set(aq_nic->aq_hw,
&aq_ptp->eth_type_filter);
}
aq_utils_obj_set(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
} else {
aq_ptp->udp_filter.cmd &= ~HW_ATL_RX_ENABLE_FLTR_L3L4;
if (hw_ops->hw_filter_l3l4_set) {
err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
&aq_ptp->udp_filter);
}
if (!err && hw_ops->hw_filter_l2_clear) {
err = hw_ops->hw_filter_l2_clear(aq_nic->aq_hw,
&aq_ptp->eth_type_filter);
}
aq_utils_obj_clear(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
}
if (err)
return -EREMOTEIO;
aq_ptp->hwtstamp_config = *config;
return 0;
}
bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
if (!aq_ptp)
return false;
return &aq_ptp->ptp_tx == ring ||
&aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
}
u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
unsigned int len)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
u64 timestamp = 0;
u16 ret = aq_nic->aq_hw_ops->rx_extract_ts(aq_nic->aq_hw,
p, len, &timestamp);
if (ret > 0)
aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);
return ret;
}
static int aq_ptp_poll(struct napi_struct *napi, int budget)
{
struct aq_ptp_s *aq_ptp = container_of(napi, struct aq_ptp_s, napi);
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
bool was_cleaned = false;
int work_done = 0;
int err;
/* Processing PTP TX traffic */
err = aq_nic->aq_hw_ops->hw_ring_tx_head_update(aq_nic->aq_hw,
&aq_ptp->ptp_tx);
if (err < 0)
goto err_exit;
if (aq_ptp->ptp_tx.sw_head != aq_ptp->ptp_tx.hw_head) {
aq_ring_tx_clean(&aq_ptp->ptp_tx);
was_cleaned = true;
}
/* Processing HW_TIMESTAMP RX traffic */
err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_receive(aq_nic->aq_hw,
&aq_ptp->hwts_rx);
if (err < 0)
goto err_exit;
if (aq_ptp->hwts_rx.sw_head != aq_ptp->hwts_rx.hw_head) {
aq_ring_hwts_rx_clean(&aq_ptp->hwts_rx, aq_nic);
err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
&aq_ptp->hwts_rx);
was_cleaned = true;
}
/* Processing PTP RX traffic */
err = aq_nic->aq_hw_ops->hw_ring_rx_receive(aq_nic->aq_hw,
&aq_ptp->ptp_rx);
if (err < 0)
goto err_exit;
if (aq_ptp->ptp_rx.sw_head != aq_ptp->ptp_rx.hw_head) {
unsigned int sw_tail_old;
err = aq_ring_rx_clean(&aq_ptp->ptp_rx, napi, &work_done, budget);
if (err < 0)
goto err_exit;
sw_tail_old = aq_ptp->ptp_rx.sw_tail;
err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
if (err < 0)
goto err_exit;
err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
&aq_ptp->ptp_rx,
sw_tail_old);
if (err < 0)
goto err_exit;
}
if (was_cleaned)
work_done = budget;
if (work_done < budget) {
napi_complete_done(napi, work_done);
aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
1 << aq_ptp->ptp_ring_param.vec_idx);
}
err_exit:
return work_done;
}
static irqreturn_t aq_ptp_isr(int irq, void *private)
{
struct aq_ptp_s *aq_ptp = private;
int err = 0;
if (!aq_ptp) {
err = -EINVAL;
goto err_exit;
}
napi_schedule(&aq_ptp->napi);
err_exit:
return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}
int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
struct aq_ring_s *ring = &aq_ptp->ptp_tx;
unsigned long irq_flags;
int err = NETDEV_TX_OK;
unsigned int frags;
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
goto err_exit;
}
frags = skb_shinfo(skb)->nr_frags + 1;
/* Frags cannot be bigger than 16KB
* because PTP usually works
* without jumbo frames even in the background
*/
if (frags > AQ_CFG_SKB_FRAGS_MAX || frags > aq_ring_avail_dx(ring)) {
/* Drop the packet because it doesn't make sense to delay it */
dev_kfree_skb_any(skb);
goto err_exit;
}
err = aq_ptp_skb_put(&aq_ptp->skb_ring, skb);
if (err) {
netdev_err(aq_nic->ndev, "SKB Ring is overflow (%u)!\n",
ring->size);
return NETDEV_TX_BUSY;
}
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
aq_ptp_tx_timeout_start(aq_ptp);
skb_tx_timestamp(skb);
spin_lock_irqsave(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
frags = aq_nic_map_skb(aq_nic, skb, ring);
if (likely(frags)) {
err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
ring, frags);
if (err >= 0) {
++ring->stats.tx.packets;
ring->stats.tx.bytes += skb->len;
}
} else {
err = NETDEV_TX_BUSY;
}
spin_unlock_irqrestore(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
err_exit:
return err;
}
void aq_ptp_service_task(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
if (!aq_ptp)
return;
aq_ptp_tx_timeout_check(aq_ptp);
}
int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
{
struct pci_dev *pdev = aq_nic->pdev;
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
int err = 0;
if (!aq_ptp)
return 0;
if (pdev->msix_enabled || pdev->msi_enabled) {
err = request_irq(pci_irq_vector(pdev, aq_ptp->idx_vector),
aq_ptp_isr, 0, aq_nic->ndev->name, aq_ptp);
} else {
err = -EINVAL;
goto err_exit;
}
err_exit:
return err;
}
void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
struct pci_dev *pdev = aq_nic->pdev;
if (!aq_ptp)
return;
free_irq(pci_irq_vector(pdev, aq_ptp->idx_vector), aq_ptp);
}
int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
int err = 0;
if (!aq_ptp)
return 0;
err = aq_ring_init(&aq_ptp->ptp_tx);
if (err < 0)
goto err_exit;
err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
&aq_ptp->ptp_tx,
&aq_ptp->ptp_ring_param);
if (err < 0)
goto err_exit;
err = aq_ring_init(&aq_ptp->ptp_rx);
if (err < 0)
goto err_exit;
err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
&aq_ptp->ptp_rx,
&aq_ptp->ptp_ring_param);
if (err < 0)
goto err_exit;
err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
if (err < 0)
goto err_rx_free;
err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
&aq_ptp->ptp_rx,
0U);
if (err < 0)
goto err_rx_free;
err = aq_ring_init(&aq_ptp->hwts_rx);
if (err < 0)
goto err_rx_free;
err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
&aq_ptp->hwts_rx,
&aq_ptp->ptp_ring_param);
if (err < 0)
goto err_exit;
err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
&aq_ptp->hwts_rx);
if (err < 0)
goto err_exit;
return err;
err_rx_free:
aq_ring_rx_deinit(&aq_ptp->ptp_rx);
err_exit:
return err;
}
int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
int err = 0;
if (!aq_ptp)
return 0;
err = aq_nic->aq_hw_ops->hw_ring_tx_start(aq_nic->aq_hw, &aq_ptp->ptp_tx);
if (err < 0)
goto err_exit;
err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw, &aq_ptp->ptp_rx);
if (err < 0)
goto err_exit;
err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw,
&aq_ptp->hwts_rx);
if (err < 0)
goto err_exit;
napi_enable(&aq_ptp->napi);
err_exit:
return err;
}
void aq_ptp_ring_stop(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
if (!aq_ptp)
return;
aq_nic->aq_hw_ops->hw_ring_tx_stop(aq_nic->aq_hw, &aq_ptp->ptp_tx);
aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->ptp_rx);
aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->hwts_rx);
napi_disable(&aq_ptp->napi);
}
void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
if (!aq_ptp || !aq_ptp->ptp_tx.aq_nic || !aq_ptp->ptp_rx.aq_nic)
return;
aq_ring_tx_clean(&aq_ptp->ptp_tx);
aq_ring_rx_deinit(&aq_ptp->ptp_rx);
}
#define PTP_8TC_RING_IDX 8
#define PTP_4TC_RING_IDX 16
#define PTP_HWST_RING_IDX 31
int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
unsigned int tx_ring_idx, rx_ring_idx;
struct aq_ring_s *hwts = 0;
u32 tx_tc_mode, rx_tc_mode;
struct aq_ring_s *ring;
int err;
if (!aq_ptp)
return 0;
/* Index must be 8 (8 TCs) or 16 (4 TCs).
* It depends on the Traffic Class mode.
*/
aq_nic->aq_hw_ops->hw_tx_tc_mode_get(aq_nic->aq_hw, &tx_tc_mode);
if (tx_tc_mode == 0)
tx_ring_idx = PTP_8TC_RING_IDX;
else
tx_ring_idx = PTP_4TC_RING_IDX;
ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
tx_ring_idx, &aq_nic->aq_nic_cfg);
if (!ring) {
err = -ENOMEM;
goto err_exit;
}
aq_nic->aq_hw_ops->hw_rx_tc_mode_get(aq_nic->aq_hw, &rx_tc_mode);
if (rx_tc_mode == 0)
rx_ring_idx = PTP_8TC_RING_IDX;
else
rx_ring_idx = PTP_4TC_RING_IDX;
ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
rx_ring_idx, &aq_nic->aq_nic_cfg);
if (!ring) {
err = -ENOMEM;
goto err_exit_ptp_tx;
}
hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
aq_nic->aq_nic_cfg.rxds,
aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
if (!hwts) {
err = -ENOMEM;
goto err_exit_ptp_rx;
}
err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
if (err != 0) {
err = -ENOMEM;
goto err_exit_hwts_rx;
}
aq_ptp->ptp_ring_param.vec_idx = aq_ptp->idx_vector;
aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
aq_nic_get_cfg(aq_nic)->aq_rss.base_cpu_number;
cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
&aq_ptp->ptp_ring_param.affinity_mask);
return 0;
err_exit_hwts_rx:
aq_ring_free(&aq_ptp->hwts_rx);
err_exit_ptp_rx:
aq_ring_free(&aq_ptp->ptp_rx);
err_exit_ptp_tx:
aq_ring_free(&aq_ptp->ptp_tx);
err_exit:
return err;
}
void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
if (!aq_ptp)
return;
aq_ring_free(&aq_ptp->ptp_tx);
aq_ring_free(&aq_ptp->ptp_rx);
aq_ring_free(&aq_ptp->hwts_rx);
aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
}
#define MAX_PTP_GPIO_COUNT 4
static struct ptp_clock_info aq_ptp_clock = {
.owner = THIS_MODULE,
.name = "atlantic ptp",
.max_adj = 999999999,
.n_ext_ts = 0,
.pps = 0,
.adjfine = aq_ptp_adjfine,
.adjtime = aq_ptp_adjtime,
.gettime64 = aq_ptp_gettime,
.settime64 = aq_ptp_settime,
.n_per_out = 0,
.enable = aq_ptp_gpio_feature_enable,
.n_pins = 0,
.verify = aq_ptp_verify,
.pin_config = NULL,
};
#define ptp_offset_init(__idx, __mbps, __egress, __ingress) do { \
ptp_offset[__idx].mbps = (__mbps); \
ptp_offset[__idx].egress = (__egress); \
ptp_offset[__idx].ingress = (__ingress); } \
while (0)
static void aq_ptp_offset_init_from_fw(const struct hw_aq_ptp_offset *offsets)
{
int i;
/* Load offsets for PTP */
for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
switch (i) {
/* 100M */
case ptp_offset_idx_100:
ptp_offset_init(i, 100,
offsets->egress_100,
offsets->ingress_100);
break;
/* 1G */
case ptp_offset_idx_1000:
ptp_offset_init(i, 1000,
offsets->egress_1000,
offsets->ingress_1000);
break;
/* 2.5G */
case ptp_offset_idx_2500:
ptp_offset_init(i, 2500,
offsets->egress_2500,
offsets->ingress_2500);
break;
/* 5G */
case ptp_offset_idx_5000:
ptp_offset_init(i, 5000,
offsets->egress_5000,
offsets->ingress_5000);
break;
/* 10G */
case ptp_offset_idx_10000:
ptp_offset_init(i, 10000,
offsets->egress_10000,
offsets->ingress_10000);
break;
}
}
}
static void aq_ptp_offset_init(const struct hw_aq_ptp_offset *offsets)
{
memset(ptp_offset, 0, sizeof(ptp_offset));
aq_ptp_offset_init_from_fw(offsets);
}
static void aq_ptp_gpio_init(struct ptp_clock_info *info,
struct hw_aq_info *hw_info)
{
struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
u32 extts_pin_cnt = 0;
u32 out_pin_cnt = 0;
u32 i;
memset(pin_desc, 0, sizeof(pin_desc));
for (i = 0; i < MAX_PTP_GPIO_COUNT - 1; i++) {
if (hw_info->gpio_pin[i] ==
(GPIO_PIN_FUNCTION_PTP0 + out_pin_cnt)) {
snprintf(pin_desc[out_pin_cnt].name,
sizeof(pin_desc[out_pin_cnt].name),
"AQ_GPIO%d", i);
pin_desc[out_pin_cnt].index = out_pin_cnt;
pin_desc[out_pin_cnt].chan = out_pin_cnt;
pin_desc[out_pin_cnt++].func = PTP_PF_PEROUT;
}
}
info->n_per_out = out_pin_cnt;
if (hw_info->caps_ex & BIT(CAPS_EX_PHY_CTRL_TS_PIN)) {
extts_pin_cnt += 1;
snprintf(pin_desc[out_pin_cnt].name,
sizeof(pin_desc[out_pin_cnt].name),
"AQ_GPIO%d", out_pin_cnt);
pin_desc[out_pin_cnt].index = out_pin_cnt;
pin_desc[out_pin_cnt].chan = 0;
pin_desc[out_pin_cnt].func = PTP_PF_EXTTS;
}
info->n_pins = out_pin_cnt + extts_pin_cnt;
info->n_ext_ts = extts_pin_cnt;
if (!info->n_pins)
return;
info->pin_config = kcalloc(info->n_pins, sizeof(struct ptp_pin_desc),
GFP_KERNEL);
if (!info->pin_config)
return;
memcpy(info->pin_config, &pin_desc,
sizeof(struct ptp_pin_desc) * info->n_pins);
}
void aq_ptp_clock_init(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
struct timespec64 ts;
ktime_get_real_ts64(&ts);
aq_ptp_settime(&aq_ptp->ptp_info, &ts);
}
static void aq_ptp_poll_sync_work_cb(struct work_struct *w);
int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
{
struct hw_atl_utils_mbox mbox;
struct ptp_clock *clock;
struct aq_ptp_s *aq_ptp;
int err = 0;
if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
aq_nic->aq_ptp = NULL;
return 0;
}
if (!aq_nic->aq_fw_ops->enable_ptp) {
aq_nic->aq_ptp = NULL;
return 0;
}
hw_atl_utils_mpi_read_stats(aq_nic->aq_hw, &mbox);
if (!(mbox.info.caps_ex & BIT(CAPS_EX_PHY_PTP_EN))) {
aq_nic->aq_ptp = NULL;
return 0;
}
aq_ptp_offset_init(&mbox.info.ptp_offset);
aq_ptp = kzalloc(sizeof(*aq_ptp), GFP_KERNEL);
if (!aq_ptp) {
err = -ENOMEM;
goto err_exit;
}
aq_ptp->aq_nic = aq_nic;
spin_lock_init(&aq_ptp->ptp_lock);
spin_lock_init(&aq_ptp->ptp_ring_lock);
aq_ptp->ptp_info = aq_ptp_clock;
aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info);
clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
if (!clock || IS_ERR(clock)) {
netdev_err(aq_nic->ndev, "ptp_clock_register failed\n");
err = PTR_ERR(clock);
goto err_exit;
}
aq_ptp->ptp_clock = clock;
aq_ptp_tx_timeout_init(&aq_ptp->ptp_tx_timeout);
atomic_set(&aq_ptp->offset_egress, 0);
atomic_set(&aq_ptp->offset_ingress, 0);
netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);
aq_ptp->idx_vector = idx_vec;
aq_nic->aq_ptp = aq_ptp;
/* enable ptp counter */
aq_utils_obj_set(&aq_nic->aq_hw->flags, AQ_HW_PTP_AVAILABLE);
mutex_lock(&aq_nic->fwreq_mutex);
aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 1);
aq_ptp_clock_init(aq_nic);
mutex_unlock(&aq_nic->fwreq_mutex);
INIT_DELAYED_WORK(&aq_ptp->poll_sync, &aq_ptp_poll_sync_work_cb);
aq_ptp->eth_type_filter.location =
aq_nic_reserve_filter(aq_nic, aq_rx_filter_ethertype);
aq_ptp->udp_filter.location =
aq_nic_reserve_filter(aq_nic, aq_rx_filter_l3l4);
return 0;
err_exit:
if (aq_ptp)
kfree(aq_ptp->ptp_info.pin_config);
kfree(aq_ptp);
aq_nic->aq_ptp = NULL;
return err;
}
void aq_ptp_unregister(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
if (!aq_ptp)
return;
ptp_clock_unregister(aq_ptp->ptp_clock);
}
void aq_ptp_free(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
if (!aq_ptp)
return;
aq_nic_release_filter(aq_nic, aq_rx_filter_ethertype,
aq_ptp->eth_type_filter.location);
aq_nic_release_filter(aq_nic, aq_rx_filter_l3l4,
aq_ptp->udp_filter.location);
cancel_delayed_work_sync(&aq_ptp->poll_sync);
/* disable ptp */
mutex_lock(&aq_nic->fwreq_mutex);
aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 0);
mutex_unlock(&aq_nic->fwreq_mutex);
kfree(aq_ptp->ptp_info.pin_config);
netif_napi_del(&aq_ptp->napi);
kfree(aq_ptp);
aq_nic->aq_ptp = NULL;
}
struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
{
return aq_ptp->ptp_clock;
}
/* PTP external GPIO nanoseconds count */
static uint64_t aq_ptp_get_sync1588_ts(struct aq_nic_s *aq_nic)
{
u64 ts = 0;
if (aq_nic->aq_hw_ops->hw_get_sync_ts)
aq_nic->aq_hw_ops->hw_get_sync_ts(aq_nic->aq_hw, &ts);
return ts;
}
static void aq_ptp_start_work(struct aq_ptp_s *aq_ptp)
{
if (aq_ptp->extts_pin_enabled) {
aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
aq_ptp->last_sync1588_ts =
aq_ptp_get_sync1588_ts(aq_ptp->aq_nic);
schedule_delayed_work(&aq_ptp->poll_sync,
msecs_to_jiffies(aq_ptp->poll_timeout_ms));
}
}
int aq_ptp_link_change(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
if (!aq_ptp)
return 0;
if (aq_nic->aq_hw->aq_link_status.mbps)
aq_ptp_start_work(aq_ptp);
else
cancel_delayed_work_sync(&aq_ptp->poll_sync);
return 0;
}
static bool aq_ptp_sync_ts_updated(struct aq_ptp_s *aq_ptp, u64 *new_ts)
{
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
u64 sync_ts2;
u64 sync_ts;
sync_ts = aq_ptp_get_sync1588_ts(aq_nic);
if (sync_ts != aq_ptp->last_sync1588_ts) {
sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
if (sync_ts != sync_ts2) {
sync_ts = sync_ts2;
sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
if (sync_ts != sync_ts2) {
netdev_err(aq_nic->ndev,
"%s: Unable to get correct GPIO TS",
__func__);
sync_ts = 0;
}
}
*new_ts = sync_ts;
return true;
}
return false;
}
static int aq_ptp_check_sync1588(struct aq_ptp_s *aq_ptp)
{
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
u64 sync_ts;
/* Sync1588 pin was triggered */
if (aq_ptp_sync_ts_updated(aq_ptp, &sync_ts)) {
if (aq_ptp->extts_pin_enabled) {
struct ptp_clock_event ptp_event;
u64 time = 0;
aq_nic->aq_hw_ops->hw_ts_to_sys_clock(aq_nic->aq_hw,
sync_ts, &time);
ptp_event.index = aq_ptp->ptp_info.n_pins - 1;
ptp_event.timestamp = time;
ptp_event.type = PTP_CLOCK_EXTTS;
ptp_clock_event(aq_ptp->ptp_clock, &ptp_event);
}
aq_ptp->last_sync1588_ts = sync_ts;
}
return 0;
}
void aq_ptp_poll_sync_work_cb(struct work_struct *w)
{
struct delayed_work *dw = to_delayed_work(w);
struct aq_ptp_s *aq_ptp = container_of(dw, struct aq_ptp_s, poll_sync);
aq_ptp_check_sync1588(aq_ptp);
if (aq_ptp->extts_pin_enabled) {
unsigned long timeout = msecs_to_jiffies(aq_ptp->poll_timeout_ms);
schedule_delayed_work(&aq_ptp->poll_sync, timeout);
}
}
/* SPDX-License-Identifier: GPL-2.0-only */
/* Aquantia Corporation Network Driver
* Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
*/
/* File aq_ptp.h: Declaration of PTP functions.
*/
#ifndef AQ_PTP_H
#define AQ_PTP_H
#include <linux/net_tstamp.h>
#include <linux/version.h>
/* Common functions */
int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec);
void aq_ptp_unregister(struct aq_nic_s *aq_nic);
void aq_ptp_free(struct aq_nic_s *aq_nic);
int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic);
void aq_ptp_irq_free(struct aq_nic_s *aq_nic);
int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic);
void aq_ptp_ring_free(struct aq_nic_s *aq_nic);
int aq_ptp_ring_init(struct aq_nic_s *aq_nic);
int aq_ptp_ring_start(struct aq_nic_s *aq_nic);
void aq_ptp_ring_stop(struct aq_nic_s *aq_nic);
void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic);
void aq_ptp_service_task(struct aq_nic_s *aq_nic);
void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps);
void aq_ptp_clock_init(struct aq_nic_s *aq_nic);
/* Traffic processing functions */
int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb);
void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp);
/* Callers must check PTP availability before calling these */
void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
struct hwtstamp_config *config);
int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
struct hwtstamp_config *config);
/* Return whether the ring belongs to PTP or not */
bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
unsigned int len);
struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
int aq_ptp_link_change(struct aq_nic_s *aq_nic);
#endif /* AQ_PTP_H */
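The ptp_clock registered by aq_ptp_init() above is exposed to userspace as a /dev/ptpN character device (N being the phc_index reported through .get_ts_info). A minimal sketch of reading that clock, in the spirit of the testptp tool mentioned in the cover letter; the device path and the locally defined FD_TO_CLOCKID macro (copied from testptp's convention) are assumptions for illustration.

/* Hedged sketch: read the PHC exposed by this driver. */
#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
    struct timespec ts;
    int fd = open("/dev/ptp0", O_RDONLY);   /* assumed clock index */

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Serviced by aq_ptp_gettime() through the registered ptp_clock_info */
    if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
        printf("PHC time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
    else
        perror("clock_gettime");

    close(fd);
    return 0;
}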
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
......@@ -10,6 +10,7 @@
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
......@@ -177,6 +178,30 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
return self;
}
struct aq_ring_s *
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
unsigned int idx, unsigned int size, unsigned int dx_size)
{
struct device *dev = aq_nic_get_dev(aq_nic);
size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;
memset(self, 0, sizeof(*self));
self->aq_nic = aq_nic;
self->idx = idx;
self->size = size;
self->dx_size = dx_size;
self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
GFP_KERNEL);
if (!self->dx_ring) {
aq_ring_free(self);
return NULL;
}
return self;
}
int aq_ring_init(struct aq_ring_s *self)
{
self->hw_head = 0;
......@@ -290,6 +315,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
self->sw_head = aq_ring_next_dx(self, self->sw_head),
--budget, ++(*work_done)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
struct aq_ring_buff_s *buff_ = NULL;
struct sk_buff *skb = NULL;
unsigned int next_ = 0U;
......@@ -354,6 +380,11 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
if (is_ptp_ring)
buff->len -=
aq_ptp_extract_ts(self->aq_nic, skb,
aq_buf_vaddr(&buff->rxdata),
buff->len);
skb_put(skb, buff->len);
page_ref_inc(buff->rxdata.page);
} else {
......@@ -362,6 +393,11 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
if (is_ptp_ring)
buff->len -=
aq_ptp_extract_ts(self->aq_nic, skb,
aq_buf_vaddr(&buff->rxdata),
buff->len);
hdr_len = buff->len;
if (hdr_len > AQ_CFG_RX_HDR_SIZE)
......@@ -421,8 +457,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_set_hash(skb, buff->rss_hash,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_NONE);
skb_record_rx_queue(skb, self->idx);
/* Send all PTP traffic to queue 0 */
skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx);
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
......@@ -434,6 +470,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
return err;
}
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
while (self->sw_head != self->hw_head) {
u64 ns;
aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
self->dx_ring +
(self->sw_head * self->dx_size),
self->dx_size, &ns);
aq_ptp_tx_hwtstamp(aq_nic, ns);
self->sw_head = aq_ring_next_dx(self, self->sw_head);
}
}
int aq_ring_rx_fill(struct aq_ring_s *self)
{
unsigned int page_order = self->page_order;
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
......@@ -174,4 +174,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget);
int aq_ring_rx_fill(struct aq_ring_s *self);
struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic, unsigned int idx,
unsigned int size, unsigned int dx_size);
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
#endif /* AQ_RING_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
......@@ -10,6 +10,7 @@
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "../aq_phy.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
......@@ -49,6 +50,8 @@
.mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
#define FRAC_PER_NS 0x100000000LL
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
......@@ -124,13 +127,16 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
tc = 0;
/* TX Packet Scheduler Data TC0 */
hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
/* Tx buf size */
buff_size = HW_ATL_B0_TXBUF_MAX;
/* Tx buf size TC0 */
buff_size = HW_ATL_B0_TXBUF_MAX - HW_ATL_B0_PTP_TXBUF_SIZE;
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
......@@ -141,10 +147,15 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(buff_size *
(1024 / 32U) * 50U) /
100U, tc);
/* Init TC2 for PTP_TX */
tc = 2;
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
tc);
/* QoS Rx buf size per TC */
tc = 0;
buff_size = HW_ATL_B0_RXBUF_MAX;
buff_size = HW_ATL_B0_RXBUF_MAX - HW_ATL_B0_PTP_RXBUF_SIZE;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
......@@ -158,6 +169,14 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
/* Init TC2 for PTP_RX */
tc = 2;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
tc);
/* No flow control for PTP */
hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
......@@ -664,6 +683,46 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
unsigned int i;
for (i = aq_ring_avail_dx(ring); i--;
ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) {
struct hw_atl_rxd_s *rxd =
(struct hw_atl_rxd_s *)
&ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE];
rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size;
rxd->hdr_addr = 0U;
}
/* Make sure descriptors are updated before bumping the tail */
wmb();
hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return aq_hw_err_from_flags(self);
}
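Context note (illustrative, not part of the patch): every HWTS descriptor above is given the same buf_addr, the region just past the descriptor array that aq_ring_hwts_rx_alloc() over-allocates by AQ_CFG_RXDS_DEF bytes; the driver only consumes the writeback words of each descriptor, so a single shared DMA target is presumably sufficient. A minimal sketch of that address:
/* Sketch: the shared DMA target used by every HWTS descriptor is the
 * tail of the coherent allocation made in aq_ring_hwts_rx_alloc()
 * (size * dx_size bytes of descriptors + AQ_CFG_RXDS_DEF spare bytes).
 */
static dma_addr_t hwts_shared_buf_addr(const struct aq_ring_s *ring)
{
	return ring->dx_ring_pa + ring->size * ring->dx_size;
}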
static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
while (ring->hw_head != ring->sw_tail) {
struct hw_atl_rxd_hwts_wb_s *hwts_wb =
(struct hw_atl_rxd_hwts_wb_s *)
(ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE));
/* RxD is not done */
if (!(hwts_wb->sec_lw0 & 0x1U))
break;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head);
}
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
......@@ -1005,6 +1064,228 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
{
*tc_mode = hw_atl_rpb_tps_tx_tc_mode_get(self);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
{
*tc_mode = hw_atl_rpb_rpf_rx_traf_class_mode_get(self);
return aq_hw_err_from_flags(self);
}
#define get_ptp_ts_val_u64(self, indx) \
((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))
static void hw_atl_b0_get_ptp_ts(struct aq_hw_s *self, u64 *stamp)
{
u64 ns;
hw_atl_pcs_ptp_clock_read_enable(self, 1);
hw_atl_pcs_ptp_clock_read_enable(self, 0);
ns = (get_ptp_ts_val_u64(self, 0) +
(get_ptp_ts_val_u64(self, 1) << 16)) * NSEC_PER_SEC +
(get_ptp_ts_val_u64(self, 3) +
(get_ptp_ts_val_u64(self, 4) << 16));
*stamp = ns + self->ptp_clk_offset;
}
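Worked example (hypothetical register contents, illustration only): the read-enable toggle above latches the counter; registers 0/1 then hold the low/high 16 bits of the seconds count and registers 3/4 the low/high 16 bits of the nanoseconds count.
/* Hypothetical latched values:
 *   reg[0] = 0x5678, reg[1] = 0x1234  ->  sec  = 0x12345678 (305419896 s)
 *   reg[3] = 0xcdef, reg[4] = 0x09ab  ->  nsec = 0x09abcdef (162254319 ns)
 *
 *   ns     = sec * NSEC_PER_SEC + nsec      raw hardware time
 *   *stamp = ns + self->ptp_clk_offset      driver-maintained system time
 */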
static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
{
/* For accuracy, the value is scaled up before the division */
s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC);
u64 nsi_frac = 0;
u64 nsi;
base_ns = div64_s64(base_ns, freq);
nsi = div64_u64(base_ns, NSEC_PER_SEC);
if (base_ns != nsi * NSEC_PER_SEC) {
s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
base_ns - nsi * NSEC_PER_SEC);
nsi_frac = div64_s64(FRAC_PER_NS * NSEC_PER_SEC, divisor);
}
*ns = (u32)nsi;
*fns = (u32)nsi_frac;
}
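Worked example (hypothetical 100 MHz counter; the real AQ_HW_MAC_COUNTER_HZ / AQ_HW_PHY_COUNTER_HZ values are defined outside this hunk):
/* freq = 100 MHz, adj = +100 ppb:
 *
 *   base_ns  = (100 + 1e9) * 1e9 / 1e8          = 10000001000
 *   nsi      = base_ns / 1e9                    = 10      -> *ns  = 10
 *   rem      = base_ns - nsi * 1e9              = 1000
 *   divisor  = (1e9 * 1e9) / rem                = 1e15
 *   nsi_frac = (0x100000000 * 1e9) / divisor    = 4294    -> *fns = 4294
 *
 * The per-tick increment handed to firmware is therefore
 * 10 ns + 4294/2^32 ns ~= 10.000001 ns, i.e. the nominal 1/freq period
 * scaled by (1 + adj / 1e9).
 */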
static void
hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
u64 phyfreq, u64 macfreq)
{
s64 adj_fns_val;
s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
FRAC_PER_NS * ptp_adj_freq->ns_phy);
s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
FRAC_PER_NS * ptp_adj_freq->ns_mac);
s64 fault_in_sec_phy = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
s64 fault_in_sec_mac = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
/* MAC MCP counter freq is macfreq / 4 */
s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
4 * FRAC_PER_NS;
diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
AQ_HW_MAC_COUNTER_HZ);
adj_fns_val = (ptp_adj_freq->fns_mac + FRAC_PER_NS *
ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;
ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, FRAC_PER_NS);
ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
FRAC_PER_NS;
}
static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
{
self->ptp_clk_offset += delta;
return 0;
}
static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts)
{
s64 delta = time - (self->ptp_clk_offset + ts);
return hw_atl_b0_adj_sys_clock(self, delta);
}
int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
{
*time = self->ptp_clk_offset + ts;
return 0;
}
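Note (illustrative): the three helpers above keep the PTP clock purely as a software offset over the free-running hardware counter; the invariant they maintain is:
/*   sys_time(ts) = self->ptp_clk_offset + ts              (ts = raw HW time)
 *
 *   hw_atl_b0_adj_sys_clock(self, delta): ptp_clk_offset += delta
 *   hw_atl_b0_set_sys_clock(self, T, ts): ptp_clk_offset += T - (ptp_clk_offset + ts)
 *                                         so that sys_time(ts) == T afterwards
 *   hw_atl_b0_ts_to_sys_clock(self, ts):  returns ptp_clk_offset + ts
 */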
static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb)
{
struct hw_fw_request_iface fwreq;
size_t size;
memset(&fwreq, 0, sizeof(fwreq));
fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_FREQ;
hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ, ppb,
&fwreq.ptp_adj_freq.ns_mac,
&fwreq.ptp_adj_freq.fns_mac);
hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ, ppb,
&fwreq.ptp_adj_freq.ns_phy,
&fwreq.ptp_adj_freq.fns_phy);
hw_atl_b0_mac_adj_param_calc(&fwreq.ptp_adj_freq,
AQ_HW_PHY_COUNTER_HZ,
AQ_HW_MAC_COUNTER_HZ);
size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_freq);
return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
}
static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
u64 start, u32 period)
{
struct hw_fw_request_iface fwreq;
size_t size;
memset(&fwreq, 0, sizeof(fwreq));
fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_GPIO_CTRL;
fwreq.ptp_gpio_ctrl.index = index;
fwreq.ptp_gpio_ctrl.period = period;
/* Apply time offset */
fwreq.ptp_gpio_ctrl.start = start - self->ptp_clk_offset;
size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
}
static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s *self, u32 index,
u32 enable)
{
/* Enable/disable Sync1588 GPIO Timestamping */
aq_phy_write_reg(self, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0);
return 0;
}
static int hw_atl_b0_get_sync_ts(struct aq_hw_s *self, u64 *ts)
{
u64 sec_l;
u64 sec_h;
u64 nsec_l;
u64 nsec_h;
if (!ts)
return -1;
/* PTP external GPIO clock seconds count 15:0 */
sec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc914);
/* PTP external GPIO clock seconds count 31:16 */
sec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc915);
/* PTP external GPIO clock nanoseconds count 15:0 */
nsec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc916);
/* PTP external GPIO clock nanoseconds count 31:16 */
nsec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc917);
*ts = (nsec_h << 16) + nsec_l + ((sec_h << 16) + sec_l) * NSEC_PER_SEC;
return 0;
}
static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p,
unsigned int len, u64 *timestamp)
{
unsigned int offset = 14;
struct ethhdr *eth;
u64 sec;
u8 *ptr;
u32 ns;
if (len <= offset || !timestamp)
return 0;
/* The TIMESTAMP at the end of the packet has the following format:
* (big-endian)
* struct {
* uint64_t sec;
* uint32_t ns;
* uint16_t stream_id;
* };
*/
ptr = p + (len - offset);
memcpy(&sec, ptr, sizeof(sec));
ptr += sizeof(sec);
memcpy(&ns, ptr, sizeof(ns));
sec = be64_to_cpu(sec) & 0xffffffffffffllu;
ns = be32_to_cpu(ns);
*timestamp = sec * NSEC_PER_SEC + ns + self->ptp_clk_offset;
eth = (struct ethhdr *)p;
return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14;
}
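Worked example (hypothetical trailer bytes, illustration only): the hardware appends a 14-byte big-endian trailer, and the return value above is the number of trailing bytes the caller strips from buff->len in aq_ring_rx_clean() -- 14 for ordinary frames, 12 when the frame is layer-2 PTP (ETH_P_1588).
/* Last 14 bytes of a received buffer (big-endian):
 *
 *   offset  len  field       example bytes             value
 *   0       8    sec         00 00 00 00 5d 9e 2f 10   0x5d9e2f10 s
 *   8       4    ns          06 1f 2c 40               0x061f2c40 ns
 *   12      2    stream_id   00 01                     (unused here)
 *
 *   *timestamp = 0x5d9e2f10 * NSEC_PER_SEC + 0x061f2c40 + ptp_clk_offset
 */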
static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len,
u64 *timestamp)
{
struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p;
u64 tmp, sec, ns;
sec = 0;
tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff;
sec += tmp;
tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10;
sec += tmp;
tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26;
sec += tmp;
tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38;
sec += tmp;
ns = sec * NSEC_PER_SEC + hwts_wb->ns;
if (timestamp)
*timestamp = ns + self->ptp_clk_offset;
return 0;
}
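Note (illustrative): the 48-bit seconds value in the HWTS writeback is scattered across the descriptor words; the shifts above reassemble it as follows, shown here with a hypothetical value for concreteness.
/*   sec[ 0.. 9] <- sec_lw0[11:2]
 *   sec[10..25] <- sec_lw1[31:16]
 *   sec[26..37] <- sec_hw[11:0]
 *   sec[38..47] <- sec_hw[31:22]
 *
 * e.g. sec = 0x5d9e2f10 would be carried as
 *   sec_lw0[11:2] = 0x310, sec_lw1[31:16] = 0x678b,
 *   sec_hw[11:0]  = 0x017, sec_hw[31:22]  = 0x000
 */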
static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
struct aq_rx_filter_l3l4 *data)
{
......@@ -1038,7 +1319,8 @@ static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
hw_atl_b0_hw_fl3l4_clear(self, data);
if (data->cmd) {
if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 |
HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3)) {
if (!data->is_ipv6) {
hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
location,
......@@ -1055,8 +1337,13 @@ static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
data->ip_src);
}
}
if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4)) {
hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
hw_atl_rpf_l4_spd_set(self, data->p_src, location);
}
hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
return aq_hw_err_from_flags(self);
......@@ -1177,6 +1464,23 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
.hw_tx_tc_mode_get = hw_atl_b0_tx_tc_mode_get,
.hw_rx_tc_mode_get = hw_atl_b0_rx_tc_mode_get,
.hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill,
.hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive,
.hw_get_ptp_ts = hw_atl_b0_get_ptp_ts,
.hw_adj_sys_clock = hw_atl_b0_adj_sys_clock,
.hw_set_sys_clock = hw_atl_b0_set_sys_clock,
.hw_ts_to_sys_clock = hw_atl_b0_ts_to_sys_clock,
.hw_adj_clock_freq = hw_atl_b0_adj_clock_freq,
.hw_gpio_pulse = hw_atl_b0_gpio_pulse,
.hw_extts_gpio_enable = hw_atl_b0_extts_gpio_enable,
.hw_get_sync_ts = hw_atl_b0_get_sync_ts,
.rx_extract_ts = hw_atl_b0_rx_extract_ts,
.extract_hwts = hw_atl_b0_extract_hwts,
.hw_set_offload = hw_atl_b0_hw_offload_set,
.hw_set_fc = hw_atl_b0_set_fc,
};
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific
......@@ -65,7 +65,10 @@
#define HW_ATL_B0_MPI_SPEED_SHIFT 16U
#define HW_ATL_B0_TXBUF_MAX 160U
#define HW_ATL_B0_PTP_TXBUF_SIZE 8U
#define HW_ATL_B0_RXBUF_MAX 320U
#define HW_ATL_B0_PTP_RXBUF_SIZE 16U
#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U
#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh.c: Definitions of bitfield and register access functions for
......@@ -572,6 +572,13 @@ void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
rx_traf_class_mode);
}
u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT);
}
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
......@@ -636,8 +643,8 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
rx_pkt_buff_size_per_tc);
}
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
u32 buffer)
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_xoff_en_per_tc, u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
HW_ATL_RPB_RXBXOFF_EN_MSK,
......@@ -1290,6 +1297,13 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
HW_ATL_TPB_TX_TC_MODE_MSK,
HW_ATL_TPB_TX_TC_MODE_SHIFT);
}
void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode)
{
......@@ -1526,6 +1540,20 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
glb_cpu_scratch_scp);
}
void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
u32 ptp_clock_read_enable)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR,
HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSK,
HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT,
ptp_clock_read_enable);
}
u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index)
{
return aq_hw_read_reg(aq_hw, HW_ATL_PCS_PTP_TS_VAL_ADDR(index));
}
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR,
......@@ -1616,6 +1644,11 @@ u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
}
u32 hw_atl_sem_mdio_get(struct aq_hw_s *self)
{
return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_MDIO);
}
u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp)
{
return aq_hw_read_reg(aq_hw,
......@@ -1631,3 +1664,60 @@ u32 hw_atl_scrpad25_get(struct aq_hw_s *self)
{
return hw_atl_scrpad_get(self, 0x18);
}
void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *aq_hw, u32 value)
{
aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(1), value);
}
u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(1));
}
void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *aq_hw, u32 value)
{
aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(2), value);
}
u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(2));
}
void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *aq_hw, u32 value)
{
aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(3), value);
}
u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(3));
}
void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *aq_hw, u32 value)
{
aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(4), value);
}
u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(4));
}
void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *aq_hw, u32 value)
{
aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(5), value);
}
u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(5));
}
u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_MDIO_BUSY_ADR,
HW_ATL_MDIO_BUSY_MSK,
HW_ATL_MDIO_BUSY_SHIFT);
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh.h: Declarations of bitfield and register access functions for
......@@ -292,6 +292,9 @@ void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode);
/* get rx traffic class mode */
u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw);
/* set rx buffer enable */
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
......@@ -306,7 +309,8 @@ void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 buffer);
/* set rx flow control mode */
void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
u32 rx_flow_ctl_mode);
/* set rx packet buffer size (per tc) */
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
......@@ -320,7 +324,8 @@ void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
/* set rx xoff enable (per tc) */
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_xoff_en_per_tc,
u32 buffer);
/* rpf */
......@@ -605,6 +610,9 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode);
/* get TX Traffic Class Mode */
u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
......@@ -623,7 +631,8 @@ void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_
/* set tx packet buffer size (per tc) */
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_buff_size_per_tc, u32 buffer);
u32 tx_pkt_buff_size_per_tc,
u32 buffer);
/* set tx path pad insert enable */
void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
......@@ -715,6 +724,12 @@ void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
/* set pci register reset disable */
void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
/* pcs */
void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
u32 ptp_clock_read_enable);
u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index);
/* set uP Force Interrupt */
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
......@@ -752,9 +767,44 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 *ipv6_dest);
/* set Global MDIO Interface 1 */
void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *hw, u32 value);
/* get Global MDIO Interface 1 */
u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *hw);
/* set Global MDIO Interface 2 */
void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *hw, u32 value);
/* get Global MDIO Interface 2 */
u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *hw);
/* set Global MDIO Interface 3 */
void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *hw, u32 value);
/* get Global MDIO Interface 3 */
u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *hw);
/* set Global MDIO Interface 4 */
void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *hw, u32 value);
/* get Global MDIO Interface 4 */
u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *hw);
/* set Global MDIO Interface 5 */
void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *hw, u32 value);
/* get Global MDIO Interface 5 */
u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *hw);
u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw);
/* get global microprocessor ram semaphore */
u32 hw_atl_sem_ram_get(struct aq_hw_s *self);
/* get global microprocessor mdio semaphore */
u32 hw_atl_sem_mdio_get(struct aq_hw_s *self);
/* get global microprocessor scratch pad register */
u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp);
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh_internal.h: Preprocessor definitions
......@@ -1308,6 +1308,52 @@
/* default value of bitfield et_val{f}[f:0] */
#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
/* RX l3_l4_en{F} Bitfield Definitions
* Preprocessor definitions for the bitfield "l3_l4_en{F}".
* Parameter: filter {F} | stride size 0x4 | range [0, 7]
* PORT="pif_rpf_l3_l4_en_i[0]"
*/
#define HW_ATL_RPF_L3_REG_CTRL_ADR(filter) (0x00005380 + (filter) * 0x4)
/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
* Parameter: location {D} | stride size 0x4 | range [0, 7]
* PORT="pif_rpf_l3_sa0_i[31:0]"
*/
/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
#define HW_ATL_RPF_L3_SRCA_ADR(filter) (0x000053B0 + (filter) * 0x4)
/* Bitmask for bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
/* Inverted bitmask for bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
/* Lower bit position of bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_SHIFT 0
/* Width of bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_WIDTH 32
/* Default value of bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
* Parameter: location {D} | stride size 0x4 | range [0, 7]
* PORT="pif_rpf_l3_da0_i[31:0]"
*/
/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4)
/* Bitmask for bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
/* Inverted bitmask for bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
/* Lower bit position of bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_SHIFT 0
/* Width of bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_WIDTH 32
/* Default value of bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
/* RX l4_sp{D}[F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
* Parameter: srcport {D} | stride size 0x4 | range [0, 7]
......@@ -2440,6 +2486,22 @@
/* default value of bitfield register write strobe */
#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
/* register address for bitfield PTP Digital Clock Read Enable */
#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR 0x00004628
/* bitmask for bitfield PTP Digital Clock Read Enable */
#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSK 0x00000010
/* inverted bitmask for bitfield PTP Digital Clock Read Enable */
#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSKN 0xFFFFFFEF
/* lower bit position of bitfield PTP Digital Clock Read Enable */
#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT 4
/* width of bitfield PTP Digital Clock Read Enable */
#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_WIDTH 1
/* default value of bitfield PTP Digital Clock Read Enable */
#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_DEFAULT 0x0
/* register address for ptp counter reading */
#define HW_ATL_PCS_PTP_TS_VAL_ADDR(index) (0x00004900 + (index) * 0x4)
/* mif soft reset bitfield definitions
* preprocessor definitions for the bitfield "soft reset".
* port="pif_glb_res_i"
......@@ -2532,50 +2594,121 @@
/* default value of bitfield uP Force Interrupt */
#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380
#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0
#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0
#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4)
/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
* Parameter: location {D} | stride size 0x4 | range [0, 7]
* PORT="pif_rpf_l3_sa0_i[31:0]"
*/
/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4)
/* Bitmask for bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
/* Inverted bitmask for bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
/* Lower bit position of bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_SHIFT 0
/* Width of bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_WIDTH 32
/* Default value of bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
* Parameter: location {D} | stride size 0x4 | range [0, 7]
* PORT="pif_rpf_l3_da0_i[31:0]"
*/
/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053B0 + (location) * 0x4)
/* Bitmask for bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
/* Inverted bitmask for bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
/* Lower bit position of bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_SHIFT 0
/* Width of bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_WIDTH 32
/* Default value of bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
/* Preprocessor definitions for Global MDIO Interfaces
* Address: 0x00000280 + 0x4 * Number of interface
*/
#define HW_ATL_GLB_MDIO_IFACE_ADDR_BEGIN 0x00000280u
#define HW_ATL_GLB_MDIO_IFACE_N_ADR(number) \
(HW_ATL_GLB_MDIO_IFACE_ADDR_BEGIN + (((number) - 1) * 0x4))
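Cross-check (illustrative, not part of the patch): the per-interface macro lines up with the individual MDIO register addresses defined below.
/*   HW_ATL_GLB_MDIO_IFACE_N_ADR(2) == 0x284 == HW_ATL_MDIO_BUSY_ADR
 *                                     (also EXECUTE_OPERATION, OP_MODE,
 *                                      PHY_ADDRESS share this register)
 *   HW_ATL_GLB_MDIO_IFACE_N_ADR(3) == 0x288 == HW_ATL_MDIO_WRITE_DATA_ADR
 *   HW_ATL_GLB_MDIO_IFACE_N_ADR(4) == 0x28C == HW_ATL_MDIO_ADDRESS_ADR
 */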
/* MIF MDIO Busy Bitfield Definitions
* Preprocessor definitions for the bitfield "MDIO Busy".
* PORT="mdio_pif_busy_o"
*/
/* Register address for bitfield MDIO Busy */
#define HW_ATL_MDIO_BUSY_ADR 0x00000284
/* Bitmask for bitfield MDIO Busy */
#define HW_ATL_MDIO_BUSY_MSK 0x80000000
/* Inverted bitmask for bitfield MDIO Busy */
#define HW_ATL_MDIO_BUSY_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield MDIO Busy */
#define HW_ATL_MDIO_BUSY_SHIFT 31
/* Width of bitfield MDIO Busy */
#define HW_ATL_MDIO_BUSY_WIDTH 1
/* MIF MDIO Execute Operation Bitfield Definitions
* Preprocessor definitions for the bitfield "MDIO Execute Operation".
* PORT="pif_mdio_op_start_i"
*/
/* Register address for bitfield MDIO Execute Operation */
#define HW_ATL_MDIO_EXECUTE_OPERATION_ADR 0x00000284
/* Bitmask for bitfield MDIO Execute Operation */
#define HW_ATL_MDIO_EXECUTE_OPERATION_MSK 0x00008000
/* Inverted bitmask for bitfield MDIO Execute Operation */
#define HW_ATL_MDIO_EXECUTE_OPERATION_MSKN 0xFFFF7FFF
/* Lower bit position of bitfield MDIO Execute Operation */
#define HW_ATL_MDIO_EXECUTE_OPERATION_SHIFT 15
/* Width of bitfield MDIO Execute Operation */
#define HW_ATL_MDIO_EXECUTE_OPERATION_WIDTH 1
/* Default value of bitfield MDIO Execute Operation */
#define HW_ATL_MDIO_EXECUTE_OPERATION_DEFAULT 0x0
/* MIF Op Mode [1:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "Op Mode [1:0]".
* PORT="pif_mdio_mode_i[1:0]"
*/
/* Register address for bitfield Op Mode [1:0] */
#define HW_ATL_MDIO_OP_MODE_ADR 0x00000284
/* Bitmask for bitfield Op Mode [1:0] */
#define HW_ATL_MDIO_OP_MODE_MSK 0x00003000
/* Inverted bitmask for bitfield Op Mode [1:0] */
#define HW_ATL_MDIO_OP_MODE_MSKN 0xFFFFCFFF
/* Lower bit position of bitfield Op Mode [1:0] */
#define HW_ATL_MDIO_OP_MODE_SHIFT 12
/* Width of bitfield Op Mode [1:0] */
#define HW_ATL_MDIO_OP_MODE_WIDTH 2
/* Default value of bitfield Op Mode [1:0] */
#define HW_ATL_MDIO_OP_MODE_DEFAULT 0x0
/* MIF PHY address Bitfield Definitions
* Preprocessor definitions for the bitfield "PHY address".
* PORT="pif_mdio_phy_addr_i[9:0]"
*/
/* Register address for bitfield PHY address */
#define HW_ATL_MDIO_PHY_ADDRESS_ADR 0x00000284
/* Bitmask for bitfield PHY address */
#define HW_ATL_MDIO_PHY_ADDRESS_MSK 0x000003FF
/* Inverted bitmask for bitfield PHY address */
#define HW_ATL_MDIO_PHY_ADDRESS_MSKN 0xFFFFFC00
/* Lower bit position of bitfield PHY address */
#define HW_ATL_MDIO_PHY_ADDRESS_SHIFT 0
/* Width of bitfield PHY address */
#define HW_ATL_MDIO_PHY_ADDRESS_WIDTH 10
/* Default value of bitfield PHY address */
#define HW_ATL_MDIO_PHY_ADDRESS_DEFAULT 0x0
/* MIF MDIO WriteData [F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "MDIO WriteData [F:0]".
* PORT="pif_mdio_wdata_i[15:0]"
*/
/* Register address for bitfield MDIO WriteData [F:0] */
#define HW_ATL_MDIO_WRITE_DATA_ADR 0x00000288
/* Bitmask for bitfield MDIO WriteData [F:0] */
#define HW_ATL_MDIO_WRITE_DATA_MSK 0x0000FFFF
/* Inverted bitmask for bitfield MDIO WriteData [F:0] */
#define HW_ATL_MDIO_WRITE_DATA_MSKN 0xFFFF0000
/* Lower bit position of bitfield MDIO WriteData [F:0] */
#define HW_ATL_MDIO_WRITE_DATA_SHIFT 0
/* Width of bitfield MDIO WriteData [F:0] */
#define HW_ATL_MDIO_WRITE_DATA_WIDTH 16
/* Default value of bitfield MDIO WriteData [F:0] */
#define HW_ATL_MDIO_WRITE_DATA_DEFAULT 0x0
/* MIF MDIO Address [F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "MDIO Address [F:0]".
* PORT="pif_mdio_addr_i[15:0]"
*/
/* Register address for bitfield MDIO Address [F:0] */
#define HW_ATL_MDIO_ADDRESS_ADR 0x0000028C
/* Bitmask for bitfield MDIO Address [F:0] */
#define HW_ATL_MDIO_ADDRESS_MSK 0x0000FFFF
/* Inverted bitmask for bitfield MDIO Address [F:0] */
#define HW_ATL_MDIO_ADDRESS_MSKN 0xFFFF0000
/* Lower bit position of bitfield MDIO Address [F:0] */
#define HW_ATL_MDIO_ADDRESS_SHIFT 0
/* Width of bitfield MDIO Address [F:0] */
#define HW_ATL_MDIO_ADDRESS_WIDTH 16
/* Default value of bitfield MDIO Address [F:0] */
#define HW_ATL_MDIO_ADDRESS_DEFAULT 0x0
#define HW_ATL_FW_SM_MDIO 0x0U
#define HW_ATL_FW_SM_RAM 0x2U
#endif /* HW_ATL_LLH_INTERNAL_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
......@@ -327,8 +327,7 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
return err;
}
static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
u32 cnt)
int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt)
{
u32 val;
int err = 0;
......@@ -964,4 +963,6 @@ const struct aq_fw_ops aq_fw_1x_ops = {
.set_eee_rate = NULL,
.get_eee_rate = NULL,
.set_flow_control = NULL,
.send_fw_request = NULL,
.enable_ptp = NULL,
};
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
......@@ -44,6 +44,14 @@ struct __packed hw_atl_rxd_wb_s {
u16 vlan;
};
/* Hardware rx HW TIMESTAMP writeback */
struct __packed hw_atl_rxd_hwts_wb_s {
u32 sec_hw;
u32 ns;
u32 sec_lw0;
u32 sec_lw1;
};
struct __packed hw_atl_stats_s {
u32 uprc;
u32 mprc;
......@@ -168,6 +176,34 @@ struct __packed hw_atl_utils_mbox_header {
u32 error;
};
struct __packed hw_aq_ptp_offset {
u16 ingress_100;
u16 egress_100;
u16 ingress_1000;
u16 egress_1000;
u16 ingress_2500;
u16 egress_2500;
u16 ingress_5000;
u16 egress_5000;
u16 ingress_10000;
u16 egress_10000;
};
enum gpio_pin_function {
GPIO_PIN_FUNCTION_NC,
GPIO_PIN_FUNCTION_VAUX_ENABLE,
GPIO_PIN_FUNCTION_EFUSE_BURN_ENABLE,
GPIO_PIN_FUNCTION_SFP_PLUS_DETECT,
GPIO_PIN_FUNCTION_TX_DISABLE,
GPIO_PIN_FUNCTION_RATE_SEL_0,
GPIO_PIN_FUNCTION_RATE_SEL_1,
GPIO_PIN_FUNCTION_TX_FAULT,
GPIO_PIN_FUNCTION_PTP0,
GPIO_PIN_FUNCTION_PTP1,
GPIO_PIN_FUNCTION_PTP2,
GPIO_PIN_FUNCTION_SIZE
};
struct __packed hw_aq_info {
u8 reserved[6];
u16 phy_fault_code;
......@@ -175,9 +211,23 @@ struct __packed hw_aq_info {
u8 cable_len;
u8 reserved1;
u32 cable_diag_data[4];
u8 reserved2[32];
struct hw_aq_ptp_offset ptp_offset;
u8 reserved2[12];
u32 caps_lo;
u32 caps_hi;
u32 reserved_datapath;
u32 reserved3[7];
u32 reserved_simpleresp[3];
u32 reserved_linkstat[7];
u32 reserved_wakes_count;
u32 reserved_eee_stat[12];
u32 tx_stuck_cnt;
u32 setting_address;
u32 setting_length;
u32 caps_ex;
enum gpio_pin_function gpio_pin[3];
u32 pcie_aer_dump[18];
u16 snr_margin[4];
};
struct __packed hw_atl_utils_mbox {
......@@ -237,6 +287,42 @@ struct __packed offload_info {
u8 buf[0];
};
/* Mailbox FW Request interface */
struct __packed hw_fw_request_ptp_gpio_ctrl {
u32 index;
u32 period;
u64 start;
};
struct __packed hw_fw_request_ptp_adj_freq {
u32 ns_mac;
u32 fns_mac;
u32 ns_phy;
u32 fns_phy;
u32 mac_ns_adj;
u32 mac_fns_adj;
};
struct __packed hw_fw_request_ptp_adj_clock {
u32 ns;
u32 sec;
int sign;
};
#define HW_AQ_FW_REQUEST_PTP_GPIO_CTRL 0x11
#define HW_AQ_FW_REQUEST_PTP_ADJ_FREQ 0x12
#define HW_AQ_FW_REQUEST_PTP_ADJ_CLOCK 0x13
struct __packed hw_fw_request_iface {
u32 msg_id;
union {
/* PTP FW Request */
struct hw_fw_request_ptp_gpio_ctrl ptp_gpio_ctrl;
struct hw_fw_request_ptp_adj_freq ptp_adj_freq;
struct hw_fw_request_ptp_adj_clock ptp_adj_clock;
};
};
enum hw_atl_rx_action_with_traffic {
HW_ATL_RX_DISCARD,
HW_ATL_RX_HOST,
......@@ -344,91 +430,135 @@ enum hw_atl_fw2x_rate {
FW2X_RATE_10G = 0x800,
};
/* 0x370
* Link capabilities resolution register
*/
enum hw_atl_fw2x_caps_lo {
CAPS_LO_10BASET_HD = 0x00,
CAPS_LO_10BASET_HD = 0,
CAPS_LO_10BASET_FD,
CAPS_LO_100BASETX_HD,
CAPS_LO_100BASET4_HD,
CAPS_LO_100BASET2_HD,
CAPS_LO_100BASETX_FD,
CAPS_LO_100BASETX_FD = 5,
CAPS_LO_100BASET2_FD,
CAPS_LO_1000BASET_HD,
CAPS_LO_1000BASET_FD,
CAPS_LO_2P5GBASET_FD,
CAPS_LO_5GBASET_FD,
CAPS_LO_5GBASET_FD = 10,
CAPS_LO_10GBASET_FD,
};
/* 0x374
* Status register
*/
enum hw_atl_fw2x_caps_hi {
CAPS_HI_RESERVED1 = 0x00,
CAPS_HI_RESERVED1 = 0,
CAPS_HI_10BASET_EEE,
CAPS_HI_RESERVED2,
CAPS_HI_PAUSE,
CAPS_HI_ASYMMETRIC_PAUSE,
CAPS_HI_100BASETX_EEE,
CAPS_HI_100BASETX_EEE = 5,
CAPS_HI_RESERVED3,
CAPS_HI_RESERVED4,
CAPS_HI_1000BASET_FD_EEE,
CAPS_HI_2P5GBASET_FD_EEE,
CAPS_HI_5GBASET_FD_EEE,
CAPS_HI_5GBASET_FD_EEE = 10,
CAPS_HI_10GBASET_FD_EEE,
CAPS_HI_RESERVED5,
CAPS_HI_FW_REQUEST,
CAPS_HI_RESERVED6,
CAPS_HI_RESERVED7,
CAPS_HI_RESERVED8,
CAPS_HI_RESERVED8 = 15,
CAPS_HI_RESERVED9,
CAPS_HI_CABLE_DIAG,
CAPS_HI_TEMPERATURE,
CAPS_HI_DOWNSHIFT,
CAPS_HI_PTP_AVB_EN,
CAPS_HI_PTP_AVB_EN_FW2X = 20,
CAPS_HI_MEDIA_DETECT,
CAPS_HI_LINK_DROP,
CAPS_HI_SLEEP_PROXY,
CAPS_HI_WOL,
CAPS_HI_MAC_STOP,
CAPS_HI_MAC_STOP = 25,
CAPS_HI_EXT_LOOPBACK,
CAPS_HI_INT_LOOPBACK,
CAPS_HI_EFUSE_AGENT,
CAPS_HI_WOL_TIMER,
CAPS_HI_STATISTICS,
CAPS_HI_STATISTICS = 30,
CAPS_HI_TRANSACTION_ID,
};
/* 0x36C
* Control register
*/
enum hw_atl_fw2x_ctrl {
CTRL_RESERVED1 = 0x00,
CTRL_RESERVED1 = 0,
CTRL_RESERVED2,
CTRL_RESERVED3,
CTRL_PAUSE,
CTRL_ASYMMETRIC_PAUSE,
CTRL_RESERVED4,
CTRL_RESERVED4 = 5,
CTRL_RESERVED5,
CTRL_RESERVED6,
CTRL_1GBASET_FD_EEE,
CTRL_2P5GBASET_FD_EEE,
CTRL_5GBASET_FD_EEE,
CTRL_5GBASET_FD_EEE = 10,
CTRL_10GBASET_FD_EEE,
CTRL_THERMAL_SHUTDOWN,
CTRL_PHY_LOGS,
CTRL_EEE_AUTO_DISABLE,
CTRL_PFC,
CTRL_PFC = 15,
CTRL_WAKE_ON_LINK,
CTRL_CABLE_DIAG,
CTRL_TEMPERATURE,
CTRL_DOWNSHIFT,
CTRL_PTP_AVB,
CTRL_PTP_AVB = 20,
CTRL_RESERVED7,
CTRL_LINK_DROP,
CTRL_SLEEP_PROXY,
CTRL_WOL,
CTRL_MAC_STOP,
CTRL_MAC_STOP = 25,
CTRL_EXT_LOOPBACK,
CTRL_INT_LOOPBACK,
CTRL_RESERVED8,
CTRL_WOL_TIMER,
CTRL_STATISTICS,
CTRL_STATISTICS = 30,
CTRL_FORCE_RECONNECT,
};
enum hw_atl_caps_ex {
CAPS_EX_LED_CONTROL = 0,
CAPS_EX_LED0_MODE_LO,
CAPS_EX_LED0_MODE_HI,
CAPS_EX_LED1_MODE_LO,
CAPS_EX_LED1_MODE_HI,
CAPS_EX_LED2_MODE_LO = 5,
CAPS_EX_LED2_MODE_HI,
CAPS_EX_RESERVED07,
CAPS_EX_RESERVED08,
CAPS_EX_RESERVED09,
CAPS_EX_RESERVED10 = 10,
CAPS_EX_RESERVED11,
CAPS_EX_RESERVED12,
CAPS_EX_RESERVED13,
CAPS_EX_RESERVED14,
CAPS_EX_RESERVED15 = 15,
CAPS_EX_PHY_PTP_EN,
CAPS_EX_MAC_PTP_EN,
CAPS_EX_EXT_CLK_EN,
CAPS_EX_SCHED_DMA_EN,
CAPS_EX_PTP_GPIO_EN = 20,
CAPS_EX_UPDATE_SETTINGS,
CAPS_EX_PHY_CTRL_TS_PIN,
CAPS_EX_SNR_OPERATING_MARGIN,
CAPS_EX_RESERVED24,
CAPS_EX_RESERVED25 = 25,
CAPS_EX_RESERVED26,
CAPS_EX_RESERVED27,
CAPS_EX_RESERVED28,
CAPS_EX_RESERVED29,
CAPS_EX_RESERVED30 = 30,
CAPS_EX_RESERVED31
};
struct aq_hw_s;
struct aq_fw_ops;
struct aq_hw_caps_s;
......@@ -475,6 +605,8 @@ struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
u32 *p, u32 cnt);
int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt);
int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac);
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
......@@ -26,6 +26,9 @@
#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
#define HW_ATL_FW3X_EXT_CONTROL_ADDR 0x378
#define HW_ATL_FW3X_EXT_STATE_ADDR 0x37c
#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
......@@ -444,6 +447,54 @@ static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
return err;
}
static int aq_fw2x_send_fw_request(struct aq_hw_s *self,
const struct hw_fw_request_iface *fw_req,
size_t size)
{
u32 ctrl2, orig_ctrl2;
u32 dword_cnt;
int err = 0;
u32 val;
/* Write data to drvIface Mailbox */
dword_cnt = size / sizeof(u32);
if (size % sizeof(u32))
dword_cnt++;
err = hw_atl_utils_fw_upload_dwords(self, aq_fw2x_rpc_get(self),
(void *)fw_req, dword_cnt);
if (err < 0)
goto err_exit;
/* Toggle the FW_REQUEST bit to ask firmware to process the request */
ctrl2 = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
orig_ctrl2 = ctrl2 & BIT(CAPS_HI_FW_REQUEST);
ctrl2 = ctrl2 ^ BIT(CAPS_HI_FW_REQUEST);
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, ctrl2);
/* Wait for FW to report back */
err = readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
orig_ctrl2 != (val &
BIT(CAPS_HI_FW_REQUEST)),
1U, 10000U);
err_exit:
return err;
}
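Illustrative sketch (not part of the patch): how a caller would issue a PTP_ADJ_CLOCK request through the new send_fw_request op, mirroring the existing PTP hooks in hw_atl_b0.c; example_adj_clock() is a hypothetical helper, not driver code.
/* Sketch only: build a request on the stack and transfer msg_id plus the
 * used member of the union, as the hw_atl_b0 PTP hooks do.
 */
static int example_adj_clock(struct aq_hw_s *self, u32 sec, u32 ns, int sign)
{
	struct hw_fw_request_iface fwreq;
	size_t size;

	memset(&fwreq, 0, sizeof(fwreq));

	fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_CLOCK;
	fwreq.ptp_adj_clock.sec = sec;
	fwreq.ptp_adj_clock.ns = ns;
	fwreq.ptp_adj_clock.sign = sign;

	size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_clock);
	return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
}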
static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable)
{
u32 ptp_opts = aq_hw_read_reg(self, HW_ATL_FW3X_EXT_STATE_ADDR);
u32 all_ptp_features = BIT(CAPS_EX_PHY_PTP_EN) |
BIT(CAPS_EX_PTP_GPIO_EN);
if (enable)
ptp_opts |= all_ptp_features;
else
ptp_opts &= ~all_ptp_features;
aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts);
}
static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed)
{
u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
......@@ -548,5 +599,7 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.set_eee_rate = aq_fw2x_set_eee_rate,
.get_eee_rate = aq_fw2x_get_eee_rate,
.set_flow_control = aq_fw2x_set_flow_control,
.get_flow_control = aq_fw2x_get_flow_control
.get_flow_control = aq_fw2x_get_flow_control,
.send_fw_request = aq_fw2x_send_fw_request,
.enable_ptp = aq_fw3x_enable_ptp,
};