Commit 28651e6c authored by Andrew Jeffery, committed by Corey Minyard

ipmi: kcs_bmc: Allow clients to control KCS IRQ state

Add a mechanism for controlling whether the client associated with a
KCS device will receive Input Buffer Full (IBF) and Output Buffer Empty
(OBE) events. This enables an abstract implementation of poll() for KCS
devices.

A wart in the implementation is that the ASPEED KCS devices don't
support an OBE interrupt for the BMC. Instead we pretend they have one
by polling the status register, waiting for the Output Buffer Full
(OBF) bit to clear, and generating an event when OBE is observed.
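For context, a minimal sketch of how a client might drive the new call
from a poll() handler. The client structure and handler names here
(example_client, example_poll) are hypothetical and not part of this
patch; only kcs_bmc_update_event_mask(), kcs_bmc_read_status() and the
KCS_BMC_* constants come from this series, and the include is
illustrative:

    #include <linux/fs.h>
    #include <linux/poll.h>
    #include <linux/wait.h>

    #include "kcs_bmc_client.h"  /* assumed to carry the declarations used below */

    /* Hypothetical client state; not part of this patch. */
    struct example_client {
            struct kcs_bmc_device *dev;   /* KCS device this client is attached to */
            wait_queue_head_t queue;      /* woken from the client's event callback */
    };

    static __poll_t example_poll(struct file *filp, poll_table *wait)
    {
            struct example_client *c = filp->private_data;

            poll_wait(filp, &c->queue, wait);

            /* Ask for OBE events so we hear when the host drains the output buffer. */
            kcs_bmc_update_event_mask(c->dev, KCS_BMC_EVENT_TYPE_OBE,
                                      KCS_BMC_EVENT_TYPE_OBE);

            /* Writable once OBF is clear; readability is signalled via IBF events. */
            if (!(kcs_bmc_read_status(c->dev) & KCS_BMC_STR_OBF))
                    return EPOLLOUT;

            return 0;
    }

The idea is that a client only asks for OBE events while userspace is
waiting to write, while IBF events continue to drive the read side.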

Cc: CS20 KWLiu <KWLIU@nuvoton.com>
Signed-off-by: Andrew Jeffery <andrew@aj.id.au>
Reviewed-by: Zev Weiss <zweiss@equinix.com>
Message-Id: <20210608104757.582199-10-andrew@aj.id.au>
Signed-off-by: Corey Minyard <cminyard@mvista.com>
parent 7cafff99
@@ -182,6 +182,12 @@ void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv)
}
EXPORT_SYMBOL(kcs_bmc_unregister_driver);

void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events)
{
kcs_bmc->ops->irq_mask_update(kcs_bmc, mask, events);
}
EXPORT_SYMBOL(kcs_bmc_update_event_mask);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
@@ -8,6 +8,9 @@
#include <linux/list.h>
#define KCS_BMC_EVENT_TYPE_OBE BIT(0)
#define KCS_BMC_EVENT_TYPE_IBF BIT(1)
#define KCS_BMC_STR_OBF BIT(0)
#define KCS_BMC_STR_IBF BIT(1)
#define KCS_BMC_STR_CMD_DAT BIT(3)
@@ -60,10 +60,18 @@
#define LPC_ODR4 0x118
#define LPC_STR4 0x11C
#define OBE_POLL_PERIOD (HZ / 2)
struct aspeed_kcs_bmc {
struct kcs_bmc_device kcs_bmc;
struct regmap *map;
struct {
spinlock_t lock;
bool remove;
struct timer_list timer;
} obe;
};
struct aspeed_kcs_of_ops {
@@ -159,68 +167,89 @@ static void aspeed_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
switch (kcs_bmc->channel) {
case 1:
-if (enable) {
-regmap_update_bits(priv->map, LPC_HICR2,
-LPC_HICR2_IBFIF1, LPC_HICR2_IBFIF1);
-regmap_update_bits(priv->map, LPC_HICR0,
-LPC_HICR0_LPC1E, LPC_HICR0_LPC1E);
-} else {
-regmap_update_bits(priv->map, LPC_HICR0,
-LPC_HICR0_LPC1E, 0);
-regmap_update_bits(priv->map, LPC_HICR2,
-LPC_HICR2_IBFIF1, 0);
-}
-break;
regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E);
return;
case 2:
-if (enable) {
-regmap_update_bits(priv->map, LPC_HICR2,
-LPC_HICR2_IBFIF2, LPC_HICR2_IBFIF2);
-regmap_update_bits(priv->map, LPC_HICR0,
-LPC_HICR0_LPC2E, LPC_HICR0_LPC2E);
-} else {
-regmap_update_bits(priv->map, LPC_HICR0,
-LPC_HICR0_LPC2E, 0);
-regmap_update_bits(priv->map, LPC_HICR2,
-LPC_HICR2_IBFIF2, 0);
-}
-break;
regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E);
return;
case 3:
-if (enable) {
-regmap_update_bits(priv->map, LPC_HICR2,
-LPC_HICR2_IBFIF3, LPC_HICR2_IBFIF3);
-regmap_update_bits(priv->map, LPC_HICR0,
-LPC_HICR0_LPC3E, LPC_HICR0_LPC3E);
-regmap_update_bits(priv->map, LPC_HICR4,
-LPC_HICR4_KCSENBL, LPC_HICR4_KCSENBL);
-} else {
-regmap_update_bits(priv->map, LPC_HICR0,
-LPC_HICR0_LPC3E, 0);
-regmap_update_bits(priv->map, LPC_HICR4,
-LPC_HICR4_KCSENBL, 0);
-regmap_update_bits(priv->map, LPC_HICR2,
-LPC_HICR2_IBFIF3, 0);
-}
-break;
regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E);
regmap_update_bits(priv->map, LPC_HICR4,
LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL);
return;
case 4:
-if (enable)
-regmap_update_bits(priv->map, LPC_HICRB,
-LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E,
-LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E);
regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E);
return;
default:
pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
return;
}
}

static void aspeed_kcs_check_obe(struct timer_list *timer)
{
struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer);
unsigned long flags;
u8 str;
spin_lock_irqsave(&priv->obe.lock, flags);
if (priv->obe.remove) {
spin_unlock_irqrestore(&priv->obe.lock, flags);
return;
}
str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
if (str & KCS_BMC_STR_OBF) {
mod_timer(timer, jiffies + OBE_POLL_PERIOD);
spin_unlock_irqrestore(&priv->obe.lock, flags);
return;
}
spin_unlock_irqrestore(&priv->obe.lock, flags);
kcs_bmc_handle_event(&priv->kcs_bmc);
}

static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
{
struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
/* We don't have an OBE IRQ, emulate it */
if (mask & KCS_BMC_EVENT_TYPE_OBE) {
if (KCS_BMC_EVENT_TYPE_OBE & state)
mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
else
-regmap_update_bits(priv->map, LPC_HICRB,
-LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E,
-0);
-break;
del_timer(&priv->obe.timer);
}
-default:
-break;
if (mask & KCS_BMC_EVENT_TYPE_IBF) {
const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF);
switch (kcs_bmc->channel) {
case 1:
regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIF1,
enable * LPC_HICR2_IBFIF1);
return;
case 2:
regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIF2,
enable * LPC_HICR2_IBFIF2);
return;
case 3:
regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIF3,
enable * LPC_HICR2_IBFIF3);
return;
case 4:
regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIF4,
enable * LPC_HICRB_IBFIF4);
return;
default:
pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
return;
}
}
}

static const struct kcs_bmc_device_ops aspeed_kcs_ops = {
.irq_mask_update = aspeed_kcs_irq_mask_update,
.io_inputb = aspeed_kcs_inb,
.io_outputb = aspeed_kcs_outb,
.io_updateb = aspeed_kcs_updateb,
@@ -375,6 +404,10 @@ static int aspeed_kcs_probe(struct platform_device *pdev)
return -ENODEV;
}
spin_lock_init(&priv->obe.lock);
priv->obe.remove = false;
timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0);
aspeed_kcs_set_address(kcs_bmc, addr);
rc = aspeed_kcs_config_irq(kcs_bmc, pdev);
@@ -383,6 +416,8 @@ static int aspeed_kcs_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE),
KCS_BMC_EVENT_TYPE_IBF);
aspeed_kcs_enable_channel(kcs_bmc, true);
rc = kcs_bmc_add_device(&priv->kcs_bmc);
@@ -403,6 +438,15 @@ static int aspeed_kcs_remove(struct platform_device *pdev)
kcs_bmc_remove_device(kcs_bmc);
aspeed_kcs_enable_channel(kcs_bmc, false);
aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
/* Make sure it's properly dead */
spin_lock_irq(&priv->obe.lock);
priv->obe.remove = true;
spin_unlock_irq(&priv->obe.lock);
del_timer_sync(&priv->obe.timer);
return 0;
}
@@ -35,6 +35,8 @@ void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv);
int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client);
void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client);
void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events);
u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc);
void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data);
u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc);
@@ -9,6 +9,7 @@
#include "kcs_bmc.h"
struct kcs_bmc_device_ops {
void (*irq_mask_update)(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 enable);
u8 (*io_inputb)(struct kcs_bmc_device *kcs_bmc, u32 reg);
void (*io_outputb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 b);
void (*io_updateb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 b);
@@ -38,6 +38,7 @@
#define KCS2CTL 0x2A
#define KCS3CTL 0x3C
#define KCS_CTL_IBFIE BIT(0)
#define KCS_CTL_OBEIE BIT(1)
#define KCS1IE 0x1C
#define KCS2IE 0x2E
@@ -117,13 +118,23 @@ static void npcm7xx_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
{
struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
-regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE,
-enable ? KCS_CTL_IBFIE : 0);
regmap_update_bits(priv->map, priv->reg->ie, KCS_IE_IRQE | KCS_IE_HIRQE,
enable ? KCS_IE_IRQE | KCS_IE_HIRQE : 0);
}

static void npcm7xx_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
{
struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
if (mask & KCS_BMC_EVENT_TYPE_OBE)
regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_OBEIE,
!!(state & KCS_BMC_EVENT_TYPE_OBE) * KCS_CTL_OBEIE);
if (mask & KCS_BMC_EVENT_TYPE_IBF)
regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE,
!!(state & KCS_BMC_EVENT_TYPE_IBF) * KCS_CTL_IBFIE);
}

static irqreturn_t npcm7xx_kcs_irq(int irq, void *arg)
{
struct kcs_bmc_device *kcs_bmc = arg;
@@ -146,6 +157,7 @@ static int npcm7xx_kcs_config_irq(struct kcs_bmc_device *kcs_bmc,
}
static const struct kcs_bmc_device_ops npcm7xx_kcs_ops = {
.irq_mask_update = npcm7xx_kcs_irq_mask_update,
.io_inputb = npcm7xx_kcs_inb,
.io_outputb = npcm7xx_kcs_outb,
.io_updateb = npcm7xx_kcs_updateb,
@@ -186,11 +198,14 @@ static int npcm7xx_kcs_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
-npcm7xx_kcs_enable_channel(kcs_bmc, true);
rc = npcm7xx_kcs_config_irq(kcs_bmc, pdev);
if (rc)
return rc;
npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE),
KCS_BMC_EVENT_TYPE_IBF);
npcm7xx_kcs_enable_channel(kcs_bmc, true);
rc = kcs_bmc_add_device(kcs_bmc);
if (rc) {
dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
@@ -211,6 +226,9 @@ static int npcm7xx_kcs_remove(struct platform_device *pdev)
kcs_bmc_remove_device(kcs_bmc);
npcm7xx_kcs_enable_channel(kcs_bmc, false);
npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
return 0;
}