Commit c06e9ef6 authored by Linus Torvalds

Merge tag 'pstore-v4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull pstore updates from Kees Cook:
 "Improvements and refactorings:

   - Improve compression handling

   - Refactor argument handling during initialization

   - Avoid needless locking for saner EFI backend handling

   - Add more kern-doc and improve debugging output"

* tag 'pstore-v4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  pstore/ram: Avoid NULL deref in ftrace merging failure path
  pstore: Convert buf_lock to semaphore
  pstore: Fix bool initialization/comparison
  pstore/ram: Do not treat empty buffers as valid
  pstore/ram: Simplify ramoops_get_next_prz() arguments
  pstore: Map PSTORE_TYPE_* to strings
  pstore: Replace open-coded << with BIT()
  pstore: Improve and update some comments and status output
  pstore/ram: Add kern-doc for struct persistent_ram_zone
  pstore/ram: Report backend assignments with finer granularity
  pstore/ram: Standardize module name in ramoops
  pstore: Avoid duplicate call of persistent_ram_zap()
  pstore: Remove needless lock during console writes
  pstore: Do not use crash buffer for decompression
parents 8d697332 8665569e
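
The "Convert buf_lock to semaphore" and "Remove needless lock during console writes" entries replace the spinlock that previously protected each backend's crash buffer. Before the diff, here is a minimal sketch of the resulting pattern, not taken from the series itself; the names example_buf_lock, example_init and example_dump are hypothetical. Atomic paths try the semaphore without blocking and give up, while normal paths may sleep:

#include <linux/semaphore.h>
#include <linux/kmsg_dump.h>
#include <linux/hardirq.h>

/* Hypothetical backend state; stands in for psinfo->buf_lock. */
static struct semaphore example_buf_lock;

static void example_init(void)
{
	sema_init(&example_buf_lock, 1);	/* binary semaphore guarding the dump buffer */
}

static void example_dump(enum kmsg_dump_reason reason)
{
	if (down_trylock(&example_buf_lock)) {
		/* Contended: NMI, panic, and emergency-restart paths must not wait. */
		if (in_nmi() || reason == KMSG_DUMP_PANIC ||
		    reason == KMSG_DUMP_EMERG)
			return;
		if (down_interruptible(&example_buf_lock))
			return;
	}

	/* ... fill the shared buffer and call the backend's write() here ... */

	up(&example_buf_lock);
}

The actual pstore_dump() and pstore_console_write() changes below follow this shape, with pstore_cannot_wait() deciding whether the caller may sleep.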
......@@ -563,8 +563,6 @@ static int nvram_pstore_init(void)
nvram_pstore_info.buf = oops_data;
nvram_pstore_info.bufsize = oops_data_sz;
spin_lock_init(&nvram_pstore_info.buf_lock);
rc = pstore_register(&nvram_pstore_info);
if (rc && (rc != -EPERM))
/* Print error only when pstore.backend == nvram */
......
......@@ -1035,7 +1035,7 @@ static ssize_t erst_reader(struct pstore_record *record)
CPER_SECTION_TYPE_MCE) == 0)
record->type = PSTORE_TYPE_MCE;
else
record->type = PSTORE_TYPE_UNKNOWN;
record->type = PSTORE_TYPE_MAX;
if (rcd->hdr.validation_bits & CPER_VALID_TIMESTAMP)
record->time.tv_sec = rcd->hdr.timestamp;
......@@ -1176,7 +1176,6 @@ static int __init erst_init(void)
"Error Record Serialization Table (ERST) support is initialized.\n");
buf = kmalloc(erst_erange.size, GFP_KERNEL);
spin_lock_init(&erst_info.buf_lock);
if (buf) {
erst_info.buf = buf + sizeof(struct cper_pstore_record);
erst_info.bufsize = erst_erange.size -
......
......@@ -259,8 +259,7 @@ static int efi_pstore_write(struct pstore_record *record)
efi_name[i] = name[i];
ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
!pstore_cannot_block_path(record->reason),
record->size, record->psi->buf);
preemptible(), record->size, record->psi->buf);
if (record->reason == KMSG_DUMP_OOPS)
efivar_run_worker();
......@@ -369,7 +368,6 @@ static __init int efivars_pstore_init(void)
return -ENOMEM;
efi_pstore_info.bufsize = 1024;
spin_lock_init(&efi_pstore_info.buf_lock);
if (pstore_register(&efi_pstore_info)) {
kfree(efi_pstore_info.buf);
......
......@@ -148,7 +148,7 @@ void pstore_unregister_ftrace(void)
mutex_lock(&pstore_ftrace_lock);
if (pstore_ftrace_enabled) {
unregister_ftrace_function(&pstore_ftrace_ops);
pstore_ftrace_enabled = 0;
pstore_ftrace_enabled = false;
}
mutex_unlock(&pstore_ftrace_lock);
......
......@@ -335,53 +335,10 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
goto fail_alloc;
private->record = record;
switch (record->type) {
case PSTORE_TYPE_DMESG:
scnprintf(name, sizeof(name), "dmesg-%s-%llu%s",
scnprintf(name, sizeof(name), "%s-%s-%llu%s",
pstore_type_to_name(record->type),
record->psi->name, record->id,
record->compressed ? ".enc.z" : "");
break;
case PSTORE_TYPE_CONSOLE:
scnprintf(name, sizeof(name), "console-%s-%llu",
record->psi->name, record->id);
break;
case PSTORE_TYPE_FTRACE:
scnprintf(name, sizeof(name), "ftrace-%s-%llu",
record->psi->name, record->id);
break;
case PSTORE_TYPE_MCE:
scnprintf(name, sizeof(name), "mce-%s-%llu",
record->psi->name, record->id);
break;
case PSTORE_TYPE_PPC_RTAS:
scnprintf(name, sizeof(name), "rtas-%s-%llu",
record->psi->name, record->id);
break;
case PSTORE_TYPE_PPC_OF:
scnprintf(name, sizeof(name), "powerpc-ofw-%s-%llu",
record->psi->name, record->id);
break;
case PSTORE_TYPE_PPC_COMMON:
scnprintf(name, sizeof(name), "powerpc-common-%s-%llu",
record->psi->name, record->id);
break;
case PSTORE_TYPE_PMSG:
scnprintf(name, sizeof(name), "pmsg-%s-%llu",
record->psi->name, record->id);
break;
case PSTORE_TYPE_PPC_OPAL:
scnprintf(name, sizeof(name), "powerpc-opal-%s-%llu",
record->psi->name, record->id);
break;
case PSTORE_TYPE_UNKNOWN:
scnprintf(name, sizeof(name), "unknown-%s-%llu",
record->psi->name, record->id);
break;
default:
scnprintf(name, sizeof(name), "type%d-%s-%llu",
record->type, record->psi->name, record->id);
break;
}
dentry = d_alloc_name(root, name);
if (!dentry)
......
......@@ -59,6 +59,19 @@ MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
"enabling this option is not safe, it may lead to further "
"corruption on Oopses)");
/* Names should be in the same order as the enum pstore_type_id */
static const char * const pstore_type_names[] = {
"dmesg",
"mce",
"console",
"ftrace",
"rtas",
"powerpc-ofw",
"powerpc-common",
"pmsg",
"powerpc-opal",
};
static int pstore_new_entry;
static void pstore_timefunc(struct timer_list *);
......@@ -104,6 +117,30 @@ void pstore_set_kmsg_bytes(int bytes)
/* Tag each group of saved records with a sequence number */
static int oopscount;
const char *pstore_type_to_name(enum pstore_type_id type)
{
BUILD_BUG_ON(ARRAY_SIZE(pstore_type_names) != PSTORE_TYPE_MAX);
if (WARN_ON_ONCE(type >= PSTORE_TYPE_MAX))
return "unknown";
return pstore_type_names[type];
}
EXPORT_SYMBOL_GPL(pstore_type_to_name);
enum pstore_type_id pstore_name_to_type(const char *name)
{
int i;
for (i = 0; i < PSTORE_TYPE_MAX; i++) {
if (!strcmp(pstore_type_names[i], name))
return i;
}
return PSTORE_TYPE_MAX;
}
EXPORT_SYMBOL_GPL(pstore_name_to_type);
static const char *get_reason_str(enum kmsg_dump_reason reason)
{
switch (reason) {
......@@ -124,26 +161,27 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
}
}
bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
{
/*
* In case of NMI path, pstore shouldn't be blocked
* regardless of reason.
/*
* Should pstore_dump() wait for a concurrent pstore_dump()? If
* not, the current pstore_dump() will report a failure to dump
* and return.
*/
static bool pstore_cannot_wait(enum kmsg_dump_reason reason)
{
/* In NMI path, pstore shouldn't block regardless of reason. */
if (in_nmi())
return true;
switch (reason) {
/* In panic case, other cpus are stopped by smp_send_stop(). */
case KMSG_DUMP_PANIC:
/* Emergency restart shouldn't be blocked by spin lock. */
/* Emergency restart shouldn't be blocked. */
case KMSG_DUMP_EMERG:
return true;
default:
return false;
}
}
EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
static int zbufsize_deflate(size_t size)
......@@ -258,20 +296,6 @@ static int pstore_compress(const void *in, void *out,
return outlen;
}
static int pstore_decompress(void *in, void *out,
unsigned int inlen, unsigned int outlen)
{
int ret;
ret = crypto_comp_decompress(tfm, in, inlen, out, &outlen);
if (ret) {
pr_err("crypto_comp_decompress failed, ret = %d!\n", ret);
return ret;
}
return outlen;
}
static void allocate_buf_for_compression(void)
{
struct crypto_comp *ctx;
......@@ -318,7 +342,7 @@ static void allocate_buf_for_compression(void)
big_oops_buf_sz = size;
big_oops_buf = buf;
pr_info("Using compression: %s\n", zbackend->name);
pr_info("Using crash dump compression: %s\n", zbackend->name);
}
static void free_buf_for_compression(void)
......@@ -368,9 +392,8 @@ void pstore_record_init(struct pstore_record *record,
}
/*
* callback from kmsg_dump. (s2,l2) has the most recently
* written bytes, older bytes are in (s1,l1). Save as much
* as we can from the end of the buffer.
* callback from kmsg_dump. Save as much as we can (up to kmsg_bytes) from the
* end of the buffer.
*/
static void pstore_dump(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
......@@ -378,23 +401,23 @@ static void pstore_dump(struct kmsg_dumper *dumper,
unsigned long total = 0;
const char *why;
unsigned int part = 1;
unsigned long flags = 0;
int is_locked;
int ret;
why = get_reason_str(reason);
if (pstore_cannot_block_path(reason)) {
is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
if (!is_locked) {
pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
, in_nmi() ? "NMI" : why);
if (down_trylock(&psinfo->buf_lock)) {
/* Failed to acquire lock: give up if we cannot wait. */
if (pstore_cannot_wait(reason)) {
pr_err("dump skipped in %s path: may corrupt error record\n",
in_nmi() ? "NMI" : why);
return;
}
} else {
spin_lock_irqsave(&psinfo->buf_lock, flags);
is_locked = 1;
if (down_interruptible(&psinfo->buf_lock)) {
pr_err("could not grab semaphore?!\n");
return;
}
}
oopscount++;
while (total < kmsg_bytes) {
char *dst;
......@@ -411,7 +434,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
record.part = part;
record.buf = psinfo->buf;
if (big_oops_buf && is_locked) {
if (big_oops_buf) {
dst = big_oops_buf;
dst_size = big_oops_buf_sz;
} else {
......@@ -429,7 +452,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
dst_size, &dump_size))
break;
if (big_oops_buf && is_locked) {
if (big_oops_buf) {
zipped_len = pstore_compress(dst, psinfo->buf,
header_size + dump_size,
psinfo->bufsize);
......@@ -452,8 +475,8 @@ static void pstore_dump(struct kmsg_dumper *dumper,
total += record.size;
part++;
}
if (is_locked)
spin_unlock_irqrestore(&psinfo->buf_lock, flags);
up(&psinfo->buf_lock);
}
static struct kmsg_dumper pstore_dumper = {
......@@ -476,31 +499,14 @@ static void pstore_unregister_kmsg(void)
#ifdef CONFIG_PSTORE_CONSOLE
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
const char *e = s + c;
while (s < e) {
struct pstore_record record;
unsigned long flags;
pstore_record_init(&record, psinfo);
record.type = PSTORE_TYPE_CONSOLE;
if (c > psinfo->bufsize)
c = psinfo->bufsize;
if (oops_in_progress) {
if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
break;
} else {
spin_lock_irqsave(&psinfo->buf_lock, flags);
}
record.buf = (char *)s;
record.size = c;
psinfo->write(&record);
spin_unlock_irqrestore(&psinfo->buf_lock, flags);
s += c;
c = e - s;
}
}
static struct console pstore_console = {
......@@ -589,6 +595,7 @@ int pstore_register(struct pstore_info *psi)
psi->write_user = pstore_write_user_compat;
psinfo = psi;
mutex_init(&psinfo->read_mutex);
sema_init(&psinfo->buf_lock, 1);
spin_unlock(&pstore_lock);
if (owner && !try_module_get(owner)) {
......@@ -656,8 +663,9 @@ EXPORT_SYMBOL_GPL(pstore_unregister);
static void decompress_record(struct pstore_record *record)
{
int ret;
int unzipped_len;
char *decompressed;
char *unzipped, *workspace;
if (!record->compressed)
return;
......@@ -668,35 +676,42 @@ static void decompress_record(struct pstore_record *record)
return;
}
/* No compression method has created the common buffer. */
/* Missing compression buffer means compression was not initialized. */
if (!big_oops_buf) {
pr_warn("no decompression buffer allocated\n");
pr_warn("no decompression method initialized!\n");
return;
}
unzipped_len = pstore_decompress(record->buf, big_oops_buf,
record->size, big_oops_buf_sz);
if (unzipped_len <= 0) {
pr_err("decompression failed: %d\n", unzipped_len);
/* Allocate enough space to hold max decompression and ECC. */
unzipped_len = big_oops_buf_sz;
workspace = kmalloc(unzipped_len + record->ecc_notice_size,
GFP_KERNEL);
if (!workspace)
return;
}
/* Build new buffer for decompressed contents. */
decompressed = kmalloc(unzipped_len + record->ecc_notice_size,
GFP_KERNEL);
if (!decompressed) {
pr_err("decompression ran out of memory\n");
/* After decompression "unzipped_len" is almost certainly smaller. */
ret = crypto_comp_decompress(tfm, record->buf, record->size,
workspace, &unzipped_len);
if (ret) {
pr_err("crypto_comp_decompress failed, ret = %d!\n", ret);
kfree(workspace);
return;
}
memcpy(decompressed, big_oops_buf, unzipped_len);
/* Append ECC notice to decompressed buffer. */
memcpy(decompressed + unzipped_len, record->buf + record->size,
memcpy(workspace + unzipped_len, record->buf + record->size,
record->ecc_notice_size);
/* Swap out compresed contents with decompressed contents. */
/* Copy decompressed contents into a minimum-sized allocation. */
unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size,
GFP_KERNEL);
kfree(workspace);
if (!unzipped)
return;
/* Swap out compressed contents with decompressed contents. */
kfree(record->buf);
record->buf = decompressed;
record->buf = unzipped;
record->size = unzipped_len;
record->compressed = false;
}
......
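
The decompress_record() change above stops borrowing the crash-time buffer and instead decompresses into a temporary worst-case workspace, then copies the result into a right-sized allocation. A condensed sketch of that pattern follows; it is not from the diff, and decompress_fn is a hypothetical stand-in for crypto_comp_decompress():

#include <linux/slab.h>
#include <linux/string.h>

typedef int (*decompress_fn)(const void *in, unsigned int inlen,
			     void *out, unsigned int *outlen);

static void *decompress_shrunk(decompress_fn decompress, const void *in,
			       unsigned int inlen, unsigned int max_out,
			       unsigned int *outlen)
{
	void *workspace, *out;

	/* Worst-case scratch space; freed before returning. */
	workspace = kmalloc(max_out, GFP_KERNEL);
	if (!workspace)
		return NULL;

	*outlen = max_out;
	if (decompress(in, inlen, workspace, outlen)) {
		kfree(workspace);
		return NULL;
	}

	/* Keep only what was actually produced. */
	out = kmemdup(workspace, *outlen, GFP_KERNEL);
	kfree(workspace);
	return out;
}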
......@@ -124,19 +124,17 @@ static int ramoops_pstore_open(struct pstore_info *psi)
}
static struct persistent_ram_zone *
ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
u64 *id,
enum pstore_type_id *typep, enum pstore_type_id type,
bool update)
ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
struct pstore_record *record)
{
struct persistent_ram_zone *prz;
int i = (*c)++;
bool update = (record->type == PSTORE_TYPE_DMESG);
/* Give up if we never existed or have hit the end. */
if (!przs || i >= max)
if (!przs)
return NULL;
prz = przs[i];
prz = przs[id];
if (!prz)
return NULL;
......@@ -147,8 +145,8 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
if (!persistent_ram_old_size(prz))
return NULL;
*typep = type;
*id = i;
record->type = prz->type;
record->id = id;
return prz;
}
......@@ -255,10 +253,8 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record)
/* Find the next valid persistent_ram_zone for DMESG */
while (cxt->dump_read_cnt < cxt->max_dump_cnt && !prz) {
prz = ramoops_get_next_prz(cxt->dprzs, &cxt->dump_read_cnt,
cxt->max_dump_cnt, &record->id,
&record->type,
PSTORE_TYPE_DMESG, 1);
prz = ramoops_get_next_prz(cxt->dprzs, cxt->dump_read_cnt++,
record);
if (!prz_ok(prz))
continue;
header_length = ramoops_read_kmsg_hdr(persistent_ram_old(prz),
......@@ -272,22 +268,18 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record)
}
}
if (!prz_ok(prz))
prz = ramoops_get_next_prz(&cxt->cprz, &cxt->console_read_cnt,
1, &record->id, &record->type,
PSTORE_TYPE_CONSOLE, 0);
if (!prz_ok(prz) && !cxt->console_read_cnt++)
prz = ramoops_get_next_prz(&cxt->cprz, 0 /* single */, record);
if (!prz_ok(prz))
prz = ramoops_get_next_prz(&cxt->mprz, &cxt->pmsg_read_cnt,
1, &record->id, &record->type,
PSTORE_TYPE_PMSG, 0);
if (!prz_ok(prz) && !cxt->pmsg_read_cnt++)
prz = ramoops_get_next_prz(&cxt->mprz, 0 /* single */, record);
/* ftrace is last since it may want to dynamically allocate memory. */
if (!prz_ok(prz)) {
if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)) {
prz = ramoops_get_next_prz(cxt->fprzs,
&cxt->ftrace_read_cnt, 1, &record->id,
&record->type, PSTORE_TYPE_FTRACE, 0);
if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU) &&
!cxt->ftrace_read_cnt++) {
prz = ramoops_get_next_prz(cxt->fprzs, 0 /* single */,
record);
} else {
/*
* Build a new dummy record which combines all the
......@@ -299,15 +291,12 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record)
GFP_KERNEL);
if (!tmp_prz)
return -ENOMEM;
prz = tmp_prz;
free_prz = true;
while (cxt->ftrace_read_cnt < cxt->max_ftrace_cnt) {
prz_next = ramoops_get_next_prz(cxt->fprzs,
&cxt->ftrace_read_cnt,
cxt->max_ftrace_cnt,
&record->id,
&record->type,
PSTORE_TYPE_FTRACE, 0);
cxt->ftrace_read_cnt++, record);
if (!prz_ok(prz_next))
continue;
......@@ -321,7 +310,6 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record)
goto out;
}
record->id = 0;
prz = tmp_prz;
}
}
......@@ -611,6 +599,7 @@ static int ramoops_init_przs(const char *name,
goto fail;
}
*paddr += zone_sz;
prz_ar[i]->type = pstore_name_to_type(name);
}
*przs = prz_ar;
......@@ -640,7 +629,7 @@ static int ramoops_init_prz(const char *name,
label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
cxt->memtype, 0, label);
cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
if (IS_ERR(*prz)) {
int err = PTR_ERR(*prz);
......@@ -649,9 +638,8 @@ static int ramoops_init_prz(const char *name,
return err;
}
persistent_ram_zap(*prz);
*paddr += sz;
(*prz)->type = pstore_name_to_type(name);
return 0;
}
......@@ -787,7 +775,7 @@ static int ramoops_probe(struct platform_device *pdev)
dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
- cxt->pmsg_size;
err = ramoops_init_przs("dump", dev, cxt, &cxt->dprzs, &paddr,
err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr,
dump_mem_sz, cxt->record_size,
&cxt->max_dump_cnt, 0, 0);
if (err)
......@@ -827,7 +815,6 @@ static int ramoops_probe(struct platform_device *pdev)
err = -ENOMEM;
goto fail_clear;
}
spin_lock_init(&cxt->pstore.buf_lock);
cxt->pstore.flags = PSTORE_FLAGS_DMESG;
if (cxt->console_size)
......@@ -855,9 +842,9 @@ static int ramoops_probe(struct platform_device *pdev)
ramoops_pmsg_size = pdata->pmsg_size;
ramoops_ftrace_size = pdata->ftrace_size;
pr_info("attached 0x%lx@0x%llx, ecc: %d/%d\n",
pr_info("using 0x%lx@0x%llx, ecc: %d\n",
cxt->size, (unsigned long long)cxt->phys_addr,
cxt->ecc_info.ecc_size, cxt->ecc_info.block_size);
cxt->ecc_info.ecc_size);
return 0;
......
......@@ -12,7 +12,7 @@
*
*/
#define pr_fmt(fmt) "persistent_ram: " fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/err.h>
......@@ -29,6 +29,16 @@
#include <linux/vmalloc.h>
#include <asm/page.h>
/**
* struct persistent_ram_buffer - persistent circular RAM buffer
*
* @sig:
* signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
* @start:
offset into @data where the stored bytes begin
* @size:
* number of valid bytes stored in @data
*/
struct persistent_ram_buffer {
uint32_t sig;
atomic_t start;
......@@ -443,7 +453,8 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
void *va;
if (!request_mem_region(start, size, label ?: "ramoops")) {
pr_err("request mem region (0x%llx@0x%llx) failed\n",
pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
label ?: "ramoops",
(unsigned long long)size, (unsigned long long)start);
return NULL;
}
......@@ -489,31 +500,41 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
struct persistent_ram_ecc_info *ecc_info)
{
int ret;
bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD);
ret = persistent_ram_init_ecc(prz, ecc_info);
if (ret)
if (ret) {
pr_warn("ECC failed %s\n", prz->label);
return ret;
}
sig ^= PERSISTENT_RAM_SIG;
if (prz->buffer->sig == sig) {
if (buffer_size(prz) == 0) {
pr_debug("found existing empty buffer\n");
return 0;
}
if (buffer_size(prz) > prz->buffer_size ||
buffer_start(prz) > buffer_size(prz))
buffer_start(prz) > buffer_size(prz)) {
pr_info("found existing invalid buffer, size %zu, start %zu\n",
buffer_size(prz), buffer_start(prz));
else {
zap = true;
} else {
pr_debug("found existing buffer, size %zu, start %zu\n",
buffer_size(prz), buffer_start(prz));
persistent_ram_save_old(prz);
return 0;
}
} else {
pr_debug("no valid data in buffer (sig = 0x%08x)\n",
prz->buffer->sig);
prz->buffer->sig = sig;
zap = true;
}
/* Rewind missing or invalid memory area. */
prz->buffer->sig = sig;
/* Reset missing, invalid, or single-use memory area. */
if (zap)
persistent_ram_zap(prz);
return 0;
......@@ -572,6 +593,12 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
if (ret)
goto err;
pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
prz->label, prz->size, (unsigned long long)prz->paddr,
sizeof(*prz->buffer), prz->buffer_size,
prz->size - sizeof(*prz->buffer) - prz->buffer_size,
prz->ecc_info.ecc_size, prz->ecc_info.block_size);
return prz;
err:
persistent_ram_free(prz);
......
......@@ -26,27 +26,38 @@
#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/semaphore.h>
#include <linux/time.h>
#include <linux/types.h>
struct module;
/* pstore record types (see fs/pstore/inode.c for filename templates) */
/*
* pstore record types (see fs/pstore/platform.c for pstore_type_names[])
* These values may be written to storage (see EFI vars backend), so
* they are kind of an ABI. Be careful changing the mappings.
*/
enum pstore_type_id {
/* Frontend storage types */
PSTORE_TYPE_DMESG = 0,
PSTORE_TYPE_MCE = 1,
PSTORE_TYPE_CONSOLE = 2,
PSTORE_TYPE_FTRACE = 3,
/* PPC64 partition types */
/* PPC64-specific partition types */
PSTORE_TYPE_PPC_RTAS = 4,
PSTORE_TYPE_PPC_OF = 5,
PSTORE_TYPE_PPC_COMMON = 6,
PSTORE_TYPE_PMSG = 7,
PSTORE_TYPE_PPC_OPAL = 8,
PSTORE_TYPE_UNKNOWN = 255
/* End of the list */
PSTORE_TYPE_MAX
};
const char *pstore_type_to_name(enum pstore_type_id type);
enum pstore_type_id pstore_name_to_type(const char *name);
struct pstore_info;
/**
* struct pstore_record - details of a pstore record entry
......@@ -85,10 +96,10 @@ struct pstore_record {
/**
* struct pstore_info - backend pstore driver structure
*
* @owner: module which is repsonsible for this backend driver
* @owner: module which is responsible for this backend driver
* @name: name of the backend driver
*
* @buf_lock: spinlock to serialize access to @buf
* @buf_lock: semaphore to serialize access to @buf
* @buf: preallocated crash dump buffer
* @bufsize: size of @buf available for crash dump bytes (must match
* smallest number of bytes available for writing to a
......@@ -173,7 +184,7 @@ struct pstore_info {
struct module *owner;
char *name;
spinlock_t buf_lock;
struct semaphore buf_lock;
char *buf;
size_t bufsize;
......@@ -192,14 +203,13 @@ struct pstore_info {
};
/* Supported frontends */
#define PSTORE_FLAGS_DMESG (1 << 0)
#define PSTORE_FLAGS_CONSOLE (1 << 1)
#define PSTORE_FLAGS_FTRACE (1 << 2)
#define PSTORE_FLAGS_PMSG (1 << 3)
#define PSTORE_FLAGS_DMESG BIT(0)
#define PSTORE_FLAGS_CONSOLE BIT(1)
#define PSTORE_FLAGS_FTRACE BIT(2)
#define PSTORE_FLAGS_PMSG BIT(3)
extern int pstore_register(struct pstore_info *);
extern void pstore_unregister(struct pstore_info *);
extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
struct pstore_ftrace_record {
unsigned long ip;
......
......@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pstore.h>
#include <linux/types.h>
/*
......@@ -30,6 +31,11 @@
* PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
*/
#define PRZ_FLAG_NO_LOCK BIT(0)
/*
* If a PRZ should only have a single-boot lifetime, this marks it as
* getting wiped after its contents get copied out after boot.
*/
#define PRZ_FLAG_ZAP_OLD BIT(1)
struct persistent_ram_buffer;
struct rs_control;
......@@ -42,17 +48,55 @@ struct persistent_ram_ecc_info {
uint16_t *par;
};
/**
* struct persistent_ram_zone - Details of a persistent RAM zone (PRZ)
* used as a pstore backend
*
* @paddr: physical address of the mapped RAM area
* @size: size of mapping
* @label: unique name of this PRZ
* @type: frontend type for this PRZ
* @flags: holds PRZ_FLAGS_* bits
*
* @buffer_lock:
* locks access to @buffer "size" bytes and "start" offset
* @buffer:
* pointer to actual RAM area managed by this PRZ
* @buffer_size:
* bytes in @buffer->data (not including any trailing ECC bytes)
*
* @par_buffer:
* pointer into @buffer->data containing ECC bytes for @buffer->data
* @par_header:
* pointer into @buffer->data containing ECC bytes for @buffer header
* (i.e. all fields up to @data)
* @rs_decoder:
* RSLIB instance for doing ECC calculations
* @corrected_bytes:
* ECC corrected bytes accounting since boot
* @bad_blocks:
* ECC uncorrectable bytes accounting since boot
* @ecc_info:
* ECC configuration details
*
* @old_log:
* saved copy of @buffer->data prior to most recent wipe
* @old_log_size:
* bytes contained in @old_log
*
*/
struct persistent_ram_zone {
phys_addr_t paddr;
size_t size;
void *vaddr;
char *label;
struct persistent_ram_buffer *buffer;
size_t buffer_size;
enum pstore_type_id type;
u32 flags;
raw_spinlock_t buffer_lock;
struct persistent_ram_buffer *buffer;
size_t buffer_size;
/* ECC correction */
char *par_buffer;
char *par_header;
struct rs_control *rs_decoder;
......
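
The new pstore_type_to_name()/pstore_name_to_type() helpers, together with the single "%s-%s-%llu" filename template in fs/pstore/inode.c, make the type-to-string mapping reusable. As a rough illustration only (parse_example() is hypothetical and not part of the series), the frontend prefix of a pstorefs name can be mapped back to its enum value:

#include <linux/pstore.h>
#include <linux/string.h>

static enum pstore_type_id parse_example(const char *filename)
{
	char prefix[32];
	const char *dash = strchr(filename, '-');
	size_t len;

	if (!dash)
		return PSTORE_TYPE_MAX;		/* no frontend prefix */

	len = dash - filename;
	if (len >= sizeof(prefix))
		return PSTORE_TYPE_MAX;

	memcpy(prefix, filename, len);
	prefix[len] = '\0';

	/* e.g. "dmesg-ramoops-1" -> PSTORE_TYPE_DMESG, "pmsg-..." -> PSTORE_TYPE_PMSG */
	/* Note: the powerpc-* names themselves contain '-', so a real parser needs more care. */
	return pstore_name_to_type(prefix);
}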