Commit 7eb792bf authored by Adam Buchbinder, committed by Martin Schwidefsky

s390: Fix misspellings in comments

Signed-off-by: Adam Buchbinder <adam.buchbinder@gmail.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 1e133ab2
@@ -21,7 +21,7 @@
 #define PMU_F_ERR_LSDA			0x0200
 #define PMU_F_ERR_MASK			(PMU_F_ERR_IBE|PMU_F_ERR_LSDA)
 
-/* Perf defintions for PMU event attributes in sysfs */
+/* Perf definitions for PMU event attributes in sysfs */
 extern __init const struct attribute_group **cpumf_cf_event_group(void);
 extern ssize_t cpumf_events_sysfs_show(struct device *dev,
 				       struct device_attribute *attr,
...
@@ -31,7 +31,7 @@
  * This should be totally fair - if anything is waiting, a process that wants a
  * lock will go to the back of the queue. When the currently active lock is
  * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consequtive readers at the
+ * that will be woken up; if there's a bunch of consecutive readers at the
  * front, then they'll all be woken up, but no other readers will be.
  */
...
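For orientation, the comment fixed above describes a wake-up policy: on release, a writer at the head of the wait queue is woken alone, while a run of consecutive readers at the head is woken together. A minimal, self-contained C sketch of that policy follows; it is illustrative only, not the kernel's rwsem implementation, and the names (wake_from_head, struct waiter) are hypothetical.

/* Sketch of the wake-up policy: wake the lone writer at the head,
 * or the whole run of consecutive readers at the head. Hypothetical
 * names; not the kernel's rwsem code. */
#include <stdio.h>

enum waiter_type { READER, WRITER };

struct waiter {
	enum waiter_type type;
	int id;
};

/* Returns how many waiters are woken from the front of the queue. */
static int wake_from_head(const struct waiter *queue, int n)
{
	int woken = 0;

	if (n == 0)
		return 0;

	if (queue[0].type == WRITER)
		return 1;	/* a writer at the head is woken alone */

	/* otherwise wake every consecutive reader at the head */
	while (woken < n && queue[woken].type == READER)
		woken++;
	return woken;
}

int main(void)
{
	struct waiter q[] = {
		{ READER, 1 }, { READER, 2 }, { WRITER, 3 }, { READER, 4 },
	};

	/* Prints 2: readers 1 and 2 wake; writer 3 blocks reader 4. */
	printf("woken: %d\n", wake_from_head(q, 4));
	return 0;
}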
@@ -383,7 +383,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	/* Validate the counter that is assigned to this event.
 	 * Because the counter facility can use numerous counters at the
-	 * same time without constraints, it is not necessary to explicity
+	 * same time without constraints, it is not necessary to explicitly
 	 * validate event groups (event->group_leader != event).
 	 */
 	err = validate_event(hwc);
...
@@ -238,7 +238,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
 	dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
 }
 
-/* Perf defintions for PMU event attributes in sysfs */
+/* Perf definitions for PMU event attributes in sysfs */
 ssize_t cpumf_events_sysfs_show(struct device *dev,
 				struct device_attribute *attr, char *page)
 {
...
@@ -17,7 +17,7 @@
 /*
  * Extends the address range given by *start and *stop to include the address
  * range starting with estart and the length len. Takes care of overflowing
- * intervals and tries to minimize the overall intervall size.
+ * intervals and tries to minimize the overall interval size.
  */
 static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
 {
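The comment fixed above describes growing an interval [*start, *stop] just enough to also cover the range starting at estart with length len. A simplified C sketch of that idea follows; it assumes no u64 wrap-around (which the real helper's comment says it handles) and uses the hypothetical name extend_range rather than the kernel's function.

/* Simplified sketch: grow [*start, *stop] to cover
 * [estart, estart + len - 1]. Assumes no u64 overflow;
 * not the kernel's extend_address_range(). */
#include <stdio.h>
#include <stdint.h>

static void extend_range(uint64_t *start, uint64_t *stop,
			 uint64_t estart, int len)
{
	uint64_t estop = estart + len - 1;	/* assumes no wrap-around */

	if (estart < *start)
		*start = estart;
	if (estop > *stop)
		*stop = estop;
}

int main(void)
{
	uint64_t start = 0x1000, stop = 0x1fff;

	extend_range(&start, &stop, 0x0f00, 0x200);
	/* Prints: range: 0xf00-0x1fff */
	printf("range: %#llx-%#llx\n",
	       (unsigned long long)start, (unsigned long long)stop);
	return 0;
}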
@@ -72,7 +72,7 @@ static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
 		return;
 
 	/*
-	 * If the guest is not interrested in branching events, we can savely
+	 * If the guest is not interested in branching events, we can safely
 	 * limit them to the PER address range.
 	 */
 	if (!(*cr9 & PER_EVENT_BRANCH))
...