Commit 45aab7d4 authored by Michael Hayes, committed by Linus Torvalds

[PATCH] Spelling fixes: guarantee

    guarentee -> guarantee
    guarenteed -> guaranteed
    guarentees -> guarantees
parent 943a8df8
@@ -318,7 +318,7 @@ pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
 /* Unmap a single streaming mode DMA translation. The DMA_ADDR and
    SIZE must match what was provided for in a previous pci_map_single
    call. All other usages are undefined. After this call, reads by
-   the cpu to the buffer are guarenteed to see whatever the device
+   the cpu to the buffer are guaranteed to see whatever the device
    wrote there. */
 void
...
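The comment fixed here states the streaming-DMA contract that recurs throughout this patch: unmap first, then read. A minimal sketch of the map/use/unmap pattern it documents, assuming the PCI DMA API of this kernel era; pdev, buf, and len are illustrative placeholders, not taken from the patch:

    #include <linux/pci.h>

    /* Hedged illustration, not part of the patch: map a buffer for
     * device writes, let the device DMA into it, then unmap it.
     * Only after pci_unmap_single() are CPU reads of "buf" guaranteed
     * to see what the device wrote. */
    static void example_rx(struct pci_dev *pdev, void *buf, size_t len)
    {
            dma_addr_t handle;

            handle = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
            /* ... program the device with "handle" and wait for the DMA ... */
            pci_unmap_single(pdev, handle, len, PCI_DMA_FROMDEVICE);
    }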
@@ -359,7 +359,7 @@ mark_clean (void *addr, size_t size)
  * was provided for in a previous swiotlb_map_single call. All other usages are
  * undefined.
  *
- * After this call, reads by the cpu to the buffer are guarenteed to see whatever the
+ * After this call, reads by the cpu to the buffer are guaranteed to see whatever the
  * device wrote there.
  */
 void
...
@@ -74,7 +74,7 @@ dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
  * must match what was provided for in a previous pci_map_single call. All
  * other usages are undefined.
  *
- * After this call, reads by the cpu to the buffer are guarenteed to see
+ * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
 void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
...
@@ -599,7 +599,7 @@ dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
  * must match what was provided for in a previous pci_map_single call. All
  * other usages are undefined.
  *
- * After this call, reads by the cpu to the buffer are guarenteed to see
+ * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
 void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
...
@@ -1042,7 +1042,7 @@ static struct thread_info *sun4c_alloc_thread_info(void)
         get_locked_segment(addr);
         /* We are changing the virtual color of the page(s)
-         * so we must flush the cache to guarentee consistency.
+         * so we must flush the cache to guarantee consistency.
          */
         sun4c_flush_page(pages);
 #ifndef CONFIG_SUN4
...
@@ -571,7 +571,7 @@ unsigned long __init cheetah_tune_scheduling(void)
         unsigned long flush_linesize = ecache_flush_linesize;
         unsigned long flush_size = ecache_flush_size;
-        /* Run through the whole cache to guarentee the timed loop
+        /* Run through the whole cache to guarantee the timed loop
          * is really displacing cache lines.
          */
         __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
...
@@ -416,7 +416,7 @@ U3copy_from_user_toosmall:
 2:      VISEntryHalf                    ! MS+MS
-        /* Compute (len - (len % 8)) into %g2. This is guarenteed
+        /* Compute (len - (len % 8)) into %g2. This is guaranteed
          * to be nonzero.
          */
         andn    %o2, 0x7, %g2           ! A0 Group
@@ -425,7 +425,7 @@ U3copy_from_user_toosmall:
          * one 8-byte longword past the end of src. It actually
          * does not, as %g2 is subtracted as loads are done from
          * src, so we always stop before running off the end.
-         * Also, we are guarenteed to have at least 0x10 bytes
+         * Also, we are guaranteed to have at least 0x10 bytes
          * to move here.
          */
         sub     %g2, 0x8, %g2           ! A0 Group (reg-dep)
...
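This same comment recurs in the U3copy_in_user, U3copy_to_user, and U3memcpy hunks below. The andn it annotates implements exactly the rounding the comment describes: clearing the low three bits of the length equals subtracting len % 8. An illustrative C equivalent (g2 and len are placeholders):

    /* andn %o2, 0x7, %g2  computes  g2 = len & ~7UL  ==  len - (len % 8).
     * e.g. len = 29 (0x1d) -> g2 = 24 (0x18); since this path is only
     * taken with at least 0x10 bytes left, the result is nonzero. */
    unsigned long g2 = len & ~7UL;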
@@ -447,7 +447,7 @@ U3copy_in_user_toosmall:
 2:      VISEntryHalf                    ! MS+MS
-        /* Compute (len - (len % 8)) into %g2. This is guarenteed
+        /* Compute (len - (len % 8)) into %g2. This is guaranteed
          * to be nonzero.
          */
         andn    %o2, 0x7, %g2           ! A0 Group
@@ -456,7 +456,7 @@ U3copy_in_user_toosmall:
          * one 8-byte longword past the end of src. It actually
          * does not, as %g2 is subtracted as loads are done from
          * src, so we always stop before running off the end.
-         * Also, we are guarenteed to have at least 0x10 bytes
+         * Also, we are guaranteed to have at least 0x10 bytes
          * to move here.
          */
         sub     %g2, 0x8, %g2           ! A0 Group (reg-dep)
...
@@ -463,7 +463,7 @@ U3copy_to_user_toosmall:
 2:      VISEntryHalf                    ! MS+MS
-        /* Compute (len - (len % 8)) into %g2. This is guarenteed
+        /* Compute (len - (len % 8)) into %g2. This is guaranteed
          * to be nonzero.
          */
         andn    %o2, 0x7, %g2           ! A0 Group
@@ -472,7 +472,7 @@ U3copy_to_user_toosmall:
          * one 8-byte longword past the end of src. It actually
          * does not, as %g2 is subtracted as loads are done from
          * src, so we always stop before running off the end.
-         * Also, we are guarenteed to have at least 0x10 bytes
+         * Also, we are guaranteed to have at least 0x10 bytes
          * to move here.
          */
         sub     %g2, 0x8, %g2           ! A0 Group (reg-dep)
...
@@ -344,7 +344,7 @@ U3memcpy_toosmall:
 2:      VISEntryHalf                    ! MS+MS
-        /* Compute (len - (len % 8)) into %g2. This is guarenteed
+        /* Compute (len - (len % 8)) into %g2. This is guaranteed
          * to be nonzero.
          */
         andn    %o2, 0x7, %g2           ! A0 Group
@@ -353,7 +353,7 @@ U3memcpy_toosmall:
          * one 8-byte longword past the end of src. It actually
          * does not, as %g2 is subtracted as loads are done from
          * src, so we always stop before running off the end.
-         * Also, we are guarenteed to have at least 0x10 bytes
+         * Also, we are guaranteed to have at least 0x10 bytes
          * to move here.
          */
         sub     %g2, 0x8, %g2           ! A0 Group (reg-dep)
...
@@ -210,7 +210,7 @@ scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
         stk = &sa->cmd_stack;
         size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
-        // pci_alloc_consistent guarentees 32-bit DMA address will
+        // pci_alloc_consistent guarantees 32-bit DMA address will
         // be used
         stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
...
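pci_alloc_consistent(), named in the comment above, is what the driver relies on for the 32-bit claim: it returns a kernel virtual address for the pool and fills in a bus address the comment says the hardware can reach. A hedged sketch of such a call (everything except the API name is a placeholder):

    #include <linux/pci.h>

    /* Illustration only: allocate a coherent pool; *handle receives the
     * DMA address the comment above says will fit 32-bit addressing. */
    static void *alloc_cmd_pool(struct pci_dev *pdev, size_t size,
                                dma_addr_t *handle)
    {
            return pci_alloc_consistent(pdev, size, handle);
    }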
@@ -1882,7 +1882,7 @@ static int do_con_write(struct tty_struct * tty, int from_user,
         buf = con_buf;
         }
-        /* At this point 'buf' is guarenteed to be a kernel buffer
+        /* At this point 'buf' is guaranteed to be a kernel buffer
          * and therefore no access to userspace (and therefore sleeping)
          * will be needed. The con_buf_sem serializes all tty based
          * console rendering and vcs write/read operations. We hold
...
@@ -292,7 +292,7 @@ static void pppoe_flush_dev(struct net_device *dev)
         /* Now restart from the beginning of this
          * hash chain. We always NULL out pppoe_dev
-         * so we are guarenteed to make forward
+         * so we are guaranteed to make forward
          * progress.
          */
         po = item_hash_table[hash];
...
@@ -2991,7 +2991,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
         gem_begin_auto_negotiation(gp, NULL);
         spin_unlock_irq(&gp->lock);
-        /* It is guarenteed that the returned buffer will be at least
+        /* It is guaranteed that the returned buffer will be at least
          * PAGE_SIZE aligned.
          */
         gp->init_block = (struct gem_init_block *)
...
@@ -830,7 +830,7 @@ struct gem_txd {
  * RX Kick register) by the driver it must make sure the buffers are
  * truly ready and that the ownership bits are set properly.
  *
- * Even though GEM modifies the RX descriptors, it guarentees that the
+ * Even though GEM modifies the RX descriptors, it guarantees that the
  * buffer DMA address field will stay the same when it performs these
  * updates. Therefore it can be used to keep track of DMA mappings
  * by the host driver just as in the TX descriptor case above.
...
@@ -2194,7 +2194,7 @@ static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
         int i;
 #if !PCI_DMA_BUS_IS_PHYS
-        /* IOMMU, just map the guilty area again which is guarenteed to
+        /* IOMMU, just map the guilty area again which is guaranteed to
          * use different addresses.
          */
@@ -2229,7 +2229,7 @@ static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
         return -1;
         }
-        /* New SKB is guarenteed to be linear. */
+        /* New SKB is guaranteed to be linear. */
         entry = *start;
         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                   PCI_DMA_TODEVICE);
...
@@ -334,7 +334,7 @@ struct bio *bio_copy(struct bio *bio, int gfp_mask, int copy)
  * @bdev: I/O target
  *
  * Return the approximate number of pages we can send to this target.
- * There's no guarentee that you will be able to fit this number of pages
+ * There's no guarantee that you will be able to fit this number of pages
  * into a bio, it does not account for dynamic restrictions that vary
  * on offset.
  */
...
@@ -598,7 +598,7 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
  * If a page is already under I/O, generic_writepages() skips it, even
  * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
  * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
- * and msync() need to guarentee that all the data which was dirty at the time
+ * and msync() need to guarantee that all the data which was dirty at the time
  * the call was made get new I/O started against them. So if called_for_sync()
  * is true, we must wait for existing IO to complete.
  *
...
@@ -97,7 +97,7 @@ extern dma_addr_t pci_map_page(struct pci_dev *, struct page *,
 /* Unmap a single streaming mode DMA translation. The DMA_ADDR and
    SIZE must match what was provided for in a previous pci_map_single
    call. All other usages are undefined. After this call, reads by
-   the cpu to the buffer are guarenteed to see whatever the device
+   the cpu to the buffer are guaranteed to see whatever the device
    wrote there. */
 extern void pci_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);
...
@@ -102,7 +102,7 @@ extern inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
  * must match what was provided for in a previous pci_map_single call. All
  * other usages are undefined.
  *
- * After this call, reads by the cpu to the buffer are guarenteed to see
+ * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
 extern inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
...
@@ -126,7 +126,7 @@ static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
  * must match what was provided for in a previous pci_map_single call. All
  * other usages are undefined.
  *
- * After this call, reads by the cpu to the buffer are guarenteed to see
+ * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
 static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
...
@@ -118,7 +118,7 @@ static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
  * must match what was provided for in a previous pci_map_single call. All
  * other usages are undefined.
  *
- * After this call, reads by the cpu to the buffer are guarenteed to see
+ * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
 static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
...
@@ -59,7 +59,7 @@ extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
  * must match what was provided for in a previous pci_map_single call. All
  * other usages are undefined.
  *
- * After this call, reads by the cpu to the buffer are guarenteed to see
+ * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
 extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
...
@@ -59,7 +59,7 @@ struct pci_iommu {
         unsigned long iommu_ctxflush;   /* IOMMU context flush register */
         /* This is a register in the PCI controller, which if
-         * read will have no side-effects but will guarentee
+         * read will have no side-effects but will guarantee
          * completion of all previous writes into IOMMU/STC.
          */
         unsigned long write_complete_reg;
...
@@ -67,7 +67,7 @@ extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
  * must match what was provided for in a previous pci_map_single call. All
  * other usages are undefined.
  *
- * After this call, reads by the cpu to the buffer are guarenteed to see
+ * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
 extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
...
@@ -107,7 +107,7 @@ int parse_dcc(char *data, char *data_end, u_int32_t * ip, u_int16_t * port,
 static int help(const struct iphdr *iph, size_t len,
                 struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
 {
-        /* tcplen not negative guarenteed by ip_conntrack_tcp.c */
+        /* tcplen not negative guaranteed by ip_conntrack_tcp.c */
         struct tcphdr *tcph = (void *) iph + iph->ihl * 4;
         const char *data = (const char *) tcph + tcph->doff * 4;
         const char *_data = data;
...