Commit 83300c1c authored by David S. Miller, committed by Greg Kroah-Hartman

[MM]: Add and use offset_in_page() convenience macro.

Based upon patches from Yoshfuji Hideaki <yoshfuji@linux-ipv6.org>
parent 3b6e7223
@@ -689,7 +689,7 @@ portably refer to any piece of memory. If you have a cpu pointer
 and offset using something like this:
         struct page *page = virt_to_page(ptr);
-        unsigned long offset = ((unsigned long)ptr & ~PAGE_MASK);
+        unsigned long offset = offset_in_page(ptr);
 Here are the interfaces:
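
For reference, the new helper simply strips the page-frame bits from a kernel
virtual address, leaving the byte offset within its page. A minimal userspace
sketch, assuming a 4 KiB page size and redefining the kernel macros locally
purely for illustration:

        #include <assert.h>
        #include <stdio.h>

        /* Local stand-ins for the kernel definitions; 4 KiB pages assumed. */
        #define PAGE_SHIFT  12
        #define PAGE_SIZE   (1UL << PAGE_SHIFT)
        #define PAGE_MASK   (~(PAGE_SIZE - 1))
        #define offset_in_page(p)   ((unsigned long)(p) & ~PAGE_MASK)

        int main(void)
        {
                static char buf[2 * 4096];
                char *ptr = buf + 5000;         /* some address inside buf */

                /* ~PAGE_MASK keeps only the low PAGE_SHIFT bits. */
                unsigned long offset = offset_in_page(ptr);

                assert(offset == (unsigned long)ptr % PAGE_SIZE);
                printf("ptr=%p offset_in_page=%lu\n", (void *)ptr, offset);
                return 0;
        }
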
@@ -26,7 +26,7 @@ static void hash_key(struct crypto_tfm *tfm, u8 *key, unsigned int keylen)
         struct scatterlist tmp;
         tmp.page = virt_to_page(key);
-        tmp.offset = ((long)key & ~PAGE_MASK);
+        tmp.offset = offset_in_page(key);
         tmp.length = keylen;
         crypto_digest_digest(tfm, &tmp, 1, key);
@@ -71,7 +71,7 @@ void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen)
                 ipad[i] ^= 0x36;
         tmp.page = virt_to_page(ipad);
-        tmp.offset = ((long)ipad & ~PAGE_MASK);
+        tmp.offset = offset_in_page(ipad);
         tmp.length = crypto_tfm_alg_blocksize(tfm);
         crypto_digest_init(tfm);
@@ -105,14 +105,14 @@ void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
                 opad[i] ^= 0x5c;
         tmp.page = virt_to_page(opad);
-        tmp.offset = ((long)opad & ~PAGE_MASK);
+        tmp.offset = offset_in_page(opad);
         tmp.length = crypto_tfm_alg_blocksize(tfm);
         crypto_digest_init(tfm);
         crypto_digest_update(tfm, &tmp, 1);
         tmp.page = virt_to_page(out);
-        tmp.offset = ((long)out & ~PAGE_MASK);
+        tmp.offset = offset_in_page(out);
         tmp.length = crypto_tfm_alg_digestsize(tfm);
         crypto_digest_update(tfm, &tmp, 1);
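
Every crypto-related hunk in this patch follows the same one-entry scatterlist
idiom shown above. A hedged sketch of that pattern against the 2.5-era crypto
API (the helper name is illustrative, and it assumes the buffer does not cross
a page boundary so that a single scatterlist entry suffices):

        #include <linux/types.h>
        #include <linux/crypto.h>
        #include <linux/mm.h>
        #include <asm/scatterlist.h>

        /* Illustrative helper: hash a virtually contiguous buffer that is
         * known not to straddle a page boundary. */
        static void digest_linear_buf(struct crypto_tfm *tfm, void *buf,
                                      unsigned int len, u8 *out)
        {
                struct scatterlist sg;

                sg.page   = virt_to_page(buf);          /* backing page */
                sg.offset = offset_in_page(buf);        /* byte offset into it */
                sg.length = len;

                crypto_digest_digest(tfm, &sg, 1, out);
        }
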
@@ -162,11 +162,11 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd, char *raw_buf,
                 iv[0] = cpu_to_le32(IV & 0xffffffff);
                 sg_in.page = virt_to_page(in);
-                sg_in.offset = (unsigned long)in & ~PAGE_MASK;
+                sg_in.offset = offset_in_page(in);
                 sg_in.length = sz;
                 sg_out.page = virt_to_page(out);
-                sg_out.offset = (unsigned long)out & ~PAGE_MASK;
+                sg_out.offset = offset_in_page(out);
                 sg_out.length = sz;
                 encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv);
@@ -233,7 +233,7 @@ static void ide_build_sglist(ide_drive_t *drive, struct request *rq)
                 memset(sg, 0, sizeof(*sg));
                 sg->page = virt_to_page(rq->buffer);
-                sg->offset = ((unsigned long)rq->buffer) & ~PAGE_MASK;
+                sg->offset = offset_in_page(rq->buffer);
                 sg->length = rq->nr_sectors * SECTOR_SIZE;
                 nents = 1;
         } else {
@@ -255,7 +255,7 @@ static int ide_raw_build_sglist (ide_drive_t *drive, struct request *rq)
 #endif
                 memset(&sg[nents], 0, sizeof(*sg));
                 sg[nents].page = virt_to_page(virt_addr);
-                sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
+                sg[nents].offset = offset_in_page(virt_addr);
                 sg[nents].length = 128 * SECTOR_SIZE;
                 nents++;
                 virt_addr = virt_addr + (128 * SECTOR_SIZE);
@@ -263,7 +263,7 @@ static int ide_raw_build_sglist (ide_drive_t *drive, struct request *rq)
         }
         memset(&sg[nents], 0, sizeof(*sg));
         sg[nents].page = virt_to_page(virt_addr);
-        sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
+        sg[nents].offset = offset_in_page(virt_addr);
         sg[nents].length = sector_count * SECTOR_SIZE;
         nents++;
@@ -971,7 +971,7 @@ pmac_ide_raw_build_sglist(ide_drive_t *drive, struct request *rq)
         if (sector_count > 127) {
                 memset(&sg[nents], 0, sizeof(*sg));
                 sg[nents].page = virt_to_page(virt_addr);
-                sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
+                sg[nents].offset = offset_in_page(virt_addr);
                 sg[nents].length = 127 * SECTOR_SIZE;
                 nents++;
                 virt_addr = virt_addr + (127 * SECTOR_SIZE);
@@ -979,7 +979,7 @@ pmac_ide_raw_build_sglist(ide_drive_t *drive, struct request *rq)
         }
         memset(&sg[nents], 0, sizeof(*sg));
         sg[nents].page = virt_to_page(virt_addr);
-        sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
+        sg[nents].offset = offset_in_page(virt_addr);
         sg[nents].length = sector_count * SECTOR_SIZE;
         nents++;
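
The two IDE hunks above split one long kernel-virtual buffer into fixed-size
scatterlist chunks; only the offset computation changes with this patch. A
sketch of that chunking shape, with illustrative names, a locally defined
sector size, and the assumption that sector_count is non-zero (this is not any
driver's actual code):

        #include <linux/mm.h>
        #include <linux/string.h>
        #include <asm/scatterlist.h>

        #define SECTOR_SIZE     512     /* assumed here for illustration */

        /* Illustrative: build sg[] from a linear buffer, 128 sectors per entry. */
        static int fill_sglist(struct scatterlist *sg, void *virt_addr,
                               unsigned long sector_count)
        {
                int nents = 0;

                while (sector_count > 128) {
                        memset(&sg[nents], 0, sizeof(*sg));
                        sg[nents].page   = virt_to_page(virt_addr);
                        sg[nents].offset = offset_in_page(virt_addr);
                        sg[nents].length = 128 * SECTOR_SIZE;
                        nents++;
                        virt_addr = (char *)virt_addr + 128 * SECTOR_SIZE;
                        sector_count -= 128;
                }
                memset(&sg[nents], 0, sizeof(*sg));
                sg[nents].page   = virt_to_page(virt_addr);
                sg[nents].offset = offset_in_page(virt_addr);
                sg[nents].length = sector_count * SECTOR_SIZE;
                nents++;

                return nents;
        }
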
@@ -1960,7 +1960,7 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
                  */
                 skb_reserve(skb, 2 + 16);
                 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
-                                ((unsigned long)skb->data & ~PAGE_MASK),
+                                offset_in_page(skb->data),
                                 ACE_STD_BUFSIZE - (2 + 16),
                                 PCI_DMA_FROMDEVICE);
                 ap->skb->rx_std_skbuff[idx].skb = skb;
@@ -2026,7 +2026,7 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
                  */
                 skb_reserve(skb, 2 + 16);
                 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
-                                ((unsigned long)skb->data & ~PAGE_MASK),
+                                offset_in_page(skb->data),
                                 ACE_MINI_BUFSIZE - (2 + 16),
                                 PCI_DMA_FROMDEVICE);
                 ap->skb->rx_mini_skbuff[idx].skb = skb;
@@ -2087,7 +2087,7 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
                  */
                 skb_reserve(skb, 2 + 16);
                 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
-                                ((unsigned long)skb->data & ~PAGE_MASK),
+                                offset_in_page(skb->data),
                                 ACE_JUMBO_BUFSIZE - (2 + 16),
                                 PCI_DMA_FROMDEVICE);
                 ap->skb->rx_jumbo_skbuff[idx].skb = skb;
@@ -2743,7 +2743,7 @@ ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
         struct tx_ring_info *info;
         mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
-                        ((unsigned long) skb->data & ~PAGE_MASK),
+                        offset_in_page(skb->data),
                         skb->len, PCI_DMA_TODEVICE);
         info = ap->skb->tx_skbuff + idx;
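
All of the network-driver hunks map an skb's linear data area the same way:
virt_to_page()/offset_in_page() split the kernel virtual address into the
(page, offset) pair that pci_map_page() expects. A hedged sketch of that
pattern against the 2.5-era PCI DMA API (the function name, length and
direction are placeholders, not any particular driver's values):

        #include <linux/pci.h>
        #include <linux/skbuff.h>
        #include <linux/mm.h>

        /* Illustrative: map the linear part of an skb for a transmit DMA. */
        static dma_addr_t map_skb_head(struct pci_dev *pdev, struct sk_buff *skb)
        {
                return pci_map_page(pdev,
                                    virt_to_page(skb->data),
                                    offset_in_page(skb->data),
                                    skb_headlen(skb),
                                    PCI_DMA_TODEVICE);
        }
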
@@ -2142,7 +2142,7 @@ struct sk_buff *pMessage) /* pointer to send-message */
          */
         PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
                         virt_to_page(pMessage->data),
-                        ((unsigned long) pMessage->data & ~PAGE_MASK),
+                        offset_in_page(pMessage->data),
                         pMessage->len,
                         PCI_DMA_TODEVICE);
         pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff);
@@ -2259,7 +2259,7 @@ struct sk_buff *pMessage) /* pointer to send-message */
          */
         PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
                         virt_to_page(pMessage->data),
-                        ((unsigned long) pMessage->data & ~PAGE_MASK),
+                        offset_in_page(pMessage->data),
                         skb_headlen(pMessage),
                         PCI_DMA_TODEVICE);
@@ -2518,8 +2518,7 @@ SK_U64 PhysAddr; /* physical address of a rx buffer */
         Length = pAC->RxBufSize;
         PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
                         virt_to_page(pMsgBlock->data),
-                        ((unsigned long) pMsgBlock->data &
-                         ~PAGE_MASK),
+                        offset_in_page(pMsgBlock->data),
                         pAC->RxBufSize - 2,
                         PCI_DMA_FROMDEVICE);
@@ -725,8 +725,7 @@ static void gem_rx(struct gem *gp)
                 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
                 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
                                         virt_to_page(new_skb->data),
-                                        ((unsigned long) new_skb->data &
-                                         ~PAGE_MASK),
+                                        offset_in_page(new_skb->data),
                                         RX_BUF_ALLOC_SIZE(gp),
                                         PCI_DMA_FROMDEVICE));
                 skb_reserve(new_skb, RX_OFFSET);
@@ -873,8 +872,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 len = skb->len;
                 mapping = pci_map_page(gp->pdev,
                                 virt_to_page(skb->data),
-                                ((unsigned long) skb->data &
-                                 ~PAGE_MASK),
+                                offset_in_page(skb->data),
                                 len, PCI_DMA_TODEVICE);
                 ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
                 if (gem_intme(entry))
@@ -898,7 +896,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
                  */
                 first_len = skb_headlen(skb);
                 first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
-                                ((unsigned long) skb->data & ~PAGE_MASK),
+                                offset_in_page(skb->data),
                                 first_len, PCI_DMA_TODEVICE);
                 entry = NEXT_TX(entry);
@@ -1464,8 +1462,7 @@ static void gem_init_rings(struct gem *gp)
                 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET));
                 dma_addr = pci_map_page(gp->pdev,
                                 virt_to_page(skb->data),
-                                ((unsigned long) skb->data &
-                                 ~PAGE_MASK),
+                                offset_in_page(skb->data),
                                 RX_BUF_ALLOC_SIZE(gp),
                                 PCI_DMA_FROMDEVICE);
                 rxd->buffer = cpu_to_le64(dma_addr);
@@ -2112,7 +2112,7 @@ static u32 tw_map_scsi_single_data(struct pci_dev *pdev, Scsi_Cmnd *cmd)
         if (cmd->request_bufflen == 0)
                 return 0;
-        mapping = pci_map_page(pdev, virt_to_page(cmd->request_buffer), ((unsigned long)cmd->request_buffer & ~PAGE_MASK), cmd->request_bufflen, dma_dir);
+        mapping = pci_map_page(pdev, virt_to_page(cmd->request_buffer), offset_in_page(cmd->request_buffer), cmd->request_bufflen, dma_dir);
         if (mapping == 0) {
                 printk(KERN_WARNING "3w-xxxx: tw_map_scsi_single_data(): pci_map_page() failed.\n");
@@ -23,7 +23,7 @@ static inline int copy_SCp_to_sg(struct scatterlist *sg, Scsi_Pointer *SCp, int
         BUG_ON(bufs + 1 > max);
         sg->page = virt_to_page(SCp->ptr);
-        sg->offset = ((unsigned int)SCp->ptr) & ~PAGE_MASK;
+        sg->offset = offset_in_page(SCp->ptr);
         sg->length = SCp->this_residual;
         if (bufs)
@@ -761,8 +761,8 @@ static inline struct bio *idescsi_dma_bio(ide_drive_t *drive, idescsi_pc_t *pc)
                 printk ("ide-scsi: %s: building DMA table for a single buffer (%dkB)\n", drive->name, pc->request_transfer >> 10);
 #endif /* IDESCSI_DEBUG_LOG */
                 bh->bi_io_vec[0].bv_page = virt_to_page(pc->scsi_cmd->request_buffer);
+                bh->bi_io_vec[0].bv_offset = offset_in_page(pc->scsi_cmd->request_buffer);
                 bh->bi_io_vec[0].bv_len = pc->request_transfer;
-                bh->bi_io_vec[0].bv_offset = (unsigned long) pc->scsi_cmd->request_buffer & ~PAGE_MASK;
                 bh->bi_size = pc->request_transfer;
         }
         return first_bh;
@@ -2275,8 +2275,7 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
         if( !cmd->use_sg ) {
                 page = virt_to_page(cmd->request_buffer);
-                offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
+                offset = offset_in_page(cmd->request_buffer);
                 scb->dma_h_bulkdata = pci_map_page(adapter->dev,
                                 page, offset,
@@ -1282,8 +1282,7 @@ int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *))
                 }
         } else if (Cmnd->request_bufflen && Cmnd->sc_data_direction != PCI_DMA_NONE) {
                 struct page *page = virt_to_page(Cmnd->request_buffer);
-                unsigned long offset = ((unsigned long)Cmnd->request_buffer &
-                                        ~PAGE_MASK);
+                unsigned long offset = offset_in_page(Cmnd->request_buffer);
                 dma_addr_t busaddr = pci_map_page(hostdata->pci_dev,
                                 page, offset,
                                 Cmnd->request_bufflen,
@@ -1926,8 +1925,7 @@ static int isp2x00_reset_hardware(struct Scsi_Host *host)
          */
         busaddr = pci_map_page(hostdata->pci_dev,
                         virt_to_page(&hostdata->control_block),
-                        ((unsigned long) &hostdata->control_block &
-                         ~PAGE_MASK),
+                        offset_in_page(&hostdata->control_block),
                         sizeof(hostdata->control_block),
                         PCI_DMA_BIDIRECTIONAL);
@@ -1814,7 +1814,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
                         break;
                 }
                 sclp->page = virt_to_page(p);
-                sclp->offset = (unsigned long) p & ~PAGE_MASK;
+                sclp->offset = offset_in_page(p);
                 sclp->length = ret_sz;
                 SCSI_LOG_TIMEOUT(5, printk("sg_build_build: k=%d, a=0x%p, len=%d\n",
@@ -1162,8 +1162,7 @@ static dma_addr_t __map_scsi_single_data(pcidev_t pdev, Scsi_Cmnd *cmd)
         mapping = pci_map_page(pdev,
                         virt_to_page(cmd->request_buffer),
-                        ((unsigned long)cmd->request_buffer &
-                         ~PAGE_MASK),
+                        offset_in_page(cmd->request_buffer),
                         cmd->request_bufflen, dma_dir);
         __data_mapped(cmd) = 1;
         __data_mapping(cmd) = mapping;
@@ -271,7 +271,7 @@ alloc_sglist (int nents, int max, int vary)
                 /* kmalloc pages are always physically contiguous! */
                 sg [i].page = virt_to_page (buf);
-                sg [i].offset = ((unsigned) buf) & ~PAGE_MASK;
+                sg [i].offset = offset_in_page (buf);
                 sg [i].length = size;
                 if (vary) {
@@ -1127,7 +1127,7 @@ sddr09_read_map(struct us_data *us) {
                 char *vaddr = kmalloc(alloc_req, GFP_NOIO);
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,3)
                 sg[i].page = virt_to_page(vaddr);
-                sg[i].offset = ((unsigned long)vaddr & ~PAGE_MASK);
+                sg[i].offset = offset_in_page(vaddr);
 #else
                 sg[i].address = vaddr;
 #endif
@@ -278,9 +278,6 @@ static void journal_kill_thread(journal_t *journal)
  * Bit 1 set == buffer copy-out performed (kfree the data after IO)
  */
-static inline unsigned long virt_to_offset(void *p)
-{return ((unsigned long) p) & ~PAGE_MASK;}
-
 int journal_write_metadata_buffer(transaction_t *transaction,
                 struct journal_head *jh_in,
                 struct journal_head **jh_out,
@@ -318,10 +315,10 @@ int journal_write_metadata_buffer(transaction_t *transaction,
         if (jh_in->b_frozen_data) {
                 done_copy_out = 1;
                 new_page = virt_to_page(jh_in->b_frozen_data);
-                new_offset = virt_to_offset(jh_in->b_frozen_data);
+                new_offset = offset_in_page(jh_in->b_frozen_data);
         } else {
                 new_page = jh2bh(jh_in)->b_page;
-                new_offset = virt_to_offset(jh2bh(jh_in)->b_data);
+                new_offset = offset_in_page(jh2bh(jh_in)->b_data);
         }
         mapped_data = kmap_atomic(new_page, KM_USER0);
@@ -358,7 +355,7 @@ int journal_write_metadata_buffer(transaction_t *transaction,
                    address kmapped so that we can clear the escaped
                    magic number below. */
                 new_page = virt_to_page(tmp);
-                new_offset = virt_to_offset(tmp);
+                new_offset = offset_in_page(tmp);
                 done_copy_out = 1;
         }
@@ -400,6 +400,8 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_MINOR  1
 #define VM_FAULT_MAJOR  2
+
+#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
 extern void show_free_areas(void);
 struct page *shmem_nopage(struct vm_area_struct * vma,
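
Given the definition added above, offset_in_page() pairs naturally with
virt_to_page(): for memory in the kernel's direct mapping, page_address()
plus the offset reconstructs the original pointer. A small hedged sketch of
that invariant (kmalloc() memory is used because it is always direct-mapped;
the function name is illustrative):

        #include <linux/kernel.h>
        #include <linux/mm.h>
        #include <linux/slab.h>

        /* Illustrative: (virt_to_page(p), offset_in_page(p)) round-trips to p
         * for direct-mapped (lowmem) addresses such as kmalloc() memory. */
        static void offset_in_page_roundtrip(void)
        {
                char *p = kmalloc(100, GFP_KERNEL);

                if (!p)
                        return;

                BUG_ON((char *)page_address(virt_to_page(p))
                       + offset_in_page(p) != p);
                kfree(p);
        }
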
@@ -1110,10 +1110,10 @@ static int __ipv6_regen_rndid(struct inet6_dev *idev)
         struct scatterlist sg[2];
         sg[0].page = virt_to_page(idev->entropy);
-        sg[0].offset = ((long) idev->entropy & ~PAGE_MASK);
+        sg[0].offset = offset_in_page(idev->entropy);
         sg[0].length = 8;
         sg[1].page = virt_to_page(eui64);
-        sg[1].offset = ((long) eui64 & ~PAGE_MASK);
+        sg[1].offset = offset_in_page(eui64);
         sg[1].length = 8;
         dev = idev->dev;
@@ -75,7 +75,7 @@ krb5_encrypt(
         memcpy(out, in, length);
         sg[0].page = virt_to_page(out);
-        sg[0].offset = ((long)out & ~PAGE_MASK);
+        sg[0].offset = offset_in_page(out);
         sg[0].length = length;
         ret = crypto_cipher_encrypt(tfm, sg, sg, length);
@@ -114,7 +114,7 @@ krb5_decrypt(
         memcpy(out, in, length);
         sg[0].page = virt_to_page(out);
-        sg[0].offset = ((long)out & ~PAGE_MASK);
+        sg[0].offset = offset_in_page(out);
         sg[0].length = length;
         ret = crypto_cipher_decrypt(tfm, sg, sg, length);
@@ -151,7 +151,7 @@ krb5_make_checksum(s32 cksumtype, struct xdr_netobj *input,
                 goto out_free_tfm;
         }
         sg[0].page = virt_to_page(input->data);
-        sg[0].offset = ((long)input->data & ~PAGE_MASK);
+        sg[0].offset = offset_in_page(input->data);
         sg[0].length = input->len;
         crypto_digest_init(tfm);