Commit 45711f1a authored by Jens Axboe

[SG] Update drivers to use sg helpers

Signed-off-by: Jens Axboe <[email protected]>
parent 78c2f0b8
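
Every hunk below applies the same conversion: drivers stop dereferencing struct scatterlist fields (sg->page in particular) and go through the accessors from <linux/scatterlist.h>, and each scatterlist is initialized with sg_init_table() up front so that later chained-sg patches can encode end-of-list and chain markers without touching every driver again. As orientation, a minimal before/after sketch of the pattern (hypothetical driver code, not part of this commit; it assumes the two-argument sg_set_page() of this kernel revision):

        #include <linux/scatterlist.h>

        /* Before: scatterlist fields poked directly; no end marker is set. */
        static void fill_sg_old(struct scatterlist *sg, struct page *page,
                                unsigned int len, unsigned int off)
        {
                sg->page   = page;
                sg->offset = off;
                sg->length = len;
        }

        /* After: initialize the table once, then go through the helpers. */
        static void fill_sg_new(struct scatterlist *sg, struct page *page,
                                unsigned int len, unsigned int off)
        {
                sg_init_table(sg, 1);   /* zeroes the entry, marks list end */
                sg_set_page(sg, page);  /* two-argument form at this commit */
                sg->offset = off;
                sg->length = len;
        }

        /* Reads go through accessors as well:
         *   sg_page(sg)  replaces  sg->page
         *   sg_virt(sg)  replaces  page_address(sg->page) + sg->offset
         *   sg_phys(sg)  replaces  page_to_phys(sg->page) + sg->offset
         */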
@@ -1115,7 +1115,7 @@ static void do_ubd_request(struct request_queue *q)
                 }
                 prepare_request(req, io_req,
                                 (unsigned long long) req->sector << 9,
-                                sg->offset, sg->length, sg->page);
+                                sg->offset, sg->length, sg_page(sg));
                 last_sectors = sg->length >> 9;
                 n = os_write_file(thread_fd, &io_req,
......
@@ -4296,7 +4296,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
                 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
         if (pad_buf) {
                 struct scatterlist *psg = &qc->pad_sgent;
-                void *addr = kmap_atomic(psg->page, KM_IRQ0);
+                void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
                 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
                 kunmap_atomic(addr, KM_IRQ0);
         }
@@ -4686,11 +4686,11 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
                  * data in this function or read data in ata_sg_clean.
                  */
                 offset = lsg->offset + lsg->length - qc->pad_len;
-                psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
+                sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT));
                 psg->offset = offset_in_page(offset);
                 if (qc->tf.flags & ATA_TFLAG_WRITE) {
-                        void *addr = kmap_atomic(psg->page, KM_IRQ0);
+                        void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
                         memcpy(pad_buf, addr + psg->offset, qc->pad_len);
                         kunmap_atomic(addr, KM_IRQ0);
                 }
@@ -4836,7 +4836,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
         if (qc->curbytes == qc->nbytes - qc->sect_size)
                 ap->hsm_task_state = HSM_ST_LAST;
-        page = qc->cursg->page;
+        page = sg_page(qc->cursg);
         offset = qc->cursg->offset + qc->cursg_ofs;
         /* get the current page and offset */
@@ -4988,7 +4988,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
         sg = qc->cursg;
-        page = sg->page;
+        page = sg_page(sg);
         offset = sg->offset + qc->cursg_ofs;
         /* get the current page and offset */
......
@@ -1544,7 +1544,7 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
         struct scatterlist *sg = scsi_sglist(cmd);
         if (sg) {
-                buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+                buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
                 buflen = sg->length;
         } else {
                 buf = NULL;
......
@@ -345,6 +345,7 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
       Command->V1.ScatterGatherList =
                 (DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU;
       Command->V1.ScatterGatherListDMA = ScatterGatherDMA;
+      sg_init_table(Command->cmd_sglist, DAC960_V1_ScatterGatherLimit);
     } else {
       Command->cmd_sglist = Command->V2.ScatterList;
       Command->V2.ScatterGatherList =
@@ -353,6 +354,7 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
       Command->V2.RequestSense =
                 (DAC960_SCSI_RequestSense_T *)RequestSenseCPU;
       Command->V2.RequestSenseDMA = RequestSenseDMA;
+      sg_init_table(Command->cmd_sglist, DAC960_V2_ScatterGatherLimit);
     }
   }
   return true;
......
@@ -2610,7 +2610,7 @@ static void do_cciss_request(struct request_queue *q)
                (int)creq->nr_sectors);
 #endif /* CCISS_DEBUG */
-        memset(tmp_sg, 0, sizeof(tmp_sg));
+        sg_init_table(tmp_sg, MAXSGENTRIES);
         seg = blk_rq_map_sg(q, creq, tmp_sg);
         /* get the DMA records for the setup */
@@ -2621,7 +2621,7 @@ static void do_cciss_request(struct request_queue *q)
         for (i = 0; i < seg; i++) {
                 c->SG[i].Len = tmp_sg[i].length;
-                temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
+                temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
                                                   tmp_sg[i].offset,
                                                   tmp_sg[i].length, dir);
                 c->SG[i].Addr.lower = temp64.val32.lower;
......
@@ -918,6 +918,7 @@ static void do_ida_request(struct request_queue *q)
 DBGPX(
         printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
 );
+        sg_init_table(tmp_sg, SG_MAX);
         seg = blk_rq_map_sg(q, creq, tmp_sg);
         /* Now do all the DMA Mappings */
@@ -929,7 +930,7 @@ DBGPX(
         {
                 c->req.sg[i].size = tmp_sg[i].length;
                 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
-                                                         tmp_sg[i].page,
+                                                         sg_page(&tmp_sg[i]),
                                                          tmp_sg[i].offset,
                                                          tmp_sg[i].length, dir);
         }
......
@@ -26,6 +26,7 @@
 #include <linux/crypto.h>
 #include <linux/blkdev.h>
 #include <linux/loop.h>
+#include <linux/scatterlist.h>
 
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
@@ -119,14 +120,17 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
                 .tfm = tfm,
                 .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
         };
-        struct scatterlist sg_out = { NULL, };
-        struct scatterlist sg_in = { NULL, };
+        struct scatterlist sg_out;
+        struct scatterlist sg_in;
         encdec_cbc_t encdecfunc;
         struct page *in_page, *out_page;
         unsigned in_offs, out_offs;
         int err;
+        sg_init_table(&sg_out, 1);
+        sg_init_table(&sg_in, 1);
+
         if (cmd == READ) {
                 in_page = raw_page;
                 in_offs = raw_off;
@@ -146,11 +150,11 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
                 u32 iv[4] = { 0, };
                 iv[0] = cpu_to_le32(IV & 0xffffffff);
-                sg_in.page = in_page;
+                sg_set_page(&sg_in, in_page);
                 sg_in.offset = in_offs;
                 sg_in.length = sz;
-                sg_out.page = out_page;
+                sg_set_page(&sg_out, out_page);
                 sg_out.offset = out_offs;
                 sg_out.length = sz;
......
@@ -388,6 +388,7 @@ static int __send_request(struct request *req)
                 op = VD_OP_BWRITE;
         }
 
+        sg_init_table(sg, port->ring_cookies);
         nsg = blk_rq_map_sg(req->q, req, sg);
 
         len = 0;
......
@@ -522,6 +522,7 @@ static struct carm_request *carm_get_request(struct carm_host *host)
         host->n_msgs++;
         assert(host->n_msgs <= CARM_MAX_REQ);
+        sg_init_table(crq->sg, CARM_MAX_REQ_SG);
         return crq;
 }
......
@@ -25,6 +25,7 @@
 #include <linux/usb_usual.h>
 #include <linux/blkdev.h>
 #include <linux/timer.h>
+#include <linux/scatterlist.h>
 #include <scsi/scsi.h>
 
 #define DRV_NAME "ub"
@@ -656,6 +657,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
         if ((cmd = ub_get_cmd(lun)) == NULL)
                 return -1;
         memset(cmd, 0, sizeof(struct ub_scsi_cmd));
+        sg_init_table(cmd->sgv, UB_MAX_REQ_SG);
 
         blkdev_dequeue_request(rq);
@@ -1309,9 +1311,8 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
         else
                 pipe = sc->send_bulk_pipe;
         sc->last_pipe = pipe;
-        usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
-                page_address(sg->page) + sg->offset, sg->length,
-                ub_urb_complete, sc);
+        usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
+                sg->length, ub_urb_complete, sc);
         sc->work_urb.actual_length = 0;
         sc->work_urb.error_count = 0;
         sc->work_urb.status = 0;
@@ -1427,7 +1428,7 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
         scmd->state = UB_CMDST_INIT;
         scmd->nsg = 1;
         sg = &scmd->sgv[0];
-        sg->page = virt_to_page(sc->top_sense);
+        sg_set_page(sg, virt_to_page(sc->top_sense));
         sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
         sg->length = UB_SENSE_SIZE;
         scmd->len = UB_SENSE_SIZE;
@@ -1863,7 +1864,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
         cmd->state = UB_CMDST_INIT;
         cmd->nsg = 1;
         sg = &cmd->sgv[0];
-        sg->page = virt_to_page(p);
+        sg_set_page(sg, virt_to_page(p));
         sg->offset = (unsigned long)p & (PAGE_SIZE-1);
         sg->length = 8;
         cmd->len = 8;
......
@@ -41,6 +41,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/completion.h>
 #include <linux/device.h>
+#include <linux/scatterlist.h>
 
 #include <asm/uaccess.h>
 #include <asm/vio.h>
@@ -270,6 +271,7 @@ static int send_request(struct request *req)
         d = req->rq_disk->private_data;
 
         /* Now build the scatter-gather list */
+        sg_init_table(sg, VIOMAXBLOCKDMA);
         nsg = blk_rq_map_sg(req->q, req, sg);
         nsg = dma_map_sg(d->dev, sg, nsg, direction);
......
@@ -935,11 +935,11 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
                  * than two possibly non-adjacent physical 4kB pages.
                  */
                 /* group sequential buffers into one large buffer */
-                addr = page_to_phys(sg->page) + sg->offset;
+                addr = sg_phys(sg);
                 size = sg_dma_len(sg);
                 while (--i) {
                         sg = sg_next(sg);
-                        if ((addr + size) != page_to_phys(sg->page) + sg->offset)
+                        if ((addr + size) != sg_phys(sg))
                                 break;
                         size += sg_dma_len(sg);
                 }
......
@@ -1317,12 +1317,14 @@ static int hwif_init(ide_hwif_t *hwif)
         if (!hwif->sg_max_nents)
                 hwif->sg_max_nents = PRD_ENTRIES;
-        hwif->sg_table = kzalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
+        hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
                                  GFP_KERNEL);
         if (!hwif->sg_table) {
                 printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
                 goto out;
         }
+        sg_init_table(hwif->sg_table, hwif->sg_max_nents);
 
         if (init_irq(hwif) == 0)
                 goto done;
......
@@ -261,7 +261,7 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
                 hwif->cursg = sg;
         }
-        page = cursg->page;
+        page = sg_page(cursg);
         offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
         /* get the current page and offset */
......
@@ -276,8 +276,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
                 if (iswrite) {
                         if(!put_source_flags(ahwif->tx_chan,
-                                             (void*)(page_address(sg->page)
-                                                     + sg->offset),
+                                             (void*) sg_virt(sg),
                                              tc, flags)) {
                                 printk(KERN_ERR "%s failed %d\n",
                                        __FUNCTION__, __LINE__);
@@ -285,8 +284,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
                 } else
                 {
                         if(!put_dest_flags(ahwif->rx_chan,
-                                           (void*)(page_address(sg->page)
-                                                   + sg->offset),
+                                           (void*) sg_virt(sg),
                                            tc, flags)) {
                                 printk(KERN_ERR "%s failed %d\n",
                                        __FUNCTION__, __LINE__);
......
@@ -111,7 +111,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
                 unsigned long va =
                         (unsigned long)dma->kvirt + (i << PAGE_SHIFT);
-                dma->sglist[i].page = vmalloc_to_page((void *)va);
+                sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va));
                 dma->sglist[i].length = PAGE_SIZE;
         }
......
@@ -1466,7 +1466,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
                 cmd->dma_size = sgpnt[0].length;
                 cmd->dma_type = CMD_DMA_PAGE;
                 cmd->cmd_dma = dma_map_page(hi->host->device.parent,
-                                            sgpnt[0].page, sgpnt[0].offset,
+                                            sg_page(&sgpnt[0]), sgpnt[0].offset,
                                             cmd->dma_size, cmd->dma_dir);
                 orb->data_descriptor_lo = cmd->cmd_dma;
......
@@ -55,9 +55,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
                 ib_dma_unmap_sg(dev, chunk->page_list,
                                 chunk->nents, DMA_BIDIRECTIONAL);
                 for (i = 0; i < chunk->nents; ++i) {
+                        struct page *page = sg_page(&chunk->page_list[i]);
+
                         if (umem->writable && dirty)
-                                set_page_dirty_lock(chunk->page_list[i].page);
-                        put_page(chunk->page_list[i].page);
+                                set_page_dirty_lock(page);
+                        put_page(page);
                 }
                 kfree(chunk);
@@ -164,11 +166,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                 }
                 chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+                sg_init_table(chunk->page_list, chunk->nents);
                 for (i = 0; i < chunk->nents; ++i) {
                         if (vma_list &&
                             !is_vm_hugetlb_page(vma_list[i + off]))
                                 umem->hugetlb = 0;
-                        chunk->page_list[i].page = page_list[i + off];
+                        sg_set_page(&chunk->page_list[i], page_list[i + off]);
                         chunk->page_list[i].offset = 0;
                         chunk->page_list[i].length = PAGE_SIZE;
                 }
@@ -179,7 +182,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                                         DMA_BIDIRECTIONAL);
                 if (chunk->nmap <= 0) {
                         for (i = 0; i < chunk->nents; ++i)
-                                put_page(chunk->page_list[i].page);
+                                put_page(sg_page(&chunk->page_list[i]));
                         kfree(chunk);
                         ret = -ENOMEM;
......
@@ -108,7 +108,7 @@ static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
         BUG_ON(!valid_dma_direction(direction));
         for_each_sg(sgl, sg, nents, i) {
-                addr = (u64) page_address(sg->page);
+                addr = (u64) page_address(sg_page(sg));
                 /* TODO: handle highmem pages */
                 if (!addr) {
                         ret = 0;
@@ -127,7 +127,7 @@ static void ipath_unmap_sg(struct ib_device *dev,
 static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
 {
-        u64 addr = (u64) page_address(sg->page);
+        u64 addr = (u64) page_address(sg_page(sg));
         if (addr)
                 addr += sg->offset;
......
@@ -225,7 +225,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                 for (i = 0; i < chunk->nents; i++) {
                         void *vaddr;
-                        vaddr = page_address(chunk->page_list[i].page);
+                        vaddr = page_address(sg_page(&chunk->page_list[i]));
                         if (!vaddr) {
                                 ret = ERR_PTR(-EINVAL);
                                 goto bail;
......
@@ -71,7 +71,7 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
                              PCI_DMA_BIDIRECTIONAL);
         for (i = 0; i < chunk->npages; ++i)
-                __free_pages(chunk->mem[i].page,
+                __free_pages(sg_page(&chunk->mem[i]),
                              get_order(chunk->mem[i].length));
 }
@@ -81,7 +81,7 @@ static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chun
         for (i = 0; i < chunk->npages; ++i) {
                 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
-                                  lowmem_page_address(chunk->mem[i].page),
+                                  lowmem_page_address(sg_page(&chunk->mem[i])),
                                   sg_dma_address(&chunk->mem[i]));
         }
 }
@@ -107,10 +107,13 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
 static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
 {
-        mem->page = alloc_pages(gfp_mask, order);
-        if (!mem->page)
+        struct page *page;
+
+        page = alloc_pages(gfp_mask, order);
+        if (!page)
                 return -ENOMEM;
+        sg_set_page(mem, page);
         mem->length = PAGE_SIZE << order;
         mem->offset = 0;
         return 0;
@@ -157,6 +160,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
                 if (!chunk)
                         goto fail;
+                sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
                 chunk->npages = 0;
                 chunk->nsg = 0;
                 list_add_tail(&chunk->list, &icm->chunk_list);
@@ -304,7 +308,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_h
                          * so if we found the page, dma_handle has already
                          * been assigned to. */
                         if (chunk->mem[i].length > offset) {
-                                page = chunk->mem[i].page;
+                                page = sg_page(&chunk->mem[i]);
                                 goto out;
                         }
                         offset -= chunk->mem[i].length;
@@ -445,6 +449,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag
 int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                       struct mthca_user_db_table *db_tab, int index, u64 uaddr)
 {
+        struct page *pages[1];
         int ret = 0;
         u8 status;
         int i;
@@ -472,16 +477,17 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
         }
         ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
-                             &db_tab->page[i].mem.page, NULL);
+                             pages, NULL);
         if (ret < 0)
                 goto out;
+        sg_set_page(&db_tab->page[i].mem, pages[0]);
         db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
         db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
         ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
         if (ret < 0) {
-                put_page(db_tab->page[i].mem.page);
+                put_page(pages[0]);
                 goto out;
         }
@@ -491,7 +497,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                 ret = -EINVAL;
         if (ret) {
                 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-                put_page(db_tab->page[i].mem.page);
+                put_page(sg_page(&db_tab->page[i].mem));
                 goto out;
         }
@@ -557,7 +563,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
                 if (db_tab->page[i].uvirt) {
                         mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
                         pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-                        put_page(db_tab->page[i].mem.page);
+                        put_page(sg_page(&db_tab->page[i].mem));
                 }
         }
......
@@ -131,7 +131,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
                 p = mem;
                 for_each_sg(sgl, sg, data->size, i) {
-                        from = kmap_atomic(sg->page, KM_USER0);
+                        from = kmap_atomic(sg_page(sg), KM_USER0);
                         memcpy(p,
                                from + sg->offset,
                                sg->length);
@@ -191,7 +191,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
                 p = mem;
                 for_each_sg(sgl, sg, sg_size, i) {
-                        to = kmap_atomic(sg->page, KM_SOFTIRQ0);
+                        to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
                         memcpy(to + sg->offset,
                                p,
                                sg->length);
@@ -300,7 +300,7 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
         for_each_sg(sgl, sg, data->dma_nents, i) {
                 /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
                    "offset: %ld sz: %ld\n", i,
-                   (unsigned long)page_to_phys(sg->page),
+                   (unsigned long)sg_phys(sg),
                    (unsigned long)sg->offset,
                    (unsigned long)sg->length); */
                 end_addr = ib_sg_dma_address(ibdev, sg) +
@@ -336,7 +336,7 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
                 iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
                          "off:0x%x sz:0x%x dma_len:0x%x\n",
                          i, (unsigned long)ib_sg_dma_address(ibdev, sg),
-                         sg->page, sg->offset,
+                         sg_page(sg), sg->offset,
                          sg->length, ib_sg_dma_len(ibdev, sg));
         }
......
@@ -348,16 +348,17 @@ static int crypt_convert(struct crypt_config *cc,
               ctx->idx_out < ctx->bio_out->bi_vcnt) {
                 struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
                 struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
-                struct scatterlist sg_in = {
-                        .page = bv_in->bv_page,
-                        .offset = bv_in->bv_offset + ctx->offset_in,
-                        .length = 1 << SECTOR_SHIFT
-                };
-                struct scatterlist sg_out = {
-                        .page = bv_out->bv_page,
-                        .offset = bv_out->bv_offset + ctx->offset_out,
-                        .length = 1 << SECTOR_SHIFT
-                };
+                struct scatterlist sg_in, sg_out;
+
+                sg_init_table(&sg_in, 1);
+                sg_set_page(&sg_in, bv_in->bv_page);
+                sg_in.offset = bv_in->bv_offset + ctx->offset_in;
+                sg_in.length = 1 << SECTOR_SHIFT;
+
+                sg_init_table(&sg_out, 1);
+                sg_set_page(&sg_out, bv_out->bv_page);
+                sg_out.offset = bv_out->bv_offset + ctx->offset_out;
+                sg_out.length = 1 << SECTOR_SHIFT;
                 ctx->offset_in += sg_in.length;
                 if (ctx->offset_in >= bv_in->bv_len) {
@@ -112,12 +112,13 @@ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
         sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
         if (NULL == sglist)
                 return NULL;
+        sg_init_table(sglist, nr_pages);
         for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
                 pg = vmalloc_to_page(virt);
                 if (NULL == pg)
                         goto err;
                 BUG_ON(PageHighMem(pg));
-                sglist[i].page = pg;
+                sg_set_page(&sglist[i], pg);
                 sglist[i].length = PAGE_SIZE;
         }
         return sglist;
......
@@ -63,10 +63,10 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info
                         memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
                         kunmap_atomic(src, KM_BOUNCE_READ);
                         local_irq_restore(flags);
-                        dma->SGlist[map_offset].page = dma->bouncemap[map_offset];
+                        sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset]);
                 }
                 else {
-                        dma->SGlist[map_offset].page = dma->map[map_offset];
+                        sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset]);
                 }
                 offset = 0;
                 map_offset++;
......
@@ -60,12 +60,13 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
         sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
         if (NULL == sglist)
                 return NULL;
+        sg_init_table(sglist, nr_pages);
         for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
                 pg = vmalloc_to_page(virt);
                 if (NULL == pg)
                         goto err;
                 BUG_ON(PageHighMem(pg));
-                sglist[i].page = pg;
+                sg_set_page(&sglist[i], pg);
                 sglist[i].length = PAGE_SIZE;
         }
         return sglist;
@@ -86,13 +87,14 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
         sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
         if (NULL == sglist)
                 return NULL;
+        sg_init_table(sglist, nr_pages);
         if (NULL == pages[0])
                 goto nopage;
         if (PageHighMem(pages[0]))
                 /* DMA to highmem pages might not work */
                 goto highmem;
-        sglist[0].page = pages[0];
+        sg_set_page(&sglist[0], pages[0]);
         sglist[0].offset = offset;
         sglist[0].length = PAGE_SIZE - offset;
         for (i = 1; i < nr_pages; i++) {
@@ -100,7 +102,7 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
                         goto nopage;
                 if (PageHighMem(pages[i]))
                         goto highmem;
-                sglist[i].page = pages[i];
+                sg_set_page(&sglist[i], pages[i]);
                 sglist[i].length = PAGE_SIZE;
         }
         return sglist;
......
@@ -13,6 +13,7 @@
 #include <linux/blkdev.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#include <linux/scatterlist.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -153,19 +154,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                         blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
                         blk_queue_max_segment_size(mq->queue, bouncesz);
-                        mq->sg = kzalloc(sizeof(struct scatterlist),
+                        mq->sg = kmalloc(sizeof(struct scatterlist),
                                 GFP_KERNEL);
                         if (!mq->sg) {
                                 ret = -ENOMEM;
                                 goto cleanup_queue;
                         }
+                        sg_init_table(mq->sg, 1);
 
-                        mq->bounce_sg = kzalloc(sizeof(struct scatterlist) *
+                        mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
                                 bouncesz / 512, GFP_KERNEL);
                         if (!mq->bounce_sg) {
                                 ret = -ENOMEM;
                                 goto cleanup_queue;
                         }
+                        sg_init_table(mq->bounce_sg, bouncesz / 512);
                 }
         }
 #endif
@@ -302,12 +305,12 @@ static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
                         BUG_ON(dst_len == 0);
                         if (dst_size == 0) {
-                                dst_buf = page_address(dst->page) + dst->offset;
+                                dst_buf = sg_virt(dst);
                                 dst_size = dst->length;
                         }
                         if (src_size == 0) {
-                                src_buf = page_address(src->page) + src->offset;
+                                src_buf = sg_virt(src);
                                 src_size = src->length;
                         }
@@ -353,9 +356,7 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
                 return 1;
         }
-        mq->sg[0].page = virt_to_page(mq->bounce_buf);
-        mq->sg[0].offset = offset_in_page(mq->bounce_buf);
-        mq->sg[0].length = 0;
+        sg_init_one(mq->sg, mq->bounce_buf, 0);
         while (sg_len) {
                 mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
......
@@ -149,7 +149,7 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
                 sg = &data->sg[i];
-                sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
+                sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
                 amount = min(size, sg->length);
                 size -= amount;