add idl4k kernel firmware version 1.13.0.105

This commit is contained in:
Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions

drivers/mmc/card/Kconfig

@@ -0,0 +1,52 @@
#
# MMC/SD card drivers
#
comment "MMC/SD/SDIO Card Drivers"
config MMC_BLOCK
tristate "MMC block device driver"
depends on BLOCK
default y
help
Say Y here to enable the MMC block device driver support.
This provides a block device driver, which you can use to
mount the filesystem. Almost everyone wishing MMC support
should say Y or M here.
config MMC_BLOCK_BOUNCE
bool "Use bounce buffer for simple hosts"
depends on MMC_BLOCK
default y
help
SD/MMC is a high latency protocol where it is crucial to
send large requests in order to get high performance. Many
controllers, however, are restricted to continuous memory
(i.e. they can't do scatter-gather), something the kernel
rarely can provide.
Say Y here to help these restricted hosts by bouncing
requests back and forth from a large buffer. You will get
a big performance gain at the cost of up to 64 KiB of
physical memory.
If unsure, say Y here.
config SDIO_UART
tristate "SDIO UART/GPS class support"
help
SDIO function driver for SDIO cards that implements the UART
class, as well as the GPS class which appears like a UART.
config MMC_TEST
tristate "MMC host test driver"
default n
help
Development driver that performs a series of reads and writes
to a memory card in order to expose certain well known bugs
in host controllers. The tests are executed by writing to the
"test" file in sysfs under each card. Note that whatever is
on your card will be overwritten by these tests.
This driver is only of interest to those developing or
testing a host driver. Most people should say N here.
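
The MMC_BLOCK_BOUNCE help text above describes bouncing in prose; the implementation lives in queue.c further down, built on sg_copy_to_buffer()/sg_copy_from_buffer(). As a minimal sketch of the gather step only, under the same idea but with a hypothetical helper name (bounce_gather is not part of the driver):

#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy a scatter-gather list into one contiguous buffer, so a host
 * controller that cannot do scatter-gather sees a single large
 * transfer. Illustration only; the error handling and highmem
 * handling of the real driver are omitted. */
static void bounce_gather(struct scatterlist *sgl, int nents, char *buf)
{
	struct scatterlist *sg;
	size_t off = 0;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		memcpy(buf + off, sg_virt(sg), sg->length);
		off += sg->length;
	}
}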

drivers/mmc/card/Makefile

@@ -0,0 +1,14 @@
#
# Makefile for MMC/SD card drivers
#
ifeq ($(CONFIG_MMC_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif
obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
mmc_block-objs := block.o queue.o
obj-$(CONFIG_MMC_TEST) += mmc_test.o
obj-$(CONFIG_SDIO_UART) += sdio_uart.o

drivers/mmc/card/block.c

@@ -0,0 +1,712 @@
/*
* Block driver for media (i.e., flash cards)
*
* Copyright 2002 Hewlett-Packard Company
* Copyright 2005-2008 Pierre Ossman
*
* Use consistent with the GNU GPL is permitted,
* provided that this copyright notice is
* preserved in its entirety in all copies and derived works.
*
* HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
* AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
* FITNESS FOR ANY PARTICULAR PURPOSE.
*
* Many thanks to Alessandro Rubini and Jonathan Corbet!
*
* Author: Andrew Christian
* 28 May 2002
*/
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "queue.h"
MODULE_ALIAS("mmc:block");
/*
* max 8 partitions per card
*/
#define MMC_SHIFT 3
#define MMC_NUM_MINORS (256 >> MMC_SHIFT)
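/*
 * With MMC_SHIFT == 3 each card claims 2^3 = 8 minors (the whole
 * disk plus up to 7 partitions), so the 256 available minors give
 * 256 >> 3 = 32 card slots; dev_use tracks which slots are taken.
 */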
static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);
/*
* There is one mmc_blk_data per slot.
*/
struct mmc_blk_data {
spinlock_t lock;
struct gendisk *disk;
struct mmc_queue queue;
unsigned int usage;
unsigned int read_only;
};
static DEFINE_MUTEX(open_lock);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
mutex_lock(&open_lock);
md = disk->private_data;
if (md && md->usage == 0)
md = NULL;
if (md)
md->usage++;
mutex_unlock(&open_lock);
return md;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
mutex_lock(&open_lock);
md->usage--;
if (md->usage == 0) {
int devmaj = MAJOR(disk_devt(md->disk));
int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
if (!devmaj)
devidx = md->disk->first_minor >> MMC_SHIFT;
blk_cleanup_queue(md->queue.queue);
__clear_bit(devidx, dev_use);
put_disk(md->disk);
kfree(md);
}
mutex_unlock(&open_lock);
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
int ret = -ENXIO;
if (md) {
if (md->usage == 2)
check_disk_change(bdev);
ret = 0;
if ((mode & FMODE_WRITE) && md->read_only) {
mmc_blk_put(md);
ret = -EROFS;
}
}
return ret;
}
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
struct mmc_blk_data *md = disk->private_data;
mmc_blk_put(md);
return 0;
}
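/*
 * Cards have no physical CHS geometry; fabricate 4 heads and 16
 * sectors per track so legacy partitioning tools that still ask
 * for a geometry get consistent numbers.
 */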
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
geo->heads = 4;
geo->sectors = 16;
return 0;
}
static const struct block_device_operations mmc_bdops = {
.open = mmc_blk_open,
.release = mmc_blk_release,
.getgeo = mmc_blk_getgeo,
.owner = THIS_MODULE,
};
struct mmc_blk_request {
struct mmc_request mrq;
struct mmc_command cmd;
struct mmc_command stop;
struct mmc_data data;
};
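/*
 * Ask an SD card how many blocks it programmed successfully, via
 * the application command ACMD22 (SEND_NUM_WR_BLOCKS): CMD55
 * (MMC_APP_CMD) first puts the card into application-command
 * mode, then the count comes back as a 4-byte big-endian data
 * transfer, hence the ntohl() below.
 */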
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
int err;
u32 result;
__be32 *blocks;
struct mmc_request mrq;
struct mmc_command cmd;
struct mmc_data data;
unsigned int timeout_us;
struct scatterlist sg;
memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_APP_CMD;
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err)
return (u32)-1;
if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
return (u32)-1;
memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
memset(&data, 0, sizeof(struct mmc_data));
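/* Allow 100 times the nominal CSD access time, capped at 100 ms. */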
data.timeout_ns = card->csd.tacc_ns * 100;
data.timeout_clks = card->csd.tacc_clks * 100;
timeout_us = data.timeout_ns / 1000;
timeout_us += data.timeout_clks * 1000 /
(card->host->ios.clock / 1000);
if (timeout_us > 100000) {
data.timeout_ns = 100000000;
data.timeout_clks = 0;
}
data.blksz = 4;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
memset(&mrq, 0, sizeof(struct mmc_request));
mrq.cmd = &cmd;
mrq.data = &data;
blocks = kmalloc(4, GFP_KERNEL);
if (!blocks)
return (u32)-1;
sg_init_one(&sg, blocks, 4);
mmc_wait_for_req(card->host, &mrq);
result = ntohl(*blocks);
kfree(blocks);
if (cmd.error || data.error)
result = (u32)-1;
return result;
}
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
struct mmc_command cmd;
int err;
memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_SEND_STATUS;
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err)
printk(KERN_ERR "%s: error %d sending status comand",
req->rq_disk->disk_name, err);
return cmd.resp[0];
}
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request brq;
int ret = 1, disable_multi = 0;
mmc_claim_host(card->host);
do {
struct mmc_command cmd;
u32 readcmd, writecmd, status = 0;
memset(&brq, 0, sizeof(struct mmc_blk_request));
brq.mrq.cmd = &brq.cmd;
brq.mrq.data = &brq.data;
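/*
 * Standard-capacity cards are byte addressed, so the sector
 * number from the block layer must be scaled to a byte offset
 * (<< 9); block-addressed (high-capacity) cards take the sector
 * number as-is.
 */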
brq.cmd.arg = blk_rq_pos(req);
if (!mmc_card_blockaddr(card))
brq.cmd.arg <<= 9;
brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
brq.data.blksz = 512;
brq.stop.opcode = MMC_STOP_TRANSMISSION;
brq.stop.arg = 0;
brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
brq.data.blocks = blk_rq_sectors(req);
/*
* The block layer doesn't support all sector count
* restrictions, so we need to be prepared for too big
* requests.
*/
if (brq.data.blocks > card->host->max_blk_count)
brq.data.blocks = card->host->max_blk_count;
/*
* After a read error, we redo the request one sector at a time
* in order to accurately determine which sectors can be read
* successfully.
*/
if (disable_multi && brq.data.blocks > 1)
brq.data.blocks = 1;
if (brq.data.blocks > 1) {
/* SPI multiblock writes terminate using a special
* token, not a STOP_TRANSMISSION request.
*/
if (!mmc_host_is_spi(card->host)
|| rq_data_dir(req) == READ)
brq.mrq.stop = &brq.stop;
readcmd = MMC_READ_MULTIPLE_BLOCK;
writecmd = MMC_WRITE_MULTIPLE_BLOCK;
} else {
brq.mrq.stop = NULL;
readcmd = MMC_READ_SINGLE_BLOCK;
writecmd = MMC_WRITE_BLOCK;
}
if (rq_data_dir(req) == READ) {
brq.cmd.opcode = readcmd;
brq.data.flags |= MMC_DATA_READ;
} else {
brq.cmd.opcode = writecmd;
brq.data.flags |= MMC_DATA_WRITE;
}
mmc_set_data_timeout(&brq.data, card);
brq.data.sg = mq->sg;
brq.data.sg_len = mmc_queue_map_sg(mq);
/*
* Adjust the sg list so it is the same size as the
* request.
*/
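/*
 * Worked example (hypothetical numbers): a request mapped to two
 * 1024-byte sg entries but clamped to 3 blocks (1536 bytes)
 * leaves this loop having trimmed the second entry to 512 bytes,
 * with sg_len set to 2.
 */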
if (brq.data.blocks != blk_rq_sectors(req)) {
int i, data_size = brq.data.blocks << 9;
struct scatterlist *sg;
for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
data_size -= sg->length;
if (data_size <= 0) {
sg->length += data_size;
i++;
break;
}
}
brq.data.sg_len = i;
}
mmc_queue_bounce_pre(mq);
mmc_wait_for_req(card->host, &brq.mrq);
mmc_queue_bounce_post(mq);
/*
* Check for errors here, but don't jump to cmd_err
* until later as we need to wait for the card to leave
* programming mode even when things go wrong.
*/
if (brq.cmd.error || brq.data.error || brq.stop.error) {
if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
/* Redo read one sector at a time */
printk(KERN_WARNING "%s: retrying using single "
"block read\n", req->rq_disk->disk_name);
disable_multi = 1;
continue;
}
status = get_card_status(card, req);
}
if (brq.cmd.error) {
printk(KERN_ERR "%s: error %d sending read/write "
"command, response %#x, card status %#x\n",
req->rq_disk->disk_name, brq.cmd.error,
brq.cmd.resp[0], status);
}
if (brq.data.error) {
if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
/* 'Stop' response contains card status */
status = brq.mrq.stop->resp[0];
printk(KERN_ERR "%s: error %d transferring data,"
" sector %u, nr %u, card status %#x\n",
req->rq_disk->disk_name, brq.data.error,
(unsigned)blk_rq_pos(req),
(unsigned)blk_rq_sectors(req), status);
}
if (brq.stop.error) {
printk(KERN_ERR "%s: error %d sending stop command, "
"response %#x, card status %#x\n",
req->rq_disk->disk_name, brq.stop.error,
brq.stop.resp[0], status);
}
if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
do {
int err;
cmd.opcode = MMC_SEND_STATUS;
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 5);
if (err) {
printk(KERN_ERR "%s: error %d requesting status\n",
req->rq_disk->disk_name, err);
goto cmd_err;
}
/*
* Some cards mishandle the status bits,
* so make sure to check both the busy
* indication and the card state.
*/
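/* R1_CURRENT_STATE() == 7 is the card's "prg" (programming) state. */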
} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
(R1_CURRENT_STATE(cmd.resp[0]) == 7));
#if 0
if (cmd.resp[0] & ~0x00000900)
printk(KERN_ERR "%s: status = %08x\n",
req->rq_disk->disk_name, cmd.resp[0]);
if (mmc_decode_status(cmd.resp))
goto cmd_err;
#endif
}
if (brq.cmd.error || brq.stop.error || brq.data.error) {
if (rq_data_dir(req) == READ) {
/*
* After an error, we redo I/O one sector at a
* time, so we only reach here after trying to
* read a single sector.
*/
spin_lock_irq(&md->lock);
ret = __blk_end_request(req, -EIO, brq.data.blksz);
spin_unlock_irq(&md->lock);
continue;
}
goto cmd_err;
}
/*
* A block was successfully transferred.
*/
spin_lock_irq(&md->lock);
ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
spin_unlock_irq(&md->lock);
} while (ret);
mmc_release_host(card->host);
return 1;
cmd_err:
/*
* If this is an SD card and we're writing, we can first
* mark the known good sectors as ok.
*
* If the card is not SD, we can still ok written sectors
* as reported by the controller (which might be less than
* the real number of written sectors, but never more).
*/
if (mmc_card_sd(card)) {
u32 blocks;
blocks = mmc_sd_num_wr_blocks(card);
if (blocks != (u32)-1) {
spin_lock_irq(&md->lock);
ret = __blk_end_request(req, 0, blocks << 9);
spin_unlock_irq(&md->lock);
}
} else {
spin_lock_irq(&md->lock);
ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
spin_unlock_irq(&md->lock);
}
mmc_release_host(card->host);
spin_lock_irq(&md->lock);
while (ret)
ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
spin_unlock_irq(&md->lock);
return 0;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
return mmc_card_readonly(card) ||
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
struct mmc_blk_data *md;
int devidx, ret;
devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
if (devidx >= MMC_NUM_MINORS)
return ERR_PTR(-ENOSPC);
__set_bit(devidx, dev_use);
md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
if (!md) {
ret = -ENOMEM;
goto out;
}
/*
* Set the read-only status based on the supported commands
* and the write protect switch.
*/
md->read_only = mmc_blk_readonly(card);
md->disk = alloc_disk(1 << MMC_SHIFT);
if (md->disk == NULL) {
ret = -ENOMEM;
goto err_kfree;
}
spin_lock_init(&md->lock);
md->usage = 1;
ret = mmc_init_queue(&md->queue, card, &md->lock);
if (ret)
goto err_putdisk;
md->queue.issue_fn = mmc_blk_issue_rq;
md->queue.data = md;
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->first_minor = devidx << MMC_SHIFT;
md->disk->fops = &mmc_bdops;
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = &card->dev;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
*
* - be set for removable media with permanent block devices
* - be unset for removable block devices with permanent media
*
* Since MMC block devices clearly fall under the second
* case, we do not set GENHD_FL_REMOVABLE. Userspace
* should use the block device creation/destruction hotplug
* messages to tell when the card is present.
*/
sprintf(md->disk->disk_name, "mmcblk%d", devidx);
blk_queue_logical_block_size(md->queue.queue, 512);
if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
/*
* The EXT_CSD sector count is in number of 512-byte
* sectors.
*/
set_capacity(md->disk, card->ext_csd.sectors);
} else {
/*
* The CSD capacity field is in units of read_blkbits.
* set_capacity takes units of 512 bytes.
*/
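/*
 * e.g. read_blkbits == 10 means each CSD capacity unit is 1024
 * bytes, so shifting left by (10 - 9) = 1 yields the 512-byte
 * sector count that set_capacity() expects.
 */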
set_capacity(md->disk,
card->csd.capacity << (card->csd.read_blkbits - 9));
}
return md;
err_putdisk:
put_disk(md->disk);
err_kfree:
kfree(md);
out:
return ERR_PTR(ret);
}
static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
struct mmc_command cmd;
int err;
/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
if (mmc_card_blockaddr(card))
return 0;
mmc_claim_host(card->host);
cmd.opcode = MMC_SET_BLOCKLEN;
cmd.arg = 512;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 5);
mmc_release_host(card->host);
if (err) {
printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
md->disk->disk_name, cmd.arg, err);
return -EINVAL;
}
return 0;
}
static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md;
int err;
char cap_str[10];
/*
* Check that the card supports the command class(es) we need.
*/
if (!(card->csd.cmdclass & CCC_BLOCK_READ))
return -ENODEV;
md = mmc_blk_alloc(card);
if (IS_ERR(md))
return PTR_ERR(md);
err = mmc_blk_set_blksize(md, card);
if (err)
goto out;
string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
cap_str, sizeof(cap_str));
printk(KERN_INFO "%s: %s %s %s %s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
cap_str, md->read_only ? "(ro)" : "");
mmc_set_drvdata(card, md);
add_disk(md->disk);
return 0;
out:
mmc_cleanup_queue(&md->queue);
mmc_blk_put(md);
return err;
}
static void mmc_blk_remove(struct mmc_card *card)
{
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
/* Stop new requests from getting into the queue */
del_gendisk(md->disk);
/* Then flush out any already in there */
mmc_cleanup_queue(&md->queue);
mmc_blk_put(md);
}
mmc_set_drvdata(card, NULL);
}
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
mmc_queue_suspend(&md->queue);
}
return 0;
}
static int mmc_blk_resume(struct mmc_card *card)
{
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
mmc_blk_set_blksize(md, card);
mmc_queue_resume(&md->queue);
}
return 0;
}
#else
#define mmc_blk_suspend NULL
#define mmc_blk_resume NULL
#endif
static struct mmc_driver mmc_driver = {
.drv = {
.name = "mmcblk",
},
.probe = mmc_blk_probe,
.remove = mmc_blk_remove,
.suspend = mmc_blk_suspend,
.resume = mmc_blk_resume,
};
static int __init mmc_blk_init(void)
{
int res;
res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
if (res)
goto out;
res = mmc_register_driver(&mmc_driver);
if (res)
goto out2;
return 0;
out2:
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
out:
return res;
}
static void __exit mmc_blk_exit(void)
{
mmc_unregister_driver(&mmc_driver);
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
module_init(mmc_blk_init);
module_exit(mmc_blk_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");

drivers/mmc/card/mmc_test.c (file diff suppressed because it is too large)

drivers/mmc/card/queue.c

@@ -0,0 +1,364 @@
/*
* linux/drivers/mmc/card/queue.c
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright 2006-2007 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"
#define MMC_QUEUE_BOUNCESZ 65536
#define MMC_QUEUE_SUSPENDED (1 << 0)
/*
* Prepare an MMC request. This just filters out odd stuff.
*/
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
/*
* We only like normal block requests.
*/
if (!blk_fs_request(req)) {
blk_dump_rq_flags(req, "MMC bad request");
return BLKPREP_KILL;
}
req->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK;
}
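/*
 * The queue thread holds thread_sem while actively draining
 * requests and releases it before sleeping, which is what lets
 * mmc_queue_suspend() take the semaphore to wait for the thread
 * to go idle.
 */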
static int mmc_queue_thread(void *d)
{
struct mmc_queue *mq = d;
struct request_queue *q = mq->queue;
current->flags |= PF_MEMALLOC;
down(&mq->thread_sem);
do {
struct request *req = NULL;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
if (!blk_queue_plugged(q))
req = blk_fetch_request(q);
mq->req = req;
spin_unlock_irq(q->queue_lock);
if (!req) {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
break;
}
up(&mq->thread_sem);
schedule();
down(&mq->thread_sem);
continue;
}
set_current_state(TASK_RUNNING);
mq->issue_fn(mq, req);
} while (1);
up(&mq->thread_sem);
return 0;
}
/*
* Generic MMC request handler. The block layer calls this whenever
* the queue has work; all it does here is wake the per-queue
* worker thread, or fail the requests outright when the queue is
* being torn down (mq == NULL).
*/
static void mmc_request(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) {
req->cmd_flags |= REQ_QUIET;
__blk_end_request_all(req, -EIO);
}
return;
}
if (!mq->req)
wake_up_process(mq->thread);
}
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
* @card: mmc card to attach this queue
* @lock: queue lock
*
* Initialise an MMC card request queue.
*/
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
int ret;
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = *mmc_dev(host)->dma_mask;
mq->card = card;
mq->queue = blk_init_queue(mmc_request, lock);
if (!mq->queue)
return -ENOMEM;
mq->queue->queuedata = mq;
mq->req = NULL;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
#ifdef CONFIG_MMC_BLOCK_BOUNCE
if (host->max_hw_segs == 1) {
unsigned int bouncesz;
bouncesz = MMC_QUEUE_BOUNCESZ;
if (bouncesz > host->max_req_size)
bouncesz = host->max_req_size;
if (bouncesz > host->max_seg_size)
bouncesz = host->max_seg_size;
if (bouncesz > (host->max_blk_count * 512))
bouncesz = host->max_blk_count * 512;
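/*
 * i.e. bouncesz = min(64 KiB, max_req_size, max_seg_size,
 * max_blk_count * 512); bouncing is only worthwhile if that
 * works out to more than one 512-byte sector.
 */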
if (bouncesz > 512) {
mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
if (!mq->bounce_buf) {
printk(KERN_WARNING "%s: unable to "
"allocate bounce buffer\n",
mmc_card_name(card));
}
}
if (mq->bounce_buf) {
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_sectors(mq->queue, bouncesz / 512);
blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
blk_queue_max_segment_size(mq->queue, bouncesz);
mq->sg = kmalloc(sizeof(struct scatterlist),
GFP_KERNEL);
if (!mq->sg) {
ret = -ENOMEM;
goto cleanup_queue;
}
sg_init_table(mq->sg, 1);
mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
bouncesz / 512, GFP_KERNEL);
if (!mq->bounce_sg) {
ret = -ENOMEM;
goto cleanup_queue;
}
sg_init_table(mq->bounce_sg, bouncesz / 512);
}
}
#endif
if (!mq->bounce_buf) {
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
mq->sg = kmalloc(sizeof(struct scatterlist) *
host->max_phys_segs, GFP_KERNEL);
if (!mq->sg) {
ret = -ENOMEM;
goto cleanup_queue;
}
sg_init_table(mq->sg, host->max_phys_segs);
}
init_MUTEX(&mq->thread_sem);
mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
goto free_bounce_sg;
}
return 0;
free_bounce_sg:
if (mq->bounce_sg)
kfree(mq->bounce_sg);
mq->bounce_sg = NULL;
cleanup_queue:
if (mq->sg)
kfree(mq->sg);
mq->sg = NULL;
if (mq->bounce_buf)
kfree(mq->bounce_buf);
mq->bounce_buf = NULL;
blk_cleanup_queue(mq->queue);
return ret;
}
void mmc_cleanup_queue(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
/* Then terminate our worker thread */
kthread_stop(mq->thread);
/* Empty the queue */
spin_lock_irqsave(q->queue_lock, flags);
q->queuedata = NULL;
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
if (mq->bounce_sg)
kfree(mq->bounce_sg);
mq->bounce_sg = NULL;
kfree(mq->sg);
mq->sg = NULL;
if (mq->bounce_buf)
kfree(mq->bounce_buf);
mq->bounce_buf = NULL;
mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
/**
* mmc_queue_suspend - suspend an MMC request queue
* @mq: MMC queue to suspend
*
* Stop the block request queue, and wait for our thread to
* complete any outstanding requests. This ensures that we
* won't suspend while a request is being processed.
*/
void mmc_queue_suspend(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
mq->flags |= MMC_QUEUE_SUSPENDED;
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
down(&mq->thread_sem);
}
}
/**
* mmc_queue_resume - resume a previously suspended MMC request queue
* @mq: MMC queue to resume
*/
void mmc_queue_resume(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
if (mq->flags & MMC_QUEUE_SUSPENDED) {
mq->flags &= ~MMC_QUEUE_SUSPENDED;
up(&mq->thread_sem);
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
}
/*
* Prepare the sg list(s) to be handed off to the host driver
*/
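/*
 * Without a bounce buffer the request is mapped straight into
 * mq->sg; with one, it is mapped into mq->bounce_sg and the host
 * is handed a single sg entry covering the bounce buffer.
 */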
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
int i;
if (!mq->bounce_buf)
return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
BUG_ON(!mq->bounce_sg);
sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
mq->bounce_sg_len = sg_len;
buflen = 0;
for_each_sg(mq->bounce_sg, sg, sg_len, i)
buflen += sg->length;
sg_init_one(mq->sg, mq->bounce_buf, buflen);
return 1;
}
/*
* If writing, bounce the data to the buffer before the request
* is sent to the host driver
*/
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
unsigned long flags;
if (!mq->bounce_buf)
return;
if (rq_data_dir(mq->req) != WRITE)
return;
local_irq_save(flags);
sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
mq->bounce_buf, mq->sg[0].length);
local_irq_restore(flags);
}
/*
* If reading, bounce the data from the buffer after the request
* has been handled by the host driver
*/
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
unsigned long flags;
if (!mq->bounce_buf)
return;
if (rq_data_dir(mq->req) != READ)
return;
local_irq_save(flags);
sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
mq->bounce_buf, mq->sg[0].length);
local_irq_restore(flags);
}

drivers/mmc/card/queue.h

@@ -0,0 +1,31 @@
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H
struct request;
struct task_struct;
struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
struct request *req;
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
struct scatterlist *sg;
char *bounce_buf;
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *);
extern void mmc_queue_bounce_pre(struct mmc_queue *);
extern void mmc_queue_bounce_post(struct mmc_queue *);
#endif

drivers/mmc/card/sdio_uart.c (file diff suppressed because it is too large)