add idl4k kernel firmware version 1.13.0.105

This commit is contained in:
Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions

View File

@@ -0,0 +1,98 @@
menu "DMA support"

config SH_DMA
	bool "SuperH on-chip DMA controller (DMAC) support"
	depends on CPU_SH3 || CPU_SH4 && !CPU_SUBTYPE_ST40
	select SH_DMA_API
	default n
	help
	  Selecting this option will allow the use of the Hitachi/Renesas
	  DMA controller (DMAC).  The DMAC is programmed using the generic
	  SH DMA API, although a wrapper providing compatibility with the
	  ISA DMA API used on PC's is also available.

config SH_DMA_IRQ_MULTI
	bool
	depends on SH_DMA
	default y if CPU_SUBTYPE_SH7750  || CPU_SUBTYPE_SH7751  || \
		     CPU_SUBTYPE_SH7750S || CPU_SUBTYPE_SH7750R || \
		     CPU_SUBTYPE_SH7751R || CPU_SUBTYPE_SH7091  || \
		     CPU_SUBTYPE_SH7763  || CPU_SUBTYPE_SH7764  || \
		     CPU_SUBTYPE_SH7780  || CPU_SUBTYPE_SH7785  || \
		     CPU_SUBTYPE_SH7760

config SH_DMA_API
	depends on SH_DMA
	bool "SuperH DMA API support"
	default n
	help
	  SH_DMA_API enables the generic SuperH DMA API.
	  If you want to use the DMA ENGINE framework instead, you must
	  leave this disabled and enable DMA_ENGINE and SH_DMAE.

config NR_ONCHIP_DMA_CHANNELS
	int
	depends on SH_DMA
	default "4" if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7751 || \
		       CPU_SUBTYPE_SH7750S || CPU_SUBTYPE_SH7091
	default "8" if CPU_SUBTYPE_SH7750R || CPU_SUBTYPE_SH7751R || \
		       CPU_SUBTYPE_SH7760
	default "12" if CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7780 || \
			CPU_SUBTYPE_SH7785 || CPU_SUBTYPE_SH7724
	default "6"
	help
	  This allows you to specify the number of channels that the on-chip
	  DMAC supports.  This will be 4 for SH7750/SH7751/SH7750S/SH7091,
	  8 for the SH7750R/SH7751R/SH7760, and 12 for the
	  SH7723/SH7780/SH7785/SH7724; the default is 6.

config NR_DMA_CHANNELS_BOOL
	depends on SH_DMA
	bool "Override default number of maximum DMA channels"
	help
	  This allows you to forcibly update the maximum number of supported
	  DMA channels for a given board. If this is unset, this will default
	  to the number of channels that the on-chip DMAC has.

config NR_DMA_CHANNELS
	int "Maximum number of DMA channels"
	depends on SH_DMA && NR_DMA_CHANNELS_BOOL
	default NR_ONCHIP_DMA_CHANNELS
	help
	  This allows you to specify the maximum number of DMA channels to
	  support. Setting this to a higher value allows for cascading DMACs
	  with additional channels.

config SH_DMABRG
	bool "SH7760 DMABRG support"
	depends on CPU_SUBTYPE_SH7760
	help
	  The DMABRG does data transfers from main memory to Audio/USB units
	  of the SH7760.
	  Say Y if you want to use Audio/USB DMA on your SH7760 board.

config PVR2_DMA
	tristate "PowerVR 2 DMAC support"
	depends on SH_DREAMCAST && SH_DMA
	help
	  Selecting this will enable support for the PVR2 DMA controller.
	  As this chains off of the on-chip DMAC, that must also be
	  enabled by default.

	  This is primarily used by the pvr2fb framebuffer driver for
	  certain optimizations, but is not necessary for functionality.

	  If in doubt, say N.

config G2_DMA
	tristate "G2 Bus DMA support"
	depends on SH_DREAMCAST
	select SH_DMA_API
	help
	  This enables support for the DMA controller for the Dreamcast's
	  G2 bus. Drivers that want this will generally enable this on
	  their own.

	  If in doubt, say N.

endmenu

View File

@@ -0,0 +1,9 @@
#
# Makefile for the SuperH DMA specific kernel interface routines under Linux.
#
# dma-api.o/dma-sysfs.o form the generic SH DMA API core; the remaining
# objects are the individual DMAC drivers selected via Kconfig.
obj-$(CONFIG_SH_DMA_API) += dma-api.o dma-sysfs.o
obj-$(CONFIG_SH_DMA) += dma-sh.o
obj-$(CONFIG_PVR2_DMA) += dma-pvr2.o
obj-$(CONFIG_G2_DMA) += dma-g2.o
obj-$(CONFIG_SH_DMABRG) += dmabrg.o

View File

@@ -0,0 +1,442 @@
/*
* arch/sh/drivers/dma/dma-api.c
*
* SuperH-specific DMA management API
*
* Copyright (C) 2003, 2004, 2005 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/dma.h>
/* Lock exported for users of the SH DMA API (see asm/dma.h). */
DEFINE_SPINLOCK(dma_spin_lock);

/* Every DMAC registered via register_dmac(), linked through dma_info.list. */
static LIST_HEAD(registered_dmac_list);
/*
 * get_dma_info - find the DMAC that owns virtual channel @vchan.
 *
 * Each registered DMAC owns a contiguous range of virtual channel
 * numbers; return the controller whose range contains @vchan, or
 * NULL if none does.
 */
struct dma_info *get_dma_info(unsigned int vchan)
{
	struct dma_info *info;

	list_for_each_entry(info, &registered_dmac_list, list) {
		if (vchan >= info->first_vchannel_nr &&
		    vchan < info->first_vchannel_nr + info->nr_channels)
			return info;
	}

	return NULL;
}
EXPORT_SYMBOL(get_dma_info);
/*
 * get_dma_info_by_name - look a DMAC up by its registered name.
 * A NULL @dmac_name matches the first registered controller.
 */
struct dma_info *get_dma_info_by_name(const char *dmac_name)
{
	struct dma_info *info;

	list_for_each_entry(info, &registered_dmac_list, list) {
		if (!dmac_name || strcmp(dmac_name, info->name) == 0)
			return info;
	}

	return NULL;
}
EXPORT_SYMBOL(get_dma_info_by_name);
static unsigned int get_nr_channels(void)
{
struct dma_info *info;
unsigned int nr = 0;
if (unlikely(list_empty(&registered_dmac_list)))
return nr;
list_for_each_entry(info, &registered_dmac_list, list)
nr += info->nr_channels;
return nr;
}
/*
 * get_dma_channel - map a virtual channel number to its dma_channel.
 *
 * Returns ERR_PTR(-EINVAL) when no DMAC owns @vchan, NULL when the
 * owning DMAC has no channel with that number.
 * NOTE(review): the two "not found" paths use different sentinels
 * (ERR_PTR vs NULL) and several callers in this file check neither —
 * verify callers can tolerate both.
 */
struct dma_channel *get_dma_channel(unsigned int vchan)
{
	struct dma_info *info = get_dma_info(vchan);
	struct dma_channel *channel;
	int i;

	if (unlikely(!info))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < info->nr_channels; i++) {
		channel = &info->channels[i];
		if (channel->vchan == vchan)
			return channel;
	}

	return NULL;
}
EXPORT_SYMBOL(get_dma_channel);
/*
 * get_dma_residue - bytes remaining on virtual channel @vchan.
 *
 * Returns 0 for an unknown channel or when the controller provides no
 * get_residue() hook. Previously an invalid @vchan dereferenced a NULL
 * info / NULL-or-ERR_PTR channel.
 */
int get_dma_residue(unsigned int vchan)
{
	struct dma_info *info = get_dma_info(vchan);
	struct dma_channel *channel = get_dma_channel(vchan);

	/* Unknown channel: nothing outstanding to report. */
	if (unlikely(!info || IS_ERR_OR_NULL(channel)))
		return 0;

	if (info->ops->get_residue)
		return info->ops->get_residue(channel);

	return 0;
}
EXPORT_SYMBOL(get_dma_residue);
/*
 * search_cap - scan a NULL-terminated capability list.
 * Returns 1 when @needle appears in @haystack, 0 otherwise.
 */
static int search_cap(const char **haystack, const char *needle)
{
	while (*haystack) {
		if (strcmp(*haystack, needle) == 0)
			return 1;
		haystack++;
	}

	return 0;
}
/*
 * matching_dmac - match a requested DMAC name against an instance name.
 *
 * Returns 0 (match) when the names are identical, or when the request
 * is a prefix of the instance at a '.' boundary (so "name" matches
 * "name.<unit>"). Returns 1 otherwise.
 */
static int matching_dmac(const char *dmac_req, const char *dmac_inst)
{
	for (;; dmac_req++, dmac_inst++) {
		if (*dmac_req != *dmac_inst)
			break;
		if (*dmac_req == '\0')
			return 0;	/* exact match */
	}

	/* "name" also matches instance "name.<unit>" */
	return (*dmac_req == '\0' && *dmac_inst == '.') ? 0 : 1;
}
/**
 * request_dma_bycap - Allocate a DMA channel based on its capabilities
 * @dmac: List of DMA controllers to search
 * @caps: List of capabilities
 * @dev_id: Owner name recorded against the allocated channel
 *
 * Search all channels of all DMA controllers to find a channel which
 * matches the requested capabilities. The result is the channel
 * number if a match is found, or %-ENODEV if no match is found.
 *
 * Note that not all DMA controllers export capabilities, in which
 * case they can never be allocated using this API, and so
 * request_dma() must be used specifying the channel number.
 */
int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id)
{
	struct dma_info *info;
	const char **p;
	int i;
	int found;

	BUG_ON(!dmac || !caps);

	/* matching_dmac() returns 0 on a name match. */
	list_for_each_entry(info, &registered_dmac_list, list)
		if (matching_dmac(*dmac, info->name) == 0) {
			for (i = 0; i < info->nr_channels; i++) {
				struct dma_channel *channel =
					&info->channels[i];

				/* No capability list: can never match. */
				if (unlikely(!channel->caps))
					continue;

				/* Every requested capability must be present. */
				found = 1;
				for (p = caps; *p; p++)
					if (!search_cap(channel->caps, *p)) {
						found = 0;
						break;
					}
				if (!found)
					continue;

				/* Busy channels fail request_dma(); keep looking. */
				if (request_dma(channel->vchan, dev_id) == 0)
					return channel->vchan;
			}
		}

	return -ENODEV;
}
EXPORT_SYMBOL(request_dma_bycap);
int dmac_search_free_channel(const char *dev_id)
{
struct dma_channel *channel = { 0 };
struct dma_info *info = get_dma_info(0);
int i;
for (i = 0; i < info->nr_channels; i++) {
channel = &info->channels[i];
if (unlikely(!channel))
return -ENODEV;
if (atomic_read(&channel->busy) == 0)
break;
}
if (info->ops->request) {
int result = info->ops->request(channel);
if (result)
return result;
atomic_set(&channel->busy, 1);
return channel->vchan;
}
return -ENOSYS;
}
/*
 * request_dma - claim virtual channel @vchan for @dev_id.
 *
 * Returns 0 on success, -EINVAL for an unknown channel, -EBUSY when
 * already claimed, or the controller's ops->request() error.
 *
 * Fix: get_dma_channel() can return NULL or an ERR_PTR() value, which
 * was previously dereferenced unconditionally.
 */
int request_dma(unsigned int vchan, const char *dev_id)
{
	struct dma_channel *channel;
	struct dma_info *info;
	int result;

#if defined(CONFIG_STM_DMA)
	if (DMA_REQ_ANY_CHANNEL == vchan)
		return dmac_search_free_channel(dev_id);
#endif

	info = get_dma_info(vchan);
	if (!info)
		return -EINVAL;

	channel = get_dma_channel(vchan);
	if (IS_ERR_OR_NULL(channel))
		return -EINVAL;

	/* atomic_xchg() makes claim-checking race-free. */
	if (atomic_xchg(&channel->busy, 1))
		return -EBUSY;

	strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id));

	if (info->ops->request) {
		result = info->ops->request(channel);
		if (result)
			atomic_set(&channel->busy, 0);

		return result;
	}

	return 0;
}
EXPORT_SYMBOL(request_dma);
/*
 * free_dma - release a channel obtained with request_dma().
 * NOTE(review): @vchan is not validated; an unknown number would
 * dereference a NULL info/channel here — callers must pass a channel
 * they successfully requested.
 */
void free_dma(unsigned int vchan)
{
	struct dma_info *info = get_dma_info(vchan);
	struct dma_channel *channel = get_dma_channel(vchan);

	if (info->ops->free)
		info->ops->free(channel);

	atomic_set(&channel->busy, 0);
}
EXPORT_SYMBOL(free_dma);
/*
 * dma_wait_for_completion - block until @vchan's residue hits zero.
 *
 * TEI-capable channels sleep on the channel wait queue (woken from the
 * controller's transfer-end interrupt); all others spin on
 * get_residue() with cpu_relax().
 */
void dma_wait_for_completion(unsigned int vchan)
{
	struct dma_info *info = get_dma_info(vchan);
	struct dma_channel *channel = get_dma_channel(vchan);

	if (channel->flags & DMA_TEI_CAPABLE) {
		wait_event(channel->wait_queue,
			   (info->ops->get_residue(channel) == 0));
		return;
	}

	while (info->ops->get_residue(channel))
		cpu_relax();
}
EXPORT_SYMBOL(dma_wait_for_completion);
/*
 * register_chan_caps - attach capability lists to a DMAC's channels.
 *
 * @caps entries must be ordered to match the controller's channel
 * numbering (first_channel_nr onwards). Returns -ENODEV for an
 * unknown DMAC name, -EINVAL on a numbering mismatch, 0 on success.
 */
int register_chan_caps(const char *dmac, struct dma_chan_caps *caps)
{
	struct dma_info *info;
	unsigned int found = 0;
	int i;

	list_for_each_entry(info, &registered_dmac_list, list)
		if (strcmp(dmac, info->name) == 0) {
			found = 1;
			break;
		}

	if (unlikely(!found))
		return -ENODEV;

	for (i = 0; i < info->nr_channels; i++, caps++) {
		struct dma_channel *channel;

		if ((info->first_channel_nr + i) != caps->ch_num)
			return -EINVAL;

		channel = &info->channels[i];
		channel->caps = caps->caplist;
	}

	return 0;
}
EXPORT_SYMBOL(register_chan_caps);
/*
 * dma_configure_channel - pass controller-specific @flags through to
 * the owning DMAC's configure hook, if it provides one.
 */
void dma_configure_channel(unsigned int vchan, unsigned long flags)
{
	struct dma_info *info = get_dma_info(vchan);
	struct dma_channel *channel = get_dma_channel(vchan);

	if (info->ops->configure)
		info->ops->configure(channel, flags);
}
EXPORT_SYMBOL(dma_configure_channel);
/*
 * dma_xfer - start a transfer of @size bytes @from -> @to on @vchan.
 * Delegates directly to the controller's mandatory xfer hook; the
 * meaning of @mode is controller-specific.
 */
int dma_xfer(unsigned int vchan, unsigned long from,
	     unsigned long to, size_t size, unsigned int mode)
{
	struct dma_info *info = get_dma_info(vchan);
	struct dma_channel *channel = get_dma_channel(vchan);

	return info->ops->xfer(channel, from, to, size, mode);
}
EXPORT_SYMBOL(dma_xfer);
/*
 * dma_extend - invoke a controller-specific extended operation @op with
 * argument @param. Returns -ENOSYS when the DMAC has no extend hook.
 */
int dma_extend(unsigned int vchan, unsigned long op, void *param)
{
	struct dma_info *info = get_dma_info(vchan);
	struct dma_channel *channel = get_dma_channel(vchan);

	if (info->ops->extend)
		return info->ops->extend(channel, op, param);

	return -ENOSYS;
}
EXPORT_SYMBOL(dma_extend);
/*
 * /proc/dma read handler: one "<idx>: <dmac name> <owner>" line per
 * busy channel across all registered DMACs.
 */
static int dma_read_proc(char *buf, char **start, off_t off,
			 int len, int *eof, void *data)
{
	struct dma_info *info;
	char *p = buf;

	if (list_empty(&registered_dmac_list))
		return 0;

	/*
	 * Iterate over each registered DMAC
	 */
	list_for_each_entry(info, &registered_dmac_list, list) {
		int i;

		/*
		 * Iterate over each channel
		 */
		for (i = 0; i < info->nr_channels; i++) {
			struct dma_channel *channel = info->channels + i;

			/* Idle channels are omitted from the listing. */
			if(atomic_read(&channel->busy) == 0)
				continue;

			p += sprintf(p, "%2d: %14s %s\n", i,
				     info->name, channel->dev_id);
		}
	}

	return p - buf;
}
/*
 * register_dmac - register a DMA controller with the SH DMA API.
 *
 * Creates a platform device for the controller, allocates the channel
 * array (unless the caller pre-configured it), assigns channel and
 * virtual-channel numbers, creates per-channel sysfs files and links
 * the controller into the global list.
 *
 * Fix: the platform device was leaked when the channel allocation
 * failed.
 */
int register_dmac(struct dma_info *info)
{
	unsigned int total_channels, i;

	INIT_LIST_HEAD(&info->list);

	printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n",
	       info->name, info->nr_channels, info->nr_channels > 1 ? "s" : "");

	BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels);

	info->pdev = platform_device_register_simple(info->name, -1,
						     NULL, 0);
	if (IS_ERR(info->pdev))
		return PTR_ERR(info->pdev);

	/*
	 * Don't touch pre-configured channels
	 */
	if (!(info->flags & DMAC_CHANNELS_CONFIGURED)) {
		unsigned int size;

		size = sizeof(struct dma_channel) * info->nr_channels;

		info->channels = kzalloc(size, GFP_KERNEL);
		if (!info->channels) {
			/* Don't leak the platform device on failure. */
			platform_device_unregister(info->pdev);
			return -ENOMEM;
		}
	}

	total_channels = get_nr_channels();
	info->first_vchannel_nr = total_channels;
	for (i = 0; i < info->nr_channels; i++) {
		struct dma_channel *chan = &info->channels[i];

		atomic_set(&chan->busy, 0);

		chan->chan  = info->first_channel_nr + i;
		/*
		 * NOTE(review): vchan includes first_channel_nr, but
		 * get_dma_info() matches only first_vchannel_nr ..
		 * +nr_channels — verify lookups for DMACs with a
		 * non-zero first_channel_nr.
		 */
		chan->vchan = info->first_channel_nr + i + total_channels;

		memcpy(chan->dev_id, "Unused", 7);

		if (info->flags & DMAC_CHANNELS_TEI_CAPABLE)
			chan->flags |= DMA_TEI_CAPABLE;

		init_waitqueue_head(&chan->wait_queue);
		dma_create_sysfs_files(chan, info);
	}

	list_add(&info->list, &registered_dmac_list);

	return 0;
}
EXPORT_SYMBOL(register_dmac);
/*
 * unregister_dmac - undo register_dmac(): remove per-channel sysfs
 * files, free the channel array (only if this API allocated it),
 * unlink from the global list and drop the platform device.
 */
void unregister_dmac(struct dma_info *info)
{
	unsigned int i;

	for (i = 0; i < info->nr_channels; i++)
		dma_remove_sysfs_files(info->channels + i, info);

	if (!(info->flags & DMAC_CHANNELS_CONFIGURED))
		kfree(info->channels);

	list_del(&info->list);
	platform_device_unregister(info->pdev);
}
EXPORT_SYMBOL(unregister_dmac);
/*
 * Create the /proc/dma listing at subsys init time.
 * NOTE(review): create_proc_read_entry()'s result is ignored — a
 * failure only loses the proc file, nothing else depends on it.
 */
static int __init dma_api_init(void)
{
	printk(KERN_NOTICE "DMA: Registering DMA API.\n");
	create_proc_read_entry("dma", 0, 0, dma_read_proc, 0);
	return 0;
}
subsys_initcall(dma_api_init);
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("DMA API for SuperH");
MODULE_LICENSE("GPL");

View File

@@ -0,0 +1,200 @@
/*
* arch/sh/drivers/dma/dma-g2.c
*
* G2 bus DMA support
*
* Copyright (C) 2003 - 2006 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/cacheflush.h>
#include <mach/sysasic.h>
#include <mach/dma.h>
#include <asm/dma.h>
/* Per-channel G2 DMA register block (memory-mapped). */
struct g2_channel {
	unsigned long g2_addr;		/* G2 bus address */
	unsigned long root_addr;	/* Root bus (SH-4) address */
	unsigned long size;		/* Size (in bytes), 32-byte aligned */
	unsigned long direction;	/* Transfer direction */
	unsigned long ctrl;		/* Transfer control */
	unsigned long chan_enable;	/* Channel enable */
	unsigned long xfer_enable;	/* Transfer enable */
	unsigned long xfer_stat;	/* Transfer status */
} __attribute__ ((aligned(32)));

/* Per-channel transfer status as reported by the controller. */
struct g2_status {
	unsigned long g2_addr;
	unsigned long root_addr;
	unsigned long size;
	unsigned long status;
} __attribute__ ((aligned(16)));

/* Complete register file of the G2 DMA controller. */
struct g2_dma_info {
	struct g2_channel channel[G2_NR_DMA_CHANNELS];
	unsigned long pad1[G2_NR_DMA_CHANNELS];
	unsigned long wait_state;
	unsigned long pad2[10];
	unsigned long magic;
	struct g2_status status[G2_NR_DMA_CHANNELS];
} __attribute__ ((aligned(256)));

/* Fixed physical mapping of the controller registers. */
static volatile struct g2_dma_info *g2_dma = (volatile struct g2_dma_info *)0xa05f7800;

/* Bytes still outstanding on channel i (28-bit counter). */
#define g2_bytes_remaining(i) \
	((g2_dma->channel[i].size - \
	  g2_dma->status[i].size) & 0x0fffffff)
/*
 * Completion ISR: scan all G2 channels and wake the waiter of the
 * first channel that signalled completion with no bytes remaining.
 */
static irqreturn_t g2_dma_interrupt(int irq, void *dev_id)
{
	int i;

	for (i = 0; i < G2_NR_DMA_CHANNELS; i++) {
		if (g2_dma->status[i].status & 0x20000000) {
			unsigned int bytes = g2_bytes_remaining(i);

			if (likely(bytes == 0)) {
				struct dma_info *info = dev_id;
				struct dma_channel *chan = info->channels + i;

				wake_up(&chan->wait_queue);

				return IRQ_HANDLED;
			}
		}
	}

	return IRQ_NONE;
}
static int g2_enable_dma(struct dma_channel *chan)
{
unsigned int chan_nr = chan->chan;
g2_dma->channel[chan_nr].chan_enable = 1;
g2_dma->channel[chan_nr].xfer_enable = 1;
return 0;
}
static int g2_disable_dma(struct dma_channel *chan)
{
unsigned int chan_nr = chan->chan;
g2_dma->channel[chan_nr].chan_enable = 0;
g2_dma->channel[chan_nr].xfer_enable = 0;
return 0;
}
/*
 * g2_xfer_dma - program and start a transfer on @chan.
 *
 * Source and destination must be 32-byte aligned; the byte count is
 * rounded up to a 32-byte multiple. Returns -EINVAL on misalignment.
 */
static int g2_xfer_dma(struct dma_channel *chan)
{
	unsigned int chan_nr = chan->chan;

	if (chan->sar & 31) {
		printk("g2dma: unaligned source 0x%lx\n", chan->sar);
		return -EINVAL;
	}

	if (chan->dar & 31) {
		printk("g2dma: unaligned dest 0x%lx\n", chan->dar);
		return -EINVAL;
	}

	/* Align the count */
	if (chan->count & 31)
		chan->count = (chan->count + (32 - 1)) & ~(32 - 1);

	/* Fixup destination */
	chan->dar += 0xa0800000;

	/* Fixup direction */
	chan->mode = !chan->mode;

	/*
	 * NOTE(review): flush_icache_range() takes (start, end) — the
	 * second argument here is the raw count, which looks like it
	 * should be chan->sar + chan->count. Confirm before changing;
	 * this matches the code as shipped.
	 */
	flush_icache_range((unsigned long)chan->sar, chan->count);

	g2_disable_dma(chan);

	g2_dma->channel[chan_nr].g2_addr   = chan->dar & 0x1fffffe0;
	g2_dma->channel[chan_nr].root_addr = chan->sar & 0x1fffffe0;
	g2_dma->channel[chan_nr].size	   = (chan->count & ~31) | 0x80000000;
	g2_dma->channel[chan_nr].direction = chan->mode;

	/*
	 * bit 0 - ???
	 * bit 1 - if set, generate a hardware event on transfer completion
	 * bit 2 - ??? something to do with suspend?
	 */
	g2_dma->channel[chan_nr].ctrl	= 5; /* ?? */

	g2_enable_dma(chan);

	/* debug cruft */
	pr_debug("count, sar, dar, mode, ctrl, chan, xfer: %ld, 0x%08lx, "
		 "0x%08lx, %ld, %ld, %ld, %ld\n",
		 g2_dma->channel[chan_nr].size,
		 g2_dma->channel[chan_nr].root_addr,
		 g2_dma->channel[chan_nr].g2_addr,
		 g2_dma->channel[chan_nr].direction,
		 g2_dma->channel[chan_nr].ctrl,
		 g2_dma->channel[chan_nr].chan_enable,
		 g2_dma->channel[chan_nr].xfer_enable);

	return 0;
}
/* Bytes left on @chan, straight from the hardware counters. */
static int g2_get_residue(struct dma_channel *chan)
{
	return g2_bytes_remaining(chan->chan);
}
/* Only xfer/get_residue are exposed; enable/disable stay internal. */
static struct dma_ops g2_dma_ops = {
	.xfer		= g2_xfer_dma,
	.get_residue	= g2_get_residue,
};

/* Four TEI-capable channels registered under "g2_dmac". */
static struct dma_info g2_dma_info = {
	.name		= "g2_dmac",
	.nr_channels	= 4,
	.ops		= &g2_dma_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};
/*
 * Acquire the G2 IRQ, poke the controller's magic init values, then
 * register with the SH DMA API.
 *
 * Fix: the error path called free_irq(irq, 0), which does not match
 * the dev_id (&g2_dma_info) the IRQ was requested with.
 */
static int __init g2_dma_init(void)
{
	int ret;

	ret = request_irq(HW_EVENT_G2_DMA, g2_dma_interrupt, IRQF_DISABLED,
			  "g2 DMA handler", &g2_dma_info);
	if (unlikely(ret))
		return -EINVAL;

	/* Magic */
	g2_dma->wait_state	= 27;
	g2_dma->magic		= 0x4659404f;

	ret = register_dmac(&g2_dma_info);
	if (unlikely(ret != 0)) {
		/* Must match the dev_id passed to request_irq() above. */
		free_irq(HW_EVENT_G2_DMA, &g2_dma_info);
	}

	return ret;
}
/*
 * Module teardown: release the IRQ and deregister the controller.
 * Fix: free_irq() must be given the same dev_id (&g2_dma_info) that
 * request_irq() was called with; passing 0 does not release the action.
 */
static void __exit g2_dma_exit(void)
{
	free_irq(HW_EVENT_G2_DMA, &g2_dma_info);
	unregister_dmac(&g2_dma_info);
}
subsys_initcall(g2_dma_init);
module_exit(g2_dma_exit);
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("G2 bus DMA driver");
MODULE_LICENSE("GPL");

View File

@@ -0,0 +1,109 @@
/*
* arch/sh/drivers/dma/dma-pvr2.c
*
* NEC PowerVR 2 (Dreamcast) DMA support
*
* Copyright (C) 2003, 2004 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <mach/sysasic.h>
#include <mach/dma.h>
#include <asm/dma.h>
#include <asm/io.h>
/* Cleared when a transfer starts, set from the completion ISR. */
static unsigned int xfer_complete;
/* Caps the debug print below to the first few interrupts. */
static int count;
/*
 * Completion ISR: the PVR2 DMAC finishes after the cascaded on-chip
 * DMAC channel, so wait out any residue there before flagging
 * completion via xfer_complete.
 */
static irqreturn_t pvr2_dma_interrupt(int irq, void *dev_id)
{
	if (get_dma_residue(PVR2_CASCADE_CHAN)) {
		printk(KERN_WARNING "DMA: SH DMAC did not complete transfer "
		       "on channel %d, waiting..\n", PVR2_CASCADE_CHAN);
		dma_wait_for_completion(PVR2_CASCADE_CHAN);
	}

	/* Only log the first 10 interrupts to avoid flooding the log. */
	if (count++ < 10)
		pr_debug("Got a pvr2 dma interrupt for channel %d\n",
			 irq - HW_EVENT_PVR2_DMA);

	xfer_complete = 1;

	return IRQ_HANDLED;
}
/*
 * Claim the PVR2 DMAC: fail with -EBUSY if a transfer mode is still
 * programmed, otherwise reset LMMODE0.
 */
static int pvr2_request_dma(struct dma_channel *chan)
{
	if (ctrl_inl(PVR2_DMA_MODE) != 0)
		return -EBUSY;

	ctrl_outl(0, PVR2_DMA_LMMODE0);

	return 0;
}
/* Residue is binary here: 1 while a transfer is in flight, else 0. */
static int pvr2_get_dma_residue(struct dma_channel *chan)
{
	return !xfer_complete;
}
/*
 * Kick a PVR2 transfer. Only the destination side is programmed here;
 * the source data is fed through the cascaded on-chip DMAC channel.
 * NOTE(review): the guard rejects a non-zero sar and a zero dar —
 * presumably because the source belongs to the cascade channel;
 * confirm against the pvr2fb caller.
 */
static int pvr2_xfer_dma(struct dma_channel *chan)
{
	if (chan->sar || !chan->dar)
		return -EINVAL;

	xfer_complete = 0;

	ctrl_outl(chan->dar, PVR2_DMA_ADDR);
	ctrl_outl(chan->count, PVR2_DMA_COUNT);
	ctrl_outl(chan->mode & DMA_MODE_MASK, PVR2_DMA_MODE);

	return 0;
}
/* Static irqaction installed via setup_irq() at init. */
static struct irqaction pvr2_dma_irq = {
	.name		= "pvr2 DMA handler",
	.handler	= pvr2_dma_interrupt,
	.flags		= IRQF_DISABLED,
};

static struct dma_ops pvr2_dma_ops = {
	.request	= pvr2_request_dma,
	.get_residue	= pvr2_get_dma_residue,
	.xfer		= pvr2_xfer_dma,
};

/* Single TEI-capable channel registered under "pvr2_dmac". */
static struct dma_info pvr2_dma_info = {
	.name		= "pvr2_dmac",
	.nr_channels	= 1,
	.ops		= &pvr2_dma_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};
/*
 * Wire up the IRQ, reserve the cascade channel on the on-chip DMAC,
 * then register this controller.
 * NOTE(review): the setup_irq() and request_dma() results are ignored
 * — a failed cascade reservation would go unnoticed here.
 */
static int __init pvr2_dma_init(void)
{
	setup_irq(HW_EVENT_PVR2_DMA, &pvr2_dma_irq);
	request_dma(PVR2_CASCADE_CHAN, "pvr2 cascade");

	return register_dmac(&pvr2_dma_info);
}
/*
 * Teardown mirror of pvr2_dma_init().
 * NOTE(review): the IRQ was installed with setup_irq(&pvr2_dma_irq) but
 * is released with free_irq(irq, 0) — verify the dev_id matches what
 * this kernel's free_irq() expects for a static irqaction.
 */
static void __exit pvr2_dma_exit(void)
{
	free_dma(PVR2_CASCADE_CHAN);
	free_irq(HW_EVENT_PVR2_DMA, 0);
	unregister_dmac(&pvr2_dma_info);
}
subsys_initcall(pvr2_dma_init);
module_exit(pvr2_dma_exit);
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("NEC PowerVR 2 DMA driver");
MODULE_LICENSE("GPL");

View File

@@ -0,0 +1,350 @@
/*
* arch/sh/drivers/dma/dma-sh.c
*
* SuperH On-chip DMAC Support
*
* Copyright (C) 2000 Takashi YOSHII
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2005 Andriy Skulysh
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <mach-dreamcast/mach/dma.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/dma-sh.h>
/* Number of DMAC address-error interrupt sources on this part. */
#if defined(DMAE1_IRQ)
#define NR_DMAE		2
#else
#define NR_DMAE		1
#endif

/* Names used when requesting the address-error IRQs. */
static const char *dmae_name[] = {
	"DMAC Address Error0", "DMAC Address Error1"
};
/*
 * Map a channel number to its transfer-end IRQ. With multiplexed IRQs
 * (SH_DMA_IRQ_MULTI) everything collapses onto DMTE0/DMTE6.
 */
static inline unsigned int get_dmte_irq(unsigned int chan)
{
	unsigned int irq = 0;

	if (chan < ARRAY_SIZE(dmte_irq_map))
		irq = dmte_irq_map[chan];

#if defined(CONFIG_SH_DMA_IRQ_MULTI)
	if (irq > DMTE6_IRQ)
		return DMTE6_IRQ;
	return DMTE0_IRQ;
#else
	return irq;
#endif
}
/*
 * We determine the correct shift size based off of the CHCR transmit size
 * for the given channel. Since we know that it will take:
 *
 *	info->count >> ts_shift[transmit_size]
 *
 * iterations to complete the transfer.
 */
static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
{
	u32 chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);

	return ts_shift[(chcr & CHCR_TS_MASK)>>CHCR_TS_SHIFT];
}
/*
 * The transfer end interrupt must read the chcr register to end the
 * hardware interrupt active condition.
 * Besides that it needs to waken any waiting process, which should handle
 * setting up the next transfer.
 */
static irqreturn_t dma_tei(int irq, void *dev_id)
{
	struct dma_channel *chan = dev_id;
	u32 chcr;

	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);

	/* TE clear means this (possibly shared) IRQ wasn't ours. */
	if (!(chcr & CHCR_TE))
		return IRQ_NONE;

	/* Mask further interrupts and disable the channel. */
	chcr &= ~(CHCR_IE | CHCR_DE);
	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));

	wake_up(&chan->wait_queue);

	return IRQ_HANDLED;
}
/*
 * Grab the transfer-end IRQ for TEI-capable channels; non-TEI channels
 * need no IRQ and succeed trivially.
 */
static int sh_dmac_request_dma(struct dma_channel *chan)
{
	if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
		return 0;

	return request_irq(get_dmte_irq(chan->chan), dma_tei,
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
			   IRQF_SHARED,
#else
			   IRQF_DISABLED,
#endif
			   chan->dev_id, chan);
}
/* Release the transfer-end IRQ acquired in sh_dmac_request_dma(). */
static void sh_dmac_free_dma(struct dma_channel *chan)
{
	unsigned int irq = get_dmte_irq(chan->chan);

	free_irq(irq, chan);
}
/*
 * Program CHCR for @chan. A zero @chcr selects the dual-address,
 * interrupt-enabled default. CHCR_IE is latched into DMA_TEI_CAPABLE
 * and stripped here; it is re-asserted when the channel is enabled.
 */
static int
sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
{
	if (!chcr)
		chcr = RS_DUAL | CHCR_IE;

	if (chcr & CHCR_IE) {
		chcr &= ~CHCR_IE;
		chan->flags |= DMA_TEI_CAPABLE;
	} else {
		chan->flags &= ~DMA_TEI_CAPABLE;
	}

	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));

	chan->flags |= DMA_CONFIGURED;
	return 0;
}
/*
 * Set CHCR_DE (and CHCR_IE for TEI-capable channels), then unmask the
 * channel's transfer-end IRQ.
 */
static void sh_dmac_enable_dma(struct dma_channel *chan)
{
	int irq;
	u32 chcr;

	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
	chcr |= CHCR_DE;

	if (chan->flags & DMA_TEI_CAPABLE)
		chcr |= CHCR_IE;

	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));

	if (chan->flags & DMA_TEI_CAPABLE) {
		irq = get_dmte_irq(chan->chan);
		enable_irq(irq);
	}
}
/*
 * Mask the channel's transfer-end IRQ first, then clear the enable,
 * transfer-end and interrupt-enable bits in CHCR.
 */
static void sh_dmac_disable_dma(struct dma_channel *chan)
{
	int irq;
	u32 chcr;

	if (chan->flags & DMA_TEI_CAPABLE) {
		irq = get_dmte_irq(chan->chan);
		disable_irq(irq);
	}

	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));
}
/*
 * sh_dmac_xfer_dma - program SAR/DAR/TCR for @chan and start it.
 * TCR is loaded with the transfer count in units of the configured
 * transmit size (see calc_xmit_shift()).
 */
static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
	/*
	 * If we haven't pre-configured the channel with special flags, use
	 * the defaults.
	 */
	if (unlikely(!(chan->flags & DMA_CONFIGURED)))
		sh_dmac_configure_channel(chan, 0);

	sh_dmac_disable_dma(chan);

	/*
	 * Single-address mode usage note!
	 *
	 * It's important that we don't accidentally write any value to SAR/DAR
	 * (this includes 0) that hasn't been directly specified by the user if
	 * we're in single-address mode.
	 *
	 * In this case, only one address can be defined, anything else will
	 * result in a DMA address error interrupt (at least on the SH-4),
	 * which will subsequently halt the transfer.
	 *
	 * Channel 2 on the Dreamcast is a special case, as this is used for
	 * cascading to the PVR2 DMAC. In this case, we still need to write
	 * SAR and DAR, regardless of value, in order for cascading to work.
	 */
	if (chan->sar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		ctrl_outl(chan->sar, (dma_base_addr[chan->chan]+SAR));
	if (chan->dar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		ctrl_outl(chan->dar, (dma_base_addr[chan->chan] + DAR));

	ctrl_outl(chan->count >> calc_xmit_shift(chan),
		  (dma_base_addr[chan->chan] + TCR));

	sh_dmac_enable_dma(chan);

	return 0;
}
/*
 * Residue in bytes: TCR scaled back up by the transmit-size shift.
 * A disabled channel (CHCR_DE clear) reports zero.
 */
static int sh_dmac_get_dma_residue(struct dma_channel *chan)
{
	if (!(ctrl_inl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE))
		return 0;

	return ctrl_inl(dma_base_addr[chan->chan] + TCR)
		 << calc_xmit_shift(chan);
}
/*
 * Re-initialize DMAOR instance @no: clear any latched NMI/address-error
 * flags, apply DMAOR_INIT, and verify the errors stayed clear.
 */
static inline int dmaor_reset(int no)
{
	unsigned long dmaor = dmaor_read_reg(no);

	/* Try to clear the error flags first, in case they are set */
	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
	dmaor_write_reg(no, dmaor);

	dmaor |= DMAOR_INIT;
	dmaor_write_reg(no, dmaor);

	/* See if we got an error again */
	if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) {
		printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}

	return 0;
}
#if defined(CONFIG_CPU_SH4)
/*
 * DMAC address-error IRQ handler. With SH_DMA_IRQ_MULTI the error
 * line is shared, so the IRQ number selects which DMAOR instance to
 * inspect; otherwise any error resets DMAOR outright. In both cases
 * the IRQ is masked once handled.
 */
static irqreturn_t dma_err(int irq, void *dummy)
{
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
	int cnt = 0;
	switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
	case DMTE6_IRQ:
		cnt++;
		/* fallthrough: DMTE6 selects the second DMAOR instance */
#endif
	case DMTE0_IRQ:
		if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
			disable_irq(irq);
			/* DMA multi and error IRQ */
			return IRQ_HANDLED;
		}
		/* fallthrough: no error bits set, not ours */
	default:
		return IRQ_NONE;
	}
#else
	dmaor_reset(0);
#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
	defined(CONFIG_CPU_SUBTYPE_SH7785)
	dmaor_reset(1);
#endif
	disable_irq(irq);

	return IRQ_HANDLED;
#endif
}
#endif
/* Full hook set for the on-chip DMAC. */
static struct dma_ops sh_dmac_ops = {
	.request	= sh_dmac_request_dma,
	.free		= sh_dmac_free_dma,
	.get_residue	= sh_dmac_get_dma_residue,
	.xfer		= sh_dmac_xfer_dma,
	.configure	= sh_dmac_configure_channel,
};

/* Channel count comes from Kconfig (NR_ONCHIP_DMA_CHANNELS). */
static struct dma_info sh_dmac_info = {
	.name		= "sh_dmac",
	.nr_channels	= CONFIG_NR_ONCHIP_DMA_CHANNELS,
	.ops		= &sh_dmac_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};
#ifdef CONFIG_CPU_SH4
/*
 * IRQ used for address-error reporting by DMAC instance @n.
 * NOTE(review): the non-multi path can return -1 (no DMAE1_IRQ) from
 * an unsigned function — callers pass it straight to request_irq();
 * confirm that configuration cannot occur with NR_DMAE == 2.
 */
static unsigned int get_dma_error_irq(int n)
{
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
	return (n == 0) ? get_dmte_irq(0) : get_dmte_irq(6);
#else
	return (n == 0) ? DMAE0_IRQ :
#if defined(DMAE1_IRQ)
		DMAE1_IRQ;
#else
		-1;
#endif
#endif
}
#endif
/*
 * Driver init: request the address-error IRQ(s) (SH-4 only), reset the
 * DMAOR instance(s), then register the controller.
 * NOTE(review): on a mid-loop or reset failure, IRQs requested so far
 * are not released before returning.
 */
static int __init sh_dmac_init(void)
{
	struct dma_info *info = &sh_dmac_info;
	int i;

#ifdef CONFIG_CPU_SH4
	int n;

	for (n = 0; n < NR_DMAE; n++) {
		i = request_irq(get_dma_error_irq(n), dma_err,
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
				IRQF_SHARED,
#else
				IRQF_DISABLED,
#endif
				dmae_name[n], (void *)dmae_name[n]);
		if (unlikely(i < 0)) {
			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
			return i;
		}
	}
#endif /* CONFIG_CPU_SH4 */

	/*
	 * Initialize DMAOR, and clean up any error flags that may have
	 * been set.
	 */
	i = dmaor_reset(0);
	if (unlikely(i != 0))
		return i;
#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
	defined(CONFIG_CPU_SUBTYPE_SH7785)
	i = dmaor_reset(1);
	if (unlikely(i != 0))
		return i;
#endif

	return register_dmac(info);
}
/* Teardown mirror of sh_dmac_init(): release error IRQs, deregister. */
static void __exit sh_dmac_exit(void)
{
#ifdef CONFIG_CPU_SH4
	int n;

	for (n = 0; n < NR_DMAE; n++) {
		free_irq(get_dma_error_irq(n), (void *)dmae_name[n]);
	}
#endif /* CONFIG_CPU_SH4 */
	unregister_dmac(&sh_dmac_info);
}
subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);
MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL");

View File

@@ -0,0 +1,167 @@
/*
* arch/sh/drivers/dma/dma-sysfs.c
*
* sysfs interface for SH DMA API
*
* Copyright (C) 2004 - 2006 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sysdev.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/string.h>
#include <asm/dma.h>
/* Sysdev class backing /sys/devices/system/dma. */
static struct sysdev_class dma_sysclass = {
	.name = "dma",
};
EXPORT_SYMBOL(dma_sysclass);
/*
 * "devices" attribute: one line per virtual channel, listing channel
 * number, owning DMAC and current owner dev_id.
 * NOTE(review): iteration stops at the first vchan get_dma_info()
 * doesn't resolve, and per-line sprintf() assumes the output fits the
 * sysfs PAGE_SIZE buffer — fine for small channel counts.
 */
static ssize_t dma_show_devices(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; get_dma_info(i) != NULL; i++) {
		struct dma_info *info = get_dma_info(i);
		struct dma_channel *channel = get_dma_channel(i);

		if (unlikely(!info) || !channel)
			continue;

		len += sprintf(buf + len, "%2d: %14s    %s\n",
			       channel->chan, info->name,
			       channel->dev_id);
	}

	return len;
}
static SYSDEV_ATTR(devices, S_IRUGO, dma_show_devices, NULL);
/* Register the dma sysdev class and its class-level "devices" file. */
static int __init dma_sysclass_init(void)
{
	int ret;

	ret = sysdev_class_register(&dma_sysclass);
	if (unlikely(ret))
		return ret;

	return sysfs_create_file(&dma_sysclass.kset.kobj, &attr_devices.attr);
}
postcore_initcall(dma_sysclass_init);
/* Per-channel "dev_id" attribute: show the current owner string. */
static ssize_t dma_show_dev_id(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	struct dma_channel *channel = to_dma_channel(dev);
	return sprintf(buf, "%s\n", channel->dev_id);
}
/*
 * Per-channel "dev_id" attribute: overwrite the owner string.
 *
 * Fix: the sysfs buffer is user-controlled and was copied with an
 * unbounded strcpy(); bound the copy to the dev_id field instead.
 */
static ssize_t dma_store_dev_id(struct sys_device *dev,
				struct sysdev_attribute *attr,
				const char *buf, size_t count)
{
	struct dma_channel *channel = to_dma_channel(dev);

	strlcpy(channel->dev_id, buf, sizeof(channel->dev_id));

	return count;
}
static SYSDEV_ATTR(dev_id, S_IRUGO | S_IWUSR, dma_show_dev_id, dma_store_dev_id);
/*
 * Write-only "config" attribute: parse a controller-specific flags
 * word and forward it through dma_configure_channel().
 */
static ssize_t dma_store_config(struct sys_device *dev,
				struct sysdev_attribute *attr,
				const char *buf, size_t count)
{
	struct dma_channel *channel = to_dma_channel(dev);
	unsigned long config;

	config = simple_strtoul(buf, NULL, 0);
	dma_configure_channel(channel->vchan, config);

	return count;
}
static SYSDEV_ATTR(config, S_IWUSR, NULL, dma_store_config);
/* Read/write "mode" attribute mirroring channel->mode. */
static ssize_t dma_show_mode(struct sys_device *dev,
			     struct sysdev_attribute *attr, char *buf)
{
	struct dma_channel *channel = to_dma_channel(dev);
	return sprintf(buf, "0x%08x\n", channel->mode);
}

static ssize_t dma_store_mode(struct sys_device *dev,
			      struct sysdev_attribute *attr,
			      const char *buf, size_t count)
{
	struct dma_channel *channel = to_dma_channel(dev);
	channel->mode = simple_strtoul(buf, NULL, 0);
	return count;
}
static SYSDEV_ATTR(mode, S_IRUGO | S_IWUSR, dma_show_mode, dma_store_mode);
/*
 * Generate a read-only sysfs attribute that prints one dma_channel
 * field using the given printf format.
 */
#define dma_ro_attr(field, fmt)						\
static ssize_t dma_show_##field(struct sys_device *dev,			\
				struct sysdev_attribute *attr, char *buf)\
{									\
	struct dma_channel *channel = to_dma_channel(dev);		\
	return sprintf(buf, fmt, channel->field);			\
}									\
static SYSDEV_ATTR(field, S_IRUGO, dma_show_##field, NULL);

dma_ro_attr(count, "0x%08x\n");
dma_ro_attr(flags, "0x%08lx\n");
/*
 * dma_create_sysfs_files - register a channel's sysdev and create its
 * attribute files plus a "dmaN" link under the owning DMAC's device.
 *
 * Fix: errors were accumulated with |=, which turns distinct negative
 * errno values into a meaningless bitmask; stop at the first failure
 * and return it verbatim instead.
 */
int dma_create_sysfs_files(struct dma_channel *chan, struct dma_info *info)
{
	struct sys_device *dev = &chan->dev;
	char name[16];
	int ret;

	dev->id  = chan->vchan;
	dev->cls = &dma_sysclass;

	ret = sysdev_register(dev);
	if (ret)
		return ret;

	ret = sysdev_create_file(dev, &attr_dev_id);
	if (!ret)
		ret = sysdev_create_file(dev, &attr_count);
	if (!ret)
		ret = sysdev_create_file(dev, &attr_mode);
	if (!ret)
		ret = sysdev_create_file(dev, &attr_flags);
	if (!ret)
		ret = sysdev_create_file(dev, &attr_config);

	if (unlikely(ret)) {
		dev_err(&info->pdev->dev, "Failed creating attrs\n");
		return ret;
	}

	snprintf(name, sizeof(name), "dma%d", chan->chan);
	return sysfs_create_link(&info->pdev->dev.kobj, &dev->kobj, name);
}
/*
 * Teardown mirror of dma_create_sysfs_files(): remove the attribute
 * files, the "dmaN" link, and unregister the channel's sysdev.
 */
void dma_remove_sysfs_files(struct dma_channel *chan, struct dma_info *info)
{
	struct sys_device *dev = &chan->dev;
	char name[16];

	sysdev_remove_file(dev, &attr_dev_id);
	sysdev_remove_file(dev, &attr_count);
	sysdev_remove_file(dev, &attr_mode);
	sysdev_remove_file(dev, &attr_flags);
	sysdev_remove_file(dev, &attr_config);

	snprintf(name, sizeof(name), "dma%d", chan->chan);
	sysfs_remove_link(&info->pdev->dev.kobj, name);

	sysdev_unregister(dev);
}

View File

@@ -0,0 +1,196 @@
/*
* SH7760 DMABRG IRQ handling
*
* (c) 2007 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com>
* licensed under the GPLv2.
*
*/
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/dma.h>
#include <asm/dmabrg.h>
#include <asm/io.h>
/*
* The DMABRG is a special DMA unit within the SH7760. It does transfers
* from USB-SRAM/Audio units to main memory (and also the LCDC; but that
* part is sensibly placed in the LCDC registers and requires no irqs)
* It has 3 IRQ lines which trigger 10 events, and works independently
* from the traditional SH DMAC (although it blocks usage of DMAC 0)
*
* BRGIRQID | component | dir | meaning | source
* -----------------------------------------------------
* 0 | USB-DMA | ... | xfer done | DMABRGI1
* 1 | USB-UAE | ... | USB addr err.| DMABRGI0
* 2 | HAC0/SSI0 | play| all done | DMABRGI1
* 3 | HAC0/SSI0 | play| half done | DMABRGI2
* 4 | HAC0/SSI0 | rec | all done | DMABRGI1
* 5 | HAC0/SSI0 | rec | half done | DMABRGI2
* 6 | HAC1/SSI1 | play| all done | DMABRGI1
* 7 | HAC1/SSI1 | play| half done | DMABRGI2
* 8 | HAC1/SSI1 | rec | all done | DMABRGI1
* 9 | HAC1/SSI1 | rec | half done | DMABRGI2
*
* all can be enabled/disabled in the DMABRGCR register,
* as well as checked if they occurred.
*
* DMABRGI0 services USB DMA Address errors, but it still must be
* enabled/acked in the DMABRGCR register. USB-DMA complete indicator
* is grouped together with the audio buffer end indicators, too bad...
*
* DMABRGCR: Bits 31-24: audio-dma ENABLE flags,
* Bits 23-16: audio-dma STATUS flags,
* Bits 9-8: USB error/xfer ENABLE,
* Bits 1-0: USB error/xfer STATUS.
* Ack an IRQ by writing 0 to the STATUS flag.
* Mask IRQ by writing 0 to ENABLE flag.
*
* Usage is almost like with any other IRQ:
* dmabrg_request_irq(BRGIRQID, handler, data)
* dmabrg_free_irq(BRGIRQID)
*
* handler prototype: void brgirqhandler(void *data)
*/
/* DMAC / DMABRG register addresses (SH7760 memory map). */
#define DMARSRA		0xfe090000	/* DMAC request source select */
#define DMAOR		0xffa00040	/* DMAC operation register */
#define DMACHCR0	0xffa0000c	/* DMAC channel 0 control */
#define DMABRGCR	0xfe3c0000	/* DMABRG enable/status (see layout above) */

#define DMAOR_BRG	0x0000c000	/* DMABRG mode bits in DMAOR */
#define DMAOR_DMEN	0x00000001	/* DMAC master enable */

/* The three CPU interrupt numbers the DMABRG raises (DMABRGI0..2). */
#define DMABRGI0	68
#define DMABRGI1	69
#define DMABRGI2	70
/*
 * One callback slot per DMABRG event (BRGIRQID 0..9).  The array is
 * allocated in dmabrg_init() and indexed directly by the BRGIRQID
 * passed to dmabrg_request_irq()/dmabrg_free_irq().
 */
struct dmabrg_handler {
	void (*handler)(void *);	/* client callback, NULL if slot free */
	void *data;			/* opaque cookie passed to handler */
} *dmabrg_handlers;
static inline void dmabrg_call_handler(int i)
{
dmabrg_handlers[i].handler(dmabrg_handlers[i].data);
}
/*
 * Shared handler for all three DMABRG interrupt lines.  It acks every
 * pending event up front, then dispatches the set *and* enabled bits
 * one by one.  No locking and no validity checks; it should be as fast
 * as possible (audio!).
 */
static irqreturn_t dmabrg_irq(int irq, void *data)
{
	unsigned long pending;
	unsigned int id;

	pending = ctrl_inl(DMABRGCR);
	ctrl_outl(pending & ~0x00ff0003, DMABRGCR);	/* ack all */
	/* status bits sit 8 below their enable bits; mask off disabled ones */
	pending &= pending >> 8;

	/* USB stuff, get it out of the way first */
	if (pending & 1)
		dmabrg_call_handler(DMABRGIRQ_USBDMA);
	if (pending & 2)
		dmabrg_call_handler(DMABRGIRQ_USBDMAERR);

	/* Audio events live in bits 16..23 after the mask above. */
	for (pending >>= 16; pending; pending &= pending - 1) {
		id = __ffs(pending);
		dmabrg_call_handler(id + DMABRGIRQ_A0TXF);
	}

	return IRQ_HANDLED;
}
/*
 * Mask one DMABRG event in DMABRGCR.  USB events (0/1) have their
 * enable bits at 8/9, audio events (2..9) at 24..31.
 */
static void dmabrg_disable_irq(unsigned int dmairq)
{
	unsigned long dcr;

	dcr = ctrl_inl(DMABRGCR);
	/*
	 * 1UL, not 1: for dmairq == 9 the shift count is 31, and
	 * 1 << 31 overflows a signed int (undefined behavior).
	 */
	dcr &= ~(1UL << ((dmairq > 1) ? dmairq + 22 : dmairq + 8));
	ctrl_outl(dcr, DMABRGCR);
}
/*
 * Unmask one DMABRG event in DMABRGCR.  USB events (0/1) have their
 * enable bits at 8/9, audio events (2..9) at 24..31.
 */
static void dmabrg_enable_irq(unsigned int dmairq)
{
	unsigned long dcr;

	dcr = ctrl_inl(DMABRGCR);
	/*
	 * 1UL, not 1: for dmairq == 9 the shift count is 31, and
	 * 1 << 31 overflows a signed int (undefined behavior).
	 */
	dcr |= (1UL << ((dmairq > 1) ? dmairq + 22 : dmairq + 8));
	ctrl_outl(dcr, DMABRGCR);
}
int dmabrg_request_irq(unsigned int dmairq, void(*handler)(void*),
void *data)
{
if ((dmairq > 9) || !handler)
return -ENOENT;
if (dmabrg_handlers[dmairq].handler)
return -EBUSY;
dmabrg_handlers[dmairq].handler = handler;
dmabrg_handlers[dmairq].data = data;
dmabrg_enable_irq(dmairq);
return 0;
}
EXPORT_SYMBOL_GPL(dmabrg_request_irq);
void dmabrg_free_irq(unsigned int dmairq)
{
if (likely(dmairq < 10)) {
dmabrg_disable_irq(dmairq);
dmabrg_handlers[dmairq].handler = NULL;
dmabrg_handlers[dmairq].data = NULL;
}
}
EXPORT_SYMBOL_GPL(dmabrg_free_irq);
/*
 * dmabrg_init - one-time DMABRG setup.
 *
 * Allocates the handler table, reserves DMAC channel 0 (the DMABRG
 * blocks it), programs the DMAC for DMABRG operation and hooks the
 * three DMABRG interrupt lines.  Returns 0 on success or a negative
 * errno; on failure all irqs and the handler table are released.
 */
static int __init dmabrg_init(void)
{
	unsigned long or;
	int ret;

	/* one zeroed slot per DMABRG event (BRGIRQID 0..9) */
	dmabrg_handlers = kcalloc(10, sizeof(struct dmabrg_handler),
				  GFP_KERNEL);
	if (!dmabrg_handlers)
		return -ENOMEM;

#ifdef CONFIG_SH_DMA
	/* request DMAC channel 0 before anyone else can get it */
	ret = request_dma(0, "DMAC 0 (DMABRG)");
	if (ret < 0)
		printk(KERN_INFO "DMABRG: DMAC ch0 not reserved!\n");
#endif

	ctrl_outl(0, DMABRGCR);		/* mask and ack every event */
	ctrl_outl(0, DMACHCR0);
	ctrl_outl(0x94000000, DMARSRA);	/* enable DMABRG in DMAC 0 */

	/* enable DMABRG mode, enable the DMAC */
	or = ctrl_inl(DMAOR);
	ctrl_outl(or | DMAOR_BRG | DMAOR_DMEN, DMAOR);

	ret = request_irq(DMABRGI0, dmabrg_irq, IRQF_DISABLED,
			  "DMABRG USB address error", NULL);
	if (ret)
		goto out0;

	ret = request_irq(DMABRGI1, dmabrg_irq, IRQF_DISABLED,
			  "DMABRG Transfer End", NULL);
	if (ret)
		goto out1;

	ret = request_irq(DMABRGI2, dmabrg_irq, IRQF_DISABLED,
			  "DMABRG Transfer Half", NULL);
	if (ret == 0)
		return ret;

	/*
	 * NOTE(review): DMAC channel 0 requested above is not released
	 * on these error paths — confirm whether free_dma(0) belongs here.
	 */
	free_irq(DMABRGI1, NULL);
out1:	free_irq(DMABRGI0, NULL);
out0:	kfree(dmabrg_handlers);

	return ret;
}

subsys_initcall(dmabrg_init);