add idl4k kernel firmware version 1.13.0.105

Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions

View File

@@ -0,0 +1,42 @@
config ARM_GIC
bool
config ARM_VIC
bool
config ARM_VIC_NR
int
default 2
depends on ARM_VIC
help
The maximum number of VICs available in the system, for
power management.
config ICST525
bool
config ICST307
bool
config SA1111
bool
select DMABOUNCE if !ARCH_PXA
config DMABOUNCE
bool
select ZONE_DMA
config TIMER_ACORN
bool
config SHARP_LOCOMO
bool
config SHARP_PARAM
bool
config SHARP_SCOOP
bool
config COMMON_CLKDEV
bool

View File

@@ -0,0 +1,19 @@
#
# Makefile for the linux kernel.
#
obj-$(CONFIG_ARM_GIC) += gic.o
obj-$(CONFIG_ARM_VIC) += vic.o
obj-$(CONFIG_ICST525) += icst525.o
obj-$(CONFIG_ICST307) += icst307.o
obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
obj-$(CONFIG_TIMER_ACORN) += time-acorn.o
obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_ARCH_IXP2000) += uengine.o
obj-$(CONFIG_ARCH_IXP23XX) += uengine.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_COMMON_CLKDEV) += clkdev.o

View File

@@ -0,0 +1,178 @@
/*
* arch/arm/common/clkdev.c
*
* Copyright (C) 2008 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Helper for the clk API to assist looking up a struct clk.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <asm/clkdev.h>
#include <mach/clkdev.h>
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
/*
* Find the correct struct clk for the device and connection ID.
* We do slightly fuzzy matching here:
* An entry with a NULL ID is assumed to be a wildcard.
* If an entry has a device ID, it must match
* If an entry has a connection ID, it must match
* Then we take the most specific entry - with the following
* order of precedence: dev+con > dev only > con only.
*/
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
struct clk_lookup *p;
struct clk *clk = NULL;
int match, best = 0;
list_for_each_entry(p, &clocks, node) {
match = 0;
if (p->dev_id) {
if (!dev_id || strcmp(p->dev_id, dev_id))
continue;
match += 2;
}
if (p->con_id) {
if (!con_id || strcmp(p->con_id, con_id))
continue;
match += 1;
}
if (match > best) {
clk = p->clk;
if (match != 3)
best = match;
else
break;
}
}
return clk;
}
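As a concrete illustration of that precedence (names and clk pointers are hypothetical, not part of this commit): given the three lookups below, clk_find("dev1", "uart") returns uart_clk_a, because the dev+con entry scores 3 and wins over dev-only (2) and con-only (1).
extern struct clk uart_clk_a, uart_clk_b, uart_clk_c;	/* hypothetical platform clocks */
static struct clk_lookup example_lookups[] = {
	{ .dev_id = "dev1", .con_id = "uart", .clk = &uart_clk_a },	/* match == 3 */
	{ .dev_id = "dev1",                   .clk = &uart_clk_b },	/* match == 2 */
	{ .con_id = "uart",                   .clk = &uart_clk_c },	/* match == 1 */
};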
struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
struct clk *clk;
mutex_lock(&clocks_mutex);
clk = clk_find(dev_id, con_id);
if (clk && !__clk_get(clk))
clk = NULL;
mutex_unlock(&clocks_mutex);
return clk ? clk : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(clk_get_sys);
struct clk *clk_get(struct device *dev, const char *con_id)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
return clk_get_sys(dev_id, con_id);
}
EXPORT_SYMBOL(clk_get);
void clk_put(struct clk *clk)
{
__clk_put(clk);
}
EXPORT_SYMBOL(clk_put);
void clkdev_add(struct clk_lookup *cl)
{
mutex_lock(&clocks_mutex);
list_add_tail(&cl->node, &clocks);
mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clkdev_add);
void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
{
mutex_lock(&clocks_mutex);
while (num--) {
list_add_tail(&cl->node, &clocks);
cl++;
}
mutex_unlock(&clocks_mutex);
}
#define MAX_DEV_ID 20
#define MAX_CON_ID 16
struct clk_lookup_alloc {
struct clk_lookup cl;
char dev_id[MAX_DEV_ID];
char con_id[MAX_CON_ID];
};
struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
const char *dev_fmt, ...)
{
struct clk_lookup_alloc *cla;
cla = kzalloc(sizeof(*cla), GFP_KERNEL);
if (!cla)
return NULL;
cla->cl.clk = clk;
if (con_id) {
strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
cla->cl.con_id = cla->con_id;
}
if (dev_fmt) {
va_list ap;
va_start(ap, dev_fmt);
vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
cla->cl.dev_id = cla->dev_id;
va_end(ap);
}
return &cla->cl;
}
EXPORT_SYMBOL(clkdev_alloc);
int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
struct device *dev)
{
struct clk *r = clk_get(dev, id);
struct clk_lookup *l;
if (IS_ERR(r))
return PTR_ERR(r);
l = clkdev_alloc(r, alias, alias_dev_name);
clk_put(r);
if (!l)
return -ENODEV;
clkdev_add(l);
return 0;
}
EXPORT_SYMBOL(clk_add_alias);
/*
* clkdev_drop - remove a clock dynamically allocated
*/
void clkdev_drop(struct clk_lookup *cl)
{
mutex_lock(&clocks_mutex);
list_del(&cl->node);
mutex_unlock(&clocks_mutex);
kfree(cl);
}
EXPORT_SYMBOL(clkdev_drop);
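For completeness, a minimal board-side sketch of feeding this API, assuming a platform-provided struct clk named osc_clk (hypothetical). The table needs static lifetime, since clkdev_add_table() links the entries onto the list in place rather than copying them:
extern struct clk osc_clk;	/* hypothetical platform clock */
static struct clk_lookup board_clocks[] = {
	{ .dev_id = "ttySA0", .clk = &osc_clk },	/* matched by device name */
	{ .con_id = "bus",    .clk = &osc_clk },	/* wildcard device match  */
};
static void __init board_clk_init(void)
{
	clkdev_add_table(board_clocks, ARRAY_SIZE(board_clocks));
}
A driver would then resolve its clock with clk_get(&pdev->dev, NULL) or clk_get(NULL, "bus").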

View File

@@ -0,0 +1,546 @@
/*
* arch/arm/common/dmabounce.c
*
* Special dma_{map/unmap/dma_sync}_* routines for systems that have
* limited DMA windows. These functions utilize bounce buffers to
* copy data to/from buffers located outside the DMA region. This
* only works for systems in which DMA memory is at the bottom of
* RAM, the remainder of memory is at the top and the DMA memory
* can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
* DMA windows, will require custom implementations that reserve memory
* areas at early bootup.
*
* Original version by Brad Parker (brad@heeltoe.com)
* Re-written by Christopher Hoover <ch@murgatroid.com>
* Made generic by Deepak Saxena <dsaxena@plexity.net>
*
* Copyright (C) 2002 Hewlett Packard Company.
* Copyright (C) 2004 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#undef STATS
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
/* ************************************************** */
struct safe_buffer {
struct list_head node;
/* original request */
void *ptr;
size_t size;
int direction;
/* safe buffer info */
struct dmabounce_pool *pool;
void *safe;
dma_addr_t safe_dma_addr;
};
struct dmabounce_pool {
unsigned long size;
struct dma_pool *pool;
#ifdef STATS
unsigned long allocs;
#endif
};
struct dmabounce_device_info {
struct device *dev;
struct list_head safe_buffers;
#ifdef STATS
unsigned long total_allocs;
unsigned long map_op_count;
unsigned long bounce_count;
int attr_res;
#endif
struct dmabounce_pool small;
struct dmabounce_pool large;
rwlock_t lock;
};
#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
device_info->small.allocs,
device_info->large.allocs,
device_info->total_allocs - device_info->small.allocs -
device_info->large.allocs,
device_info->total_allocs,
device_info->map_op_count,
device_info->bounce_count);
}
static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
size_t size, enum dma_data_direction dir)
{
struct safe_buffer *buf;
struct dmabounce_pool *pool;
struct device *dev = device_info->dev;
unsigned long flags;
dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
__func__, ptr, size, dir);
if (size <= device_info->small.size) {
pool = &device_info->small;
} else if (size <= device_info->large.size) {
pool = &device_info->large;
} else {
pool = NULL;
}
buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
if (buf == NULL) {
dev_warn(dev, "%s: kmalloc failed\n", __func__);
return NULL;
}
buf->ptr = ptr;
buf->size = size;
buf->direction = dir;
buf->pool = pool;
if (pool) {
buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
&buf->safe_dma_addr);
} else {
buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
GFP_ATOMIC);
}
if (buf->safe == NULL) {
dev_warn(dev,
"%s: could not alloc dma memory (size=%d)\n",
__func__, size);
kfree(buf);
return NULL;
}
#ifdef STATS
if (pool)
pool->allocs++;
device_info->total_allocs++;
#endif
write_lock_irqsave(&device_info->lock, flags);
list_add(&buf->node, &device_info->safe_buffers);
write_unlock_irqrestore(&device_info->lock, flags);
return buf;
}
/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
struct safe_buffer *b, *rb = NULL;
unsigned long flags;
read_lock_irqsave(&device_info->lock, flags);
list_for_each_entry(b, &device_info->safe_buffers, node)
if (b->safe_dma_addr == safe_dma_addr) {
rb = b;
break;
}
read_unlock_irqrestore(&device_info->lock, flags);
return rb;
}
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
unsigned long flags;
dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
write_lock_irqsave(&device_info->lock, flags);
list_del(&buf->node);
write_unlock_irqrestore(&device_info->lock, flags);
if (buf->pool)
dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
else
dma_free_coherent(device_info->dev, buf->size, buf->safe,
buf->safe_dma_addr);
kfree(buf);
}
/* ************************************************** */
static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
dma_addr_t dma_addr, const char *where)
{
if (!dev || !dev->archdata.dmabounce)
return NULL;
if (dma_mapping_error(dev, dma_addr)) {
if (dev)
dev_err(dev, "Trying to %s invalid mapping\n", where);
else
pr_err("unknown device: Trying to %s invalid mapping\n", where);
return NULL;
}
return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction dir)
{
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
dma_addr_t dma_addr;
int needs_bounce = 0;
if (device_info)
DO_STATS ( device_info->map_op_count++ );
dma_addr = virt_to_dma(dev, ptr);
if (dev->dma_mask) {
unsigned long mask = *dev->dma_mask;
unsigned long limit;
limit = (mask + 1) & ~mask;
if (limit && size > limit) {
dev_err(dev, "DMA mapping too big (requested %#x "
"mask %#Lx)\n", size, *dev->dma_mask);
return ~0;
}
/*
* Figure out if we need to bounce from the DMA mask.
*/
needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
}
if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
struct safe_buffer *buf;
buf = alloc_safe_buffer(device_info, ptr, size, dir);
if (buf == 0) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
return 0;
}
dev_dbg(dev,
"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);
if ((dir == DMA_TO_DEVICE) ||
(dir == DMA_BIDIRECTIONAL)) {
dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
__func__, ptr, buf->safe, size);
memcpy(buf->safe, ptr, size);
}
ptr = buf->safe;
dma_addr = buf->safe_dma_addr;
} else {
/*
* We don't need to sync the DMA buffer since
* it was allocated via the coherent allocators.
*/
dma_cache_maint(ptr, size, dir);
}
return dma_addr;
}
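The mask arithmetic above is terse, so here is a worked sketch with illustrative values for a device that can only address the bottom 64MB:
/* Illustrative only: mirrors the needs_bounce test in map_single(). */
static int example_needs_bounce(dma_addr_t dma_addr, size_t size)
{
	unsigned long mask = 0x03ffffff;	/* 64MB - 1 */
	/*
	 * e.g. a 12KB buffer at dma_addr 0x03fff000 ends at 0x04001fff:
	 * (0x03fff000 | 0x04001fff) & ~0x03ffffff == 0x04000000 != 0,
	 * so map_single() would bounce it into the safe pool.
	 */
	return ((dma_addr | (dma_addr + size - 1)) & ~mask) != 0;
}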
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
if (buf) {
BUG_ON(buf->size != size);
BUG_ON(buf->direction != dir);
dev_dbg(dev,
"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);
DO_STATS(dev->archdata.dmabounce->bounce_count++);
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
void *ptr = buf->ptr;
dev_dbg(dev,
"%s: copy back safe %p to unsafe %p size %d\n",
__func__, buf->safe, ptr, size);
memcpy(ptr, buf->safe, size);
/*
* DMA buffers must have the same cache properties
* as if they were really used for DMA - which means
* data must be written back to RAM. Note that
* we don't use dmac_flush_range() here for the
* bidirectional case because we know the cache
* lines will be coherent with the data written.
*/
dmac_clean_range(ptr, ptr + size);
outer_clean_range(__pa(ptr), __pa(ptr) + size);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
}
}
/* ************************************************** */
/*
* see if a buffer address is in an 'unsafe' range. if it is,
* allocate a 'safe' buffer and copy the unsafe buffer into it.
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
__func__, ptr, size, dir);
BUG_ON(!valid_dma_direction(dir));
return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
__func__, page, offset, size, dir);
BUG_ON(!valid_dma_direction(dir));
if (PageHighMem(page)) {
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
"is not supported\n");
return ~0;
}
return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);
/*
* see if a mapped address was really a "safe" buffer and if so, copy
* the data from the safe buffer back to the unsafe buffer and free up
* the safe buffer. (basically return things back to the way they
* should be)
*/
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, dir);
unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
unsigned long off, size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
buf = find_safe_buffer_dev(dev, addr, __func__);
if (!buf)
return 1;
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);
DO_STATS(dev->archdata.dmabounce->bounce_count++);
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
__func__, buf->safe + off, buf->ptr + off, sz);
memcpy(buf->ptr + off, buf->safe + off, sz);
}
return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);
int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
unsigned long off, size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
buf = find_safe_buffer_dev(dev, addr, __func__);
if (!buf)
return 1;
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);
DO_STATS(dev->archdata.dmabounce->bounce_count++);
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
__func__,buf->ptr + off, buf->safe + off, sz);
memcpy(buf->safe + off, buf->ptr + off, sz);
}
return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
const char *name, unsigned long size)
{
pool->size = size;
DO_STATS(pool->allocs = 0);
pool->pool = dma_pool_create(name, dev, size,
0 /* byte alignment */,
0 /* no page-crossing issues */);
return pool->pool ? 0 : -ENOMEM;
}
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
unsigned long large_buffer_size)
{
struct dmabounce_device_info *device_info;
int ret;
device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
if (!device_info) {
dev_err(dev,
"Could not allocate dmabounce_device_info\n");
return -ENOMEM;
}
ret = dmabounce_init_pool(&device_info->small, dev,
"small_dmabounce_pool", small_buffer_size);
if (ret) {
dev_err(dev,
"dmabounce: could not allocate DMA pool for %ld byte objects\n",
small_buffer_size);
goto err_free;
}
if (large_buffer_size) {
ret = dmabounce_init_pool(&device_info->large, dev,
"large_dmabounce_pool",
large_buffer_size);
if (ret) {
dev_err(dev,
"dmabounce: could not allocate DMA pool for %ld byte objects\n",
large_buffer_size);
goto err_destroy;
}
}
device_info->dev = dev;
INIT_LIST_HEAD(&device_info->safe_buffers);
rwlock_init(&device_info->lock);
#ifdef STATS
device_info->total_allocs = 0;
device_info->map_op_count = 0;
device_info->bounce_count = 0;
device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif
dev->archdata.dmabounce = device_info;
dev_info(dev, "dmabounce: registered device\n");
return 0;
err_destroy:
dma_pool_destroy(device_info->small.pool);
err_free:
kfree(device_info);
return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);
void dmabounce_unregister_dev(struct device *dev)
{
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
dev->archdata.dmabounce = NULL;
if (!device_info) {
dev_warn(dev,
"Never registered with dmabounce but attempting "
"to unregister!\n");
return;
}
if (!list_empty(&device_info->safe_buffers)) {
dev_err(dev,
"Removing from dmabounce with pending buffers!\n");
BUG();
}
if (device_info->small.pool)
dma_pool_destroy(device_info->small.pool);
if (device_info->large.pool)
dma_pool_destroy(device_info->large.pool);
#ifdef STATS
if (device_info->attr_res == 0)
device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif
kfree(device_info);
dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);
MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");
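As a usage sketch (mirroring what it8152.c later in this commit does for devices behind its PCI bridge), a platform registers each constrained device with small and large pool sizes tuned to its traffic:
static int board_platform_notify(struct device *dev)
{
	if (dev->bus == &pci_bus_type)
		return dmabounce_register_dev(dev, 2048, 4096);
	return 0;
}
The platform must also provide dma_needs_bounce() so map_single() can decide per buffer; see it8152.c below.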

View File

@@ -0,0 +1,265 @@
/*
* linux/arch/arm/common/gic.c
*
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Interrupt architecture for the GIC:
*
* o There is one Interrupt Distributor, which receives interrupts
* from system devices and sends them to the Interrupt Controllers.
*
* o There is one CPU Interface per CPU, which sends interrupts sent
* by the Distributor, and interrupts generated locally, to the
* associated CPU. The base address of the CPU interface is usually
* aliased so that the same address points to different chips depending
* on the CPU it is accessed from.
*
* Note that IRQs 0-31 are special - they are local to each CPU.
* As such, the enable set/clear, pending set/clear and active bit
* registers are banked per-cpu for these sources.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>
static DEFINE_SPINLOCK(irq_controller_lock);
struct gic_chip_data {
unsigned int irq_offset;
void __iomem *dist_base;
void __iomem *cpu_base;
};
#ifndef MAX_GIC_NR
#define MAX_GIC_NR 1
#endif
static struct gic_chip_data gic_data[MAX_GIC_NR];
static inline void __iomem *gic_dist_base(unsigned int irq)
{
struct gic_chip_data *gic_data = get_irq_chip_data(irq);
return gic_data->dist_base;
}
static inline void __iomem *gic_cpu_base(unsigned int irq)
{
struct gic_chip_data *gic_data = get_irq_chip_data(irq);
return gic_data->cpu_base;
}
static inline unsigned int gic_irq(unsigned int irq)
{
struct gic_chip_data *gic_data = get_irq_chip_data(irq);
return irq - gic_data->irq_offset;
}
/*
* Routines to acknowledge, disable and enable interrupts
*
* Linux assumes that when we're done with an interrupt we need to
* unmask it, in the same way we need to unmask an interrupt when
* we first enable it.
*
* The GIC has a separate notion of "end of interrupt" to re-enable
* an interrupt after handling, in order to support hardware
* prioritisation.
*
* We can make the GIC behave in the way that Linux expects by making
* our "acknowledge" routine disable the interrupt, then mark it as
* complete.
*/
static void gic_ack_irq(unsigned int irq)
{
u32 mask = 1 << (irq % 32);
spin_lock(&irq_controller_lock);
writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
writel(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI);
spin_unlock(&irq_controller_lock);
}
static void gic_mask_irq(unsigned int irq)
{
u32 mask = 1 << (irq % 32);
spin_lock(&irq_controller_lock);
writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
spin_unlock(&irq_controller_lock);
}
static void gic_unmask_irq(unsigned int irq)
{
u32 mask = 1 << (irq % 32);
spin_lock(&irq_controller_lock);
writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4);
spin_unlock(&irq_controller_lock);
}
#ifdef CONFIG_SMP
static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
{
void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
unsigned int shift = (irq % 4) * 8;
unsigned int cpu = cpumask_first(mask_val);
u32 val;
spin_lock(&irq_controller_lock);
irq_desc[irq].node = cpu;
val = readl(reg) & ~(0xff << shift);
val |= 1 << (cpu + shift);
writel(val, reg);
spin_unlock(&irq_controller_lock);
return 0;
}
#endif
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
struct gic_chip_data *chip_data = get_irq_data(irq);
struct irq_chip *chip = get_irq_chip(irq);
unsigned int cascade_irq, gic_irq;
unsigned long status;
/* primary controller ack'ing */
chip->ack(irq);
spin_lock(&irq_controller_lock);
status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
spin_unlock(&irq_controller_lock);
gic_irq = (status & 0x3ff);
if (gic_irq == 1023)
goto out;
cascade_irq = gic_irq + chip_data->irq_offset;
if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
do_bad_IRQ(cascade_irq, desc);
else
generic_handle_irq(cascade_irq);
out:
/* primary controller unmasking */
chip->unmask(irq);
}
static struct irq_chip gic_chip = {
.name = "GIC",
.ack = gic_ack_irq,
.mask = gic_mask_irq,
.unmask = gic_unmask_irq,
#ifdef CONFIG_SMP
.set_affinity = gic_set_cpu,
#endif
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
if (gic_nr >= MAX_GIC_NR)
BUG();
if (set_irq_data(irq, &gic_data[gic_nr]) != 0)
BUG();
set_irq_chained_handler(irq, gic_handle_cascade_irq);
}
void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
unsigned int irq_start)
{
unsigned int max_irq, i;
u32 cpumask = 1 << smp_processor_id();
if (gic_nr >= MAX_GIC_NR)
BUG();
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
gic_data[gic_nr].dist_base = base;
gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;
writel(0, base + GIC_DIST_CTRL);
/*
* Find out how many interrupts are supported.
*/
max_irq = readl(base + GIC_DIST_CTR) & 0x1f;
max_irq = (max_irq + 1) * 32;
/*
* The GIC only supports up to 1020 interrupt sources.
* Limit this to either the architected maximum, or the
* platform maximum.
*/
if (max_irq > max(1020, NR_IRQS))
max_irq = max(1020, NR_IRQS);
/*
* Set all global interrupts to be level triggered, active low.
*/
for (i = 32; i < max_irq; i += 16)
writel(0, base + GIC_DIST_CONFIG + i * 4 / 16);
/*
* Set all global interrupts to this CPU only.
*/
for (i = 32; i < max_irq; i += 4)
writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
/*
* Set priority on all interrupts.
*/
for (i = 0; i < max_irq; i += 4)
writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
/*
* Disable all interrupts.
*/
for (i = 0; i < max_irq; i += 32)
writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
/*
* Setup the Linux IRQ subsystem.
*/
for (i = irq_start; i < gic_data[gic_nr].irq_offset + max_irq; i++) {
set_irq_chip(i, &gic_chip);
set_irq_chip_data(i, &gic_data[gic_nr]);
set_irq_handler(i, handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
writel(1, base + GIC_DIST_CTRL);
}
void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
{
if (gic_nr >= MAX_GIC_NR)
BUG();
gic_data[gic_nr].cpu_base = base;
writel(0xf0, base + GIC_CPU_PRIMASK);
writel(1, base + GIC_CPU_CTRL);
}
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
unsigned long map = *cpus_addr(*mask);
/* this always happens on GIC0 */
writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif
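A hedged bring-up sketch with hypothetical virtual base addresses: the distributor is programmed once, then each CPU initialises its own interface; 29 here is just an illustrative first SPI number.
static void __init board_init_irq(void)
{
	gic_dist_init(0, BOARD_GIC_DIST_VBASE, 29);	/* hypothetical vaddr */
	gic_cpu_init(0, BOARD_GIC_CPU_VBASE);
	/* A second GIC would be chained with gic_cascade_irq(1, irq). */
}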

View File

@@ -0,0 +1,161 @@
/*
* linux/arch/arm/common/icst307.c
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Support functions for calculating clocks/divisors for the ICST307
* clock generators. See http://www.icst.com/ for more information
* on these devices.
*
* This is an almost identical implementation to the ICST525 clock generator.
* The s2div and idx2s arrays are different.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/hardware/icst307.h>
/*
* Divisors for each OD setting.
*/
static unsigned char s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
unsigned long icst307_khz(const struct icst307_params *p, struct icst307_vco vco)
{
return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * s2div[vco.s]);
}
EXPORT_SYMBOL(icst307_khz);
/*
* Ascending divisor S values.
*/
static unsigned char idx2s[8] = { 1, 6, 3, 4, 7, 5, 2, 0 };
struct icst307_vco
icst307_khz_to_vco(const struct icst307_params *p, unsigned long freq)
{
struct icst307_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
unsigned long f;
unsigned int i = 0, rd, best = (unsigned int)-1;
/*
* First, find the PLL output divisor such
* that the PLL output is within spec.
*/
do {
f = freq * s2div[idx2s[i]];
/*
* f must be between 6MHz and 200MHz (3.3 or 5V)
*/
if (f > 6000 && f <= p->vco_max)
break;
} while (++i < ARRAY_SIZE(idx2s));
if (i >= ARRAY_SIZE(idx2s))
return vco;
vco.s = idx2s[i];
/*
* Now find the closest divisor combination
* which gives a PLL output of 'f'.
*/
for (rd = p->rd_min; rd <= p->rd_max; rd++) {
unsigned long fref_div, f_pll;
unsigned int vd;
int f_diff;
fref_div = (2 * p->ref) / rd;
vd = (f + fref_div / 2) / fref_div;
if (vd < p->vd_min || vd > p->vd_max)
continue;
f_pll = fref_div * vd;
f_diff = f_pll - f;
if (f_diff < 0)
f_diff = -f_diff;
if ((unsigned)f_diff < best) {
vco.v = vd - 8;
vco.r = rd - 2;
if (f_diff == 0)
break;
best = f_diff;
}
}
return vco;
}
EXPORT_SYMBOL(icst307_khz_to_vco);
struct icst307_vco
icst307_ps_to_vco(const struct icst307_params *p, unsigned long period)
{
struct icst307_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
unsigned long f, ps;
unsigned int i = 0, rd, best = (unsigned int)-1;
ps = 1000000000UL / p->vco_max;
/*
* First, find the PLL output divisor such
* that the PLL output is within spec.
*/
do {
f = period / s2div[idx2s[i]];
/*
* f must be between 6MHz and 200MHz (3.3 or 5V)
*/
if (f >= ps && f < 1000000000UL / 6000 + 1)
break;
} while (++i < ARRAY_SIZE(idx2s));
if (i >= ARRAY_SIZE(idx2s))
return vco;
vco.s = idx2s[i];
ps = 500000000UL / p->ref;
/*
* Now find the closest divisor combination
* which gives a PLL output of 'f'.
*/
for (rd = p->rd_min; rd <= p->rd_max; rd++) {
unsigned long f_in_div, f_pll;
unsigned int vd;
int f_diff;
f_in_div = ps * rd;
vd = (f_in_div + f / 2) / f;
if (vd < p->vd_min || vd > p->vd_max)
continue;
f_pll = (f_in_div + vd / 2) / vd;
f_diff = f_pll - f;
if (f_diff < 0)
f_diff = -f_diff;
if ((unsigned)f_diff < best) {
vco.v = vd - 8;
vco.r = rd - 2;
if (f_diff == 0)
break;
best = f_diff;
}
}
return vco;
}
EXPORT_SYMBOL(icst307_ps_to_vco);
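Typical use, assuming a board-supplied struct icst307_params named board_icst_params (hypothetical): convert the requested rate to VCO settings, then convert back to see the rate the hardware will really generate.
extern struct icst307_params board_icst_params;	/* hypothetical board data */
static unsigned long board_icst_round(unsigned long khz)
{
	struct icst307_vco vco = icst307_khz_to_vco(&board_icst_params, khz);

	return icst307_khz(&board_icst_params, vco);	/* achievable rate */
}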

View File

@@ -0,0 +1,160 @@
/*
* linux/arch/arm/common/icst525.c
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Support functions for calculating clocks/divisors for the ICST525
* clock generators. See http://www.icst.com/ for more information
* on these devices.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/hardware/icst525.h>
/*
* Divisors for each OD setting.
*/
static unsigned char s2div[8] = { 10, 2, 8, 4, 5, 7, 9, 6 };
unsigned long icst525_khz(const struct icst525_params *p, struct icst525_vco vco)
{
return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * s2div[vco.s]);
}
EXPORT_SYMBOL(icst525_khz);
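A worked instance of the formula above, with illustrative values:
/*
 * With ref = 24000 kHz, v = 92, r = 22 and s = 1 (s2div[1] == 2):
 *
 *   f = 24000 * 2 * (92 + 8) / ((22 + 2) * 2)
 *     = 4800000 / 48
 *     = 100000 kHz, i.e. a 100 MHz output.
 */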
/*
* Ascending divisor S values.
*/
static unsigned char idx2s[] = { 1, 3, 4, 7, 5, 2, 6, 0 };
struct icst525_vco
icst525_khz_to_vco(const struct icst525_params *p, unsigned long freq)
{
struct icst525_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
unsigned long f;
unsigned int i = 0, rd, best = (unsigned int)-1;
/*
* First, find the PLL output divisor such
* that the PLL output is within spec.
*/
do {
f = freq * s2div[idx2s[i]];
/*
* f must be between 10MHz and
* 320MHz (5V) or 200MHz (3V)
*/
if (f > 10000 && f <= p->vco_max)
break;
} while (++i < ARRAY_SIZE(idx2s));
if (i >= ARRAY_SIZE(idx2s))
return vco;
vco.s = idx2s[i];
/*
* Now find the closest divisor combination
* which gives a PLL output of 'f'.
*/
for (rd = p->rd_min; rd <= p->rd_max; rd++) {
unsigned long fref_div, f_pll;
unsigned int vd;
int f_diff;
fref_div = (2 * p->ref) / rd;
vd = (f + fref_div / 2) / fref_div;
if (vd < p->vd_min || vd > p->vd_max)
continue;
f_pll = fref_div * vd;
f_diff = f_pll - f;
if (f_diff < 0)
f_diff = -f_diff;
if ((unsigned)f_diff < best) {
vco.v = vd - 8;
vco.r = rd - 2;
if (f_diff == 0)
break;
best = f_diff;
}
}
return vco;
}
EXPORT_SYMBOL(icst525_khz_to_vco);
struct icst525_vco
icst525_ps_to_vco(const struct icst525_params *p, unsigned long period)
{
struct icst525_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
unsigned long f, ps;
unsigned int i = 0, rd, best = (unsigned int)-1;
ps = 1000000000UL / p->vco_max;
/*
* First, find the PLL output divisor such
* that the PLL output is within spec.
*/
do {
f = period / s2div[idx2s[i]];
/*
* f must be between 10MHz and
* 320MHz (5V) or 200MHz (3V)
*/
if (f >= ps && f < 100000)
break;
} while (++i < ARRAY_SIZE(idx2s));
if (i >= ARRAY_SIZE(idx2s))
return vco;
vco.s = idx2s[i];
ps = 500000000UL / p->ref;
/*
* Now find the closest divisor combination
* which gives a PLL output of 'f'.
*/
for (rd = p->rd_min; rd <= p->rd_max; rd++) {
unsigned long f_in_div, f_pll;
unsigned int vd;
int f_diff;
f_in_div = ps * rd;
vd = (f_in_div + f / 2) / f;
if (vd < p->vd_min || vd > p->vd_max)
continue;
f_pll = (f_in_div + vd / 2) / vd;
f_diff = f_pll - f;
if (f_diff < 0)
f_diff = -f_diff;
if ((unsigned)f_diff < best) {
vco.v = vd - 8;
vco.r = rd - 2;
if (f_diff == 0)
break;
best = f_diff;
}
}
return vco;
}
EXPORT_SYMBOL(icst525_ps_to_vco);

View File

@@ -0,0 +1,374 @@
/*
* linux/arch/arm/common/it8152.c
*
* Copyright Compulab Ltd, 2002-2007
* Mike Rapoport <mike@compulab.co.il>
*
* The DMA bouncing part is taken from arch/arm/mach-ixp4xx/common-pci.c
* (see this file for respective copyrights)
*
* Thanks to Guennadi Liakhovetski <gl@dsa-ac.de> for IRQ enumeration
* and demux code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/mach/pci.h>
#include <asm/hardware/it8152.h>
#define MAX_SLOTS 21
static void it8152_mask_irq(unsigned int irq)
{
if (irq >= IT8152_LD_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) |
(1 << (irq - IT8152_LD_IRQ(0)))),
IT8152_INTC_LDCNIMR);
} else if (irq >= IT8152_LP_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_LPCNIMR) |
(1 << (irq - IT8152_LP_IRQ(0)))),
IT8152_INTC_LPCNIMR);
} else if (irq >= IT8152_PD_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_PDCNIMR) |
(1 << (irq - IT8152_PD_IRQ(0)))),
IT8152_INTC_PDCNIMR);
}
}
static void it8152_unmask_irq(unsigned int irq)
{
if (irq >= IT8152_LD_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) &
~(1 << (irq - IT8152_LD_IRQ(0)))),
IT8152_INTC_LDCNIMR);
} else if (irq >= IT8152_LP_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_LPCNIMR) &
~(1 << (irq - IT8152_LP_IRQ(0)))),
IT8152_INTC_LPCNIMR);
} else if (irq >= IT8152_PD_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_PDCNIMR) &
~(1 << (irq - IT8152_PD_IRQ(0)))),
IT8152_INTC_PDCNIMR);
}
}
static struct irq_chip it8152_irq_chip = {
.name = "it8152",
.ack = it8152_mask_irq,
.mask = it8152_mask_irq,
.unmask = it8152_unmask_irq,
};
void it8152_init_irq(void)
{
int irq;
__raw_writel((0xffff), IT8152_INTC_PDCNIMR);
__raw_writel((0), IT8152_INTC_PDCNIRR);
__raw_writel((0xffff), IT8152_INTC_LPCNIMR);
__raw_writel((0), IT8152_INTC_LPCNIRR);
__raw_writel((0xffff), IT8152_INTC_LDCNIMR);
__raw_writel((0), IT8152_INTC_LDCNIRR);
for (irq = IT8152_IRQ(0); irq <= IT8152_LAST_IRQ; irq++) {
set_irq_chip(irq, &it8152_irq_chip);
set_irq_handler(irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
void it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
{
int bits_pd, bits_lp, bits_ld;
int i;
while (1) {
/* Read all */
bits_pd = __raw_readl(IT8152_INTC_PDCNIRR);
bits_lp = __raw_readl(IT8152_INTC_LPCNIRR);
bits_ld = __raw_readl(IT8152_INTC_LDCNIRR);
/* Ack */
__raw_writel((~bits_pd), IT8152_INTC_PDCNIRR);
__raw_writel((~bits_lp), IT8152_INTC_LPCNIRR);
__raw_writel((~bits_ld), IT8152_INTC_LDCNIRR);
if (!(bits_ld | bits_lp | bits_pd)) {
/* Re-read to guarantee that there was a moment in
time when all three were 0. */
bits_pd = __raw_readl(IT8152_INTC_PDCNIRR);
bits_lp = __raw_readl(IT8152_INTC_LPCNIRR);
bits_ld = __raw_readl(IT8152_INTC_LDCNIRR);
if (!(bits_ld | bits_lp | bits_pd))
return;
}
bits_pd &= ((1 << IT8152_PD_IRQ_COUNT) - 1);
while (bits_pd) {
i = __ffs(bits_pd);
generic_handle_irq(IT8152_PD_IRQ(i));
bits_pd &= ~(1 << i);
}
bits_lp &= ((1 << IT8152_LP_IRQ_COUNT) - 1);
while (bits_lp) {
i = __ffs(bits_lp);
generic_handle_irq(IT8152_LP_IRQ(i));
bits_lp &= ~(1 << i);
}
bits_ld &= ((1 << IT8152_LD_IRQ_COUNT) - 1);
while (bits_ld) {
i = __ffs(bits_ld);
generic_handle_irq(IT8152_LD_IRQ(i));
bits_ld &= ~(1 << i);
}
}
}
/* mapping for on-chip devices */
int __init it8152_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
if ((dev->vendor == PCI_VENDOR_ID_ITE) &&
(dev->device == PCI_DEVICE_ID_ITE_8152)) {
if ((dev->class >> 8) == PCI_CLASS_MULTIMEDIA_AUDIO)
return IT8152_AUDIO_INT;
if ((dev->class >> 8) == PCI_CLASS_SERIAL_USB)
return IT8152_USB_INT;
if ((dev->class >> 8) == PCI_CLASS_SYSTEM_DMA)
return IT8152_CDMA_INT;
}
return 0;
}
static unsigned long it8152_pci_dev_base_address(struct pci_bus *bus,
unsigned int devfn)
{
unsigned long addr = 0;
if (bus->number == 0) {
if (devfn < PCI_DEVFN(MAX_SLOTS, 0))
addr = (devfn << 8);
} else
addr = (bus->number << 16) | (devfn << 8);
return addr;
}
static int it8152_pci_read_config(struct pci_bus *bus,
unsigned int devfn, int where,
int size, u32 *value)
{
unsigned long addr = it8152_pci_dev_base_address(bus, devfn);
u32 v;
int shift;
shift = (where & 3);
__raw_writel((addr + where), IT8152_PCI_CFG_ADDR);
v = (__raw_readl(IT8152_PCI_CFG_DATA) >> (8 * (shift)));
*value = v;
return PCIBIOS_SUCCESSFUL;
}
static int it8152_pci_write_config(struct pci_bus *bus,
unsigned int devfn, int where,
int size, u32 value)
{
unsigned long addr = it8152_pci_dev_base_address(bus, devfn);
u32 v, vtemp, mask = 0;
int shift;
if (size == 1)
mask = 0xff;
if (size == 2)
mask = 0xffff;
shift = (where & 3);
__raw_writel((addr + where), IT8152_PCI_CFG_ADDR);
vtemp = __raw_readl(IT8152_PCI_CFG_DATA);
if (mask)
vtemp &= ~(mask << (8 * shift));
else
vtemp = 0;
v = (value << (8 * shift));
__raw_writel((addr + where), IT8152_PCI_CFG_ADDR);
__raw_writel((v | vtemp), IT8152_PCI_CFG_DATA);
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops it8152_ops = {
.read = it8152_pci_read_config,
.write = it8152_pci_write_config,
};
static struct resource it8152_io = {
.name = "IT8152 PCI I/O region",
.flags = IORESOURCE_IO,
};
static struct resource it8152_mem = {
.name = "IT8152 PCI memory region",
.start = 0x10000000,
.end = 0x13e00000,
.flags = IORESOURCE_MEM,
};
/*
* The following functions are needed for DMA bouncing.
* The ITE8152 chip can address up to 64MB, so all the devices
* connected to ITE8152 (PCI and USB) should have a limited DMA window
*/
/*
* Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
* other devices.
*/
static int it8152_pci_platform_notify(struct device *dev)
{
if (dev->bus == &pci_bus_type) {
if (dev->dma_mask)
*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
dmabounce_register_dev(dev, 2048, 4096);
}
return 0;
}
static int it8152_pci_platform_notify_remove(struct device *dev)
{
if (dev->bus == &pci_bus_type)
dmabounce_unregister_dev(dev);
return 0;
}
int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
__func__, dma_addr, size);
return (dev->bus == &pci_bus_type) &&
((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
}
/*
* We override these so we properly do dmabounce; otherwise drivers
* are able to set the dma_mask to 0xffffffff and we can no longer
* trap bounces. :(
*
* We just return true on everything except for < 64MB, in which case
* we will fail miserably and die since we can't handle that case.
*/
int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
dev_dbg(&dev->dev, "%s: %llx\n", __func__, mask);
if (mask >= PHYS_OFFSET + SZ_64M - 1)
return 0;
return -EIO;
}
int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
dev_dbg(&dev->dev, "%s: %llx\n", __func__, mask);
if (mask >= PHYS_OFFSET + SZ_64M - 1)
return 0;
return -EIO;
}
int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
{
it8152_io.start = IT8152_IO_BASE + 0x12000;
it8152_io.end = IT8152_IO_BASE + 0x12000 + 0x100000;
sys->mem_offset = 0x10000000;
sys->io_offset = IT8152_IO_BASE;
if (request_resource(&ioport_resource, &it8152_io)) {
printk(KERN_ERR "PCI: unable to allocate IO region\n");
goto err0;
}
if (request_resource(&iomem_resource, &it8152_mem)) {
printk(KERN_ERR "PCI: unable to allocate memory region\n");
goto err1;
}
sys->resource[0] = &it8152_io;
sys->resource[1] = &it8152_mem;
if (platform_notify || platform_notify_remove) {
printk(KERN_ERR "PCI: Can't use platform_notify\n");
goto err2;
}
platform_notify = it8152_pci_platform_notify;
platform_notify_remove = it8152_pci_platform_notify_remove;
return 1;
err2:
release_resource(&it8152_io);
err1:
release_resource(&it8152_mem);
err0:
return -EBUSY;
}
/*
* If we set up a device for bus mastering, we need to check the latency
* timer as we don't have even crappy BIOSes to set it properly.
* The implementation is from arch/i386/pci/i386.c
*/
unsigned int pcibios_max_latency = 255;
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
/* no need to update on-chip OHCI controller */
if ((dev->vendor == PCI_VENDOR_ID_ITE) &&
(dev->device == PCI_DEVICE_ID_ITE_8152) &&
((dev->class >> 8) == PCI_CLASS_SERIAL_USB))
return;
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat < 16)
lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
else if (lat > pcibios_max_latency)
lat = pcibios_max_latency;
else
return;
printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n",
pci_name(dev), lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
struct pci_bus * __init it8152_pci_scan_bus(int nr, struct pci_sys_data *sys)
{
return pci_scan_bus(nr, &it8152_ops, sys);
}
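These hooks slot into the standard ARM PCI glue of this kernel generation; a hedged sketch with a hypothetical board name:
static struct hw_pci board_pci __initdata = {
	.nr_controllers	= 1,
	.setup		= it8152_pci_setup,
	.scan		= it8152_pci_scan_bus,
	.map_irq	= it8152_pci_map_irq,
};
static void __init board_pci_init(void)
{
	pci_common_init(&board_pci);
}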

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,283 @@
/*
* Support code for the SCOOP interface found on various Sharp PDAs
*
* Copyright (c) 2004 Richard Purdie
*
* Based on code written by Sharp/Lineo for 2.4 kernels
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/device.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/gpio.h>
#include <asm/hardware/scoop.h>
/* PCMCIA to Scoop linkage
There is no easy way to link multiple scoop devices into one
single entity for the pxa2xx_pcmcia device, so this structure
is used, which is set up by the platform code.
This file is never modular, so this symbol is always
accessible to the board support files.
*/
struct scoop_pcmcia_config *platform_scoop_config;
EXPORT_SYMBOL(platform_scoop_config);
struct scoop_dev {
void __iomem *base;
struct gpio_chip gpio;
spinlock_t scoop_lock;
unsigned short suspend_clr;
unsigned short suspend_set;
u32 scoop_gpwr;
};
void reset_scoop(struct device *dev)
{
struct scoop_dev *sdev = dev_get_drvdata(dev);
iowrite16(0x0100, sdev->base + SCOOP_MCR); // 00
iowrite16(0x0000, sdev->base + SCOOP_CDR); // 04
iowrite16(0x0000, sdev->base + SCOOP_CCR); // 10
iowrite16(0x0000, sdev->base + SCOOP_IMR); // 18
iowrite16(0x00FF, sdev->base + SCOOP_IRM); // 14
iowrite16(0x0000, sdev->base + SCOOP_ISR); // 1C
iowrite16(0x0000, sdev->base + SCOOP_IRM);
}
static void __scoop_gpio_set(struct scoop_dev *sdev,
unsigned offset, int value)
{
unsigned short gpwr;
gpwr = ioread16(sdev->base + SCOOP_GPWR);
if (value)
gpwr |= 1 << (offset + 1);
else
gpwr &= ~(1 << (offset + 1));
iowrite16(gpwr, sdev->base + SCOOP_GPWR);
}
static void scoop_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
unsigned long flags;
spin_lock_irqsave(&sdev->scoop_lock, flags);
__scoop_gpio_set(sdev, offset, value);
spin_unlock_irqrestore(&sdev->scoop_lock, flags);
}
static int scoop_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
/* XXX: I'm unsure, but it seems so */
return ioread16(sdev->base + SCOOP_GPRR) & (1 << (offset + 1));
}
static int scoop_gpio_direction_input(struct gpio_chip *chip,
unsigned offset)
{
struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
unsigned long flags;
unsigned short gpcr;
spin_lock_irqsave(&sdev->scoop_lock, flags);
gpcr = ioread16(sdev->base + SCOOP_GPCR);
gpcr &= ~(1 << (offset + 1));
iowrite16(gpcr, sdev->base + SCOOP_GPCR);
spin_unlock_irqrestore(&sdev->scoop_lock, flags);
return 0;
}
static int scoop_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
unsigned long flags;
unsigned short gpcr;
spin_lock_irqsave(&sdev->scoop_lock, flags);
__scoop_gpio_set(sdev, offset, value);
gpcr = ioread16(sdev->base + SCOOP_GPCR);
gpcr |= 1 << (offset + 1);
iowrite16(gpcr, sdev->base + SCOOP_GPCR);
spin_unlock_irqrestore(&sdev->scoop_lock, flags);
return 0;
}
unsigned short read_scoop_reg(struct device *dev, unsigned short reg)
{
struct scoop_dev *sdev = dev_get_drvdata(dev);
return ioread16(sdev->base + reg);
}
void write_scoop_reg(struct device *dev, unsigned short reg, unsigned short data)
{
struct scoop_dev *sdev = dev_get_drvdata(dev);
iowrite16(data, sdev->base + reg);
}
EXPORT_SYMBOL(reset_scoop);
EXPORT_SYMBOL(read_scoop_reg);
EXPORT_SYMBOL(write_scoop_reg);
static void check_scoop_reg(struct scoop_dev *sdev)
{
unsigned short mcr;
mcr = ioread16(sdev->base + SCOOP_MCR);
if ((mcr & 0x100) == 0)
iowrite16(0x0101, sdev->base + SCOOP_MCR);
}
#ifdef CONFIG_PM
static int scoop_suspend(struct platform_device *dev, pm_message_t state)
{
struct scoop_dev *sdev = platform_get_drvdata(dev);
check_scoop_reg(sdev);
sdev->scoop_gpwr = ioread16(sdev->base + SCOOP_GPWR);
iowrite16((sdev->scoop_gpwr & ~sdev->suspend_clr) | sdev->suspend_set, sdev->base + SCOOP_GPWR);
return 0;
}
static int scoop_resume(struct platform_device *dev)
{
struct scoop_dev *sdev = platform_get_drvdata(dev);
check_scoop_reg(sdev);
iowrite16(sdev->scoop_gpwr, sdev->base + SCOOP_GPWR);
return 0;
}
#else
#define scoop_suspend NULL
#define scoop_resume NULL
#endif
static int __devinit scoop_probe(struct platform_device *pdev)
{
struct scoop_dev *devptr;
struct scoop_config *inf;
struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int ret;
if (!mem)
return -EINVAL;
devptr = kzalloc(sizeof(struct scoop_dev), GFP_KERNEL);
if (!devptr)
return -ENOMEM;
spin_lock_init(&devptr->scoop_lock);
inf = pdev->dev.platform_data;
devptr->base = ioremap(mem->start, mem->end - mem->start + 1);
if (!devptr->base) {
ret = -ENOMEM;
goto err_ioremap;
}
platform_set_drvdata(pdev, devptr);
printk("Sharp Scoop Device found at 0x%08x -> 0x%8p\n", (unsigned int)mem->start, devptr->base);
iowrite16(0x0140, devptr->base + SCOOP_MCR);
reset_scoop(&pdev->dev);
iowrite16(0x0000, devptr->base + SCOOP_CPR);
iowrite16(inf->io_dir & 0xffff, devptr->base + SCOOP_GPCR);
iowrite16(inf->io_out & 0xffff, devptr->base + SCOOP_GPWR);
devptr->suspend_clr = inf->suspend_clr;
devptr->suspend_set = inf->suspend_set;
devptr->gpio.base = -1;
if (inf->gpio_base != 0) {
devptr->gpio.label = dev_name(&pdev->dev);
devptr->gpio.base = inf->gpio_base;
devptr->gpio.ngpio = 12; /* PA11 = 0, PA12 = 1, etc. up to PA22 = 11 */
devptr->gpio.set = scoop_gpio_set;
devptr->gpio.get = scoop_gpio_get;
devptr->gpio.direction_input = scoop_gpio_direction_input;
devptr->gpio.direction_output = scoop_gpio_direction_output;
ret = gpiochip_add(&devptr->gpio);
if (ret)
goto err_gpio;
}
return 0;
err_gpio:
platform_set_drvdata(pdev, NULL);
err_ioremap:
iounmap(devptr->base);
kfree(devptr);
return ret;
}
static int __devexit scoop_remove(struct platform_device *pdev)
{
struct scoop_dev *sdev = platform_get_drvdata(pdev);
int ret;
if (!sdev)
return -EINVAL;
if (sdev->gpio.base != -1) {
ret = gpiochip_remove(&sdev->gpio);
if (ret) {
dev_err(&pdev->dev, "Can't remove gpio chip: %d\n", ret);
return ret;
}
}
platform_set_drvdata(pdev, NULL);
iounmap(sdev->base);
kfree(sdev);
return 0;
}
static struct platform_driver scoop_driver = {
.probe = scoop_probe,
.remove = __devexit_p(scoop_remove),
.suspend = scoop_suspend,
.resume = scoop_resume,
.driver = {
.name = "sharp-scoop",
},
};
static int __init scoop_init(void)
{
return platform_driver_register(&scoop_driver);
}
subsys_initcall(scoop_init);
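Board code instantiates this driver as a platform device; a minimal sketch with hypothetical register addresses and GPIO base (the scoop_config fields mirror those consumed in scoop_probe() above):
static struct scoop_config board_scoop_setup = {
	.io_dir		= 0x01c0,	/* illustrative direction mask */
	.io_out		= 0x0040,	/* illustrative initial levels */
	.gpio_base	= 96,		/* illustrative GPIO number */
};
static struct resource board_scoop_resources[] = {
	{
		.start	= 0x10800000,	/* hypothetical chip address */
		.end	= 0x10800fff,
		.flags	= IORESOURCE_MEM,
	},
};
static struct platform_device board_scoop_device = {
	.name		= "sharp-scoop",
	.id		= -1,
	.resource	= board_scoop_resources,
	.num_resources	= ARRAY_SIZE(board_scoop_resources),
	.dev		= {
		.platform_data = &board_scoop_setup,
	},
};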

View File

@@ -0,0 +1,62 @@
/*
* Hardware parameter area specific to Sharp SL series devices
*
* Copyright (c) 2005 Richard Purdie
*
* Based on Sharp's 2.4 kernel patches
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mach/sharpsl_param.h>
/*
* Certain hardware parameters determined at the time of device manufacture,
typically including LCD parameters, are loaded by the bootloader at the
* address PARAM_BASE. As the kernel will overwrite them, we need to store
* them early in the boot process, then pass them to the appropriate drivers.
* Not all devices use all parameters but the format is common to all.
*/
#ifdef CONFIG_ARCH_SA1100
#define PARAM_BASE 0xe8ffc000
#else
#define PARAM_BASE 0xa0000a00
#endif
#define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a )
#define COMADJ_MAGIC MAGIC_CHG('C','M','A','D')
#define UUID_MAGIC MAGIC_CHG('U','U','I','D')
#define TOUCH_MAGIC MAGIC_CHG('T','U','C','H')
#define AD_MAGIC MAGIC_CHG('B','V','A','D')
#define PHAD_MAGIC MAGIC_CHG('P','H','A','D')
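To make the byte order explicit:
/*
 * Worked example of the packing: MAGIC_CHG('C','M','A','D')
 *   = ('D' << 24) | ('A' << 16) | ('M' << 8) | 'C'
 *   = 0x44414D43
 * i.e. the byte sequence "CMAD" as stored by a little-endian CPU.
 */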
struct sharpsl_param_info sharpsl_param;
EXPORT_SYMBOL(sharpsl_param);
void sharpsl_save_param(void)
{
memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info));
if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
sharpsl_param.comadj=-1;
if (sharpsl_param.phad_keyword != PHAD_MAGIC)
sharpsl_param.phadadj=-1;
if (sharpsl_param.uuid_keyword != UUID_MAGIC)
sharpsl_param.uuid[0]=-1;
if (sharpsl_param.touch_keyword != TOUCH_MAGIC)
sharpsl_param.touch_xp=-1;
if (sharpsl_param.adadj_keyword != AD_MAGIC)
sharpsl_param.adadj=-1;
}
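The function must run before the kernel reclaims the parameter area, so machine support typically calls it from its early fixup hook; a hedged sketch using the fixup signature of this kernel generation:
static void __init board_fixup(struct machine_desc *desc, struct tag *tags,
			       char **cmdline, struct meminfo *mi)
{
	sharpsl_save_param();	/* snapshot PARAM_BASE before it is reused */
}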

View File

@@ -0,0 +1,95 @@
/*
* linux/arch/arm/common/time-acorn.c
*
* Copyright (c) 1996-2000 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Changelog:
* 24-Sep-1996 RMK Created
* 10-Oct-1996 RMK Brought up to date with arch-sa110eval
* 04-Dec-1997 RMK Updated for new arch/arm/time.c
* 13-Jun-2004 DS Moved to arch/arm/common b/c shared w/CLPS7500
*/
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/hardware/ioc.h>
#include <asm/mach/time.h>
unsigned long ioc_timer_gettimeoffset(void)
{
unsigned int count1, count2, status;
long offset;
ioc_writeb (0, IOC_T0LATCH);
barrier ();
count1 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8);
barrier ();
status = ioc_readb(IOC_IRQREQA);
barrier ();
ioc_writeb (0, IOC_T0LATCH);
barrier ();
count2 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8);
offset = count2;
if (count2 < count1) {
/*
* We have not had an interrupt between reading count1
* and count2.
*/
if (status & (1 << 5))
offset -= LATCH;
} else if (count2 > count1) {
/*
* We have just had another interrupt between reading
* count1 and count2.
*/
offset -= LATCH;
}
offset = (LATCH - offset) * (tick_nsec / 1000);
return (offset + LATCH/2) / LATCH;
}
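A worked pass through the arithmetic, with illustrative values (a 2MHz IOC clock at HZ=100 would give LATCH = 20000 and tick_nsec = 10000000):
/*
 * If count2 == 15000 and no tick is pending:
 *   offset = (20000 - 15000) * (10000000 / 1000) = 50000000
 *   return (50000000 + 10000) / 20000 == 2500
 * i.e. we are roughly 2500 microseconds into the current tick.
 */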
void __init ioctime_init(void)
{
ioc_writeb(LATCH & 255, IOC_T0LTCHL);
ioc_writeb(LATCH >> 8, IOC_T0LTCHH);
ioc_writeb(0, IOC_T0GO);
}
static irqreturn_t
ioc_timer_interrupt(int irq, void *dev_id)
{
timer_tick();
return IRQ_HANDLED;
}
static struct irqaction ioc_timer_irq = {
.name = "timer",
.flags = IRQF_DISABLED,
.handler = ioc_timer_interrupt
};
/*
* Set up timer interrupt.
*/
static void __init ioc_timer_init(void)
{
ioctime_init();
setup_irq(IRQ_TIMER, &ioc_timer_irq);
}
struct sys_timer ioc_timer = {
.init = ioc_timer_init,
.offset = ioc_timer_gettimeoffset,
};

View File

@@ -0,0 +1,507 @@
/*
* Generic library functions for the microengines found on the Intel
* IXP2000 series of network processors.
*
* Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
* Dedicated to Marija Kulikova.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of the
* License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/hardware/uengine.h>
#if defined(CONFIG_ARCH_IXP2000)
#define IXP_UENGINE_CSR_VIRT_BASE IXP2000_UENGINE_CSR_VIRT_BASE
#define IXP_PRODUCT_ID IXP2000_PRODUCT_ID
#define IXP_MISC_CONTROL IXP2000_MISC_CONTROL
#define IXP_RESET1 IXP2000_RESET1
#else
#if defined(CONFIG_ARCH_IXP23XX)
#define IXP_UENGINE_CSR_VIRT_BASE IXP23XX_UENGINE_CSR_VIRT_BASE
#define IXP_PRODUCT_ID IXP23XX_PRODUCT_ID
#define IXP_MISC_CONTROL IXP23XX_MISC_CONTROL
#define IXP_RESET1 IXP23XX_RESET1
#else
#error unknown platform
#endif
#endif
#define USTORE_ADDRESS 0x000
#define USTORE_DATA_LOWER 0x004
#define USTORE_DATA_UPPER 0x008
#define CTX_ENABLES 0x018
#define CC_ENABLE 0x01c
#define CSR_CTX_POINTER 0x020
#define INDIRECT_CTX_STS 0x040
#define ACTIVE_CTX_STS 0x044
#define INDIRECT_CTX_SIG_EVENTS 0x048
#define INDIRECT_CTX_WAKEUP_EVENTS 0x050
#define NN_PUT 0x080
#define NN_GET 0x084
#define TIMESTAMP_LOW 0x0c0
#define TIMESTAMP_HIGH 0x0c4
#define T_INDEX_BYTE_INDEX 0x0f4
#define LOCAL_CSR_STATUS 0x180
u32 ixp2000_uengine_mask;
static void *ixp2000_uengine_csr_area(int uengine)
{
return ((void *)IXP_UENGINE_CSR_VIRT_BASE) + (uengine << 10);
}
/*
* LOCAL_CSR_STATUS=1 after a read or write to a microengine's CSR
* space means that the microengine we tried to access was also trying
* to access its own CSR space on the same clock cycle as we did. When
* this happens, we lose the arbitration process by default, and the
* read or write we tried to do was not actually performed, so we try
* again until it succeeds.
*/
u32 ixp2000_uengine_csr_read(int uengine, int offset)
{
void *uebase;
u32 *local_csr_status;
u32 *reg;
u32 value;
uebase = ixp2000_uengine_csr_area(uengine);
local_csr_status = (u32 *)(uebase + LOCAL_CSR_STATUS);
reg = (u32 *)(uebase + offset);
do {
value = ixp2000_reg_read(reg);
} while (ixp2000_reg_read(local_csr_status) & 1);
return value;
}
EXPORT_SYMBOL(ixp2000_uengine_csr_read);
void ixp2000_uengine_csr_write(int uengine, int offset, u32 value)
{
void *uebase;
u32 *local_csr_status;
u32 *reg;
uebase = ixp2000_uengine_csr_area(uengine);
local_csr_status = (u32 *)(uebase + LOCAL_CSR_STATUS);
reg = (u32 *)(uebase + offset);
do {
ixp2000_reg_write(reg, value);
} while (ixp2000_reg_read(local_csr_status) & 1);
}
EXPORT_SYMBOL(ixp2000_uengine_csr_write);
void ixp2000_uengine_reset(u32 uengine_mask)
{
u32 value;
value = ixp2000_reg_read(IXP_RESET1) & ~ixp2000_uengine_mask;
uengine_mask &= ixp2000_uengine_mask;
ixp2000_reg_wrb(IXP_RESET1, value | uengine_mask);
ixp2000_reg_wrb(IXP_RESET1, value);
}
EXPORT_SYMBOL(ixp2000_uengine_reset);
void ixp2000_uengine_set_mode(int uengine, u32 mode)
{
/*
* CTL_STR_PAR_EN: unconditionally enable parity checking on
* control store.
*/
mode |= 0x10000000;
ixp2000_uengine_csr_write(uengine, CTX_ENABLES, mode);
/*
* Enable updating of condition codes.
*/
ixp2000_uengine_csr_write(uengine, CC_ENABLE, 0x00002000);
/*
* Initialise other per-microengine registers.
*/
ixp2000_uengine_csr_write(uengine, NN_PUT, 0x00);
ixp2000_uengine_csr_write(uengine, NN_GET, 0x00);
ixp2000_uengine_csr_write(uengine, T_INDEX_BYTE_INDEX, 0);
}
EXPORT_SYMBOL(ixp2000_uengine_set_mode);
static int make_even_parity(u32 x)
{
return hweight32(x) & 1;
}
static void ustore_write(int uengine, u64 insn)
{
/*
* Generate even parity for top and bottom 20 bits.
*/
insn |= (u64)make_even_parity((insn >> 20) & 0x000fffff) << 41;
insn |= (u64)make_even_parity(insn & 0x000fffff) << 40;
/*
* Write to microstore. The second write auto-increments
* the USTORE_ADDRESS index register.
*/
ixp2000_uengine_csr_write(uengine, USTORE_DATA_LOWER, (u32)insn);
ixp2000_uengine_csr_write(uengine, USTORE_DATA_UPPER, (u32)(insn >> 32));
}
void ixp2000_uengine_load_microcode(int uengine, u8 *ucode, int insns)
{
int i;
/*
* Start writing to microstore at address 0.
*/
ixp2000_uengine_csr_write(uengine, USTORE_ADDRESS, 0x80000000);
for (i = 0; i < insns; i++) {
u64 insn;
insn = (((u64)ucode[0]) << 32) |
(((u64)ucode[1]) << 24) |
(((u64)ucode[2]) << 16) |
(((u64)ucode[3]) << 8) |
((u64)ucode[4]);
ucode += 5;
ustore_write(uengine, insn);
}
/*
* Pad with a few NOPs at the end (to avoid the microengine
* aborting as it prefetches beyond the last instruction), unless
* we run off the end of the instruction store first, at which
* point the address register will wrap back to zero.
*/
for (i = 0; i < 4; i++) {
u32 addr;
addr = ixp2000_uengine_csr_read(uengine, USTORE_ADDRESS);
if (addr == 0x80000000)
break;
ustore_write(uengine, 0xf0000c0300ULL);
}
/*
* End programming.
*/
ixp2000_uengine_csr_write(uengine, USTORE_ADDRESS, 0x00000000);
}
EXPORT_SYMBOL(ixp2000_uengine_load_microcode);
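A hedged sketch tying the API together for one microengine; the mode word and context mask are chip- and application-specific, so the values here are only illustrative:
static void board_start_uengine(int uengine, u8 *ucode, int insns)
{
	ixp2000_uengine_reset(1 << uengine);
	ixp2000_uengine_set_mode(uengine, 0);		/* mode flags omitted */
	ixp2000_uengine_load_microcode(uengine, ucode, insns);
	ixp2000_uengine_init_context(uengine, 0, 0);	/* context 0, PC 0 */
	ixp2000_uengine_start_contexts(uengine, 1 << 0);
}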
void ixp2000_uengine_init_context(int uengine, int context, int pc)
{
/*
* Select the right context for indirect access.
*/
ixp2000_uengine_csr_write(uengine, CSR_CTX_POINTER, context);
/*
* Initialise signal masks to immediately go to Ready state.
*/
ixp2000_uengine_csr_write(uengine, INDIRECT_CTX_SIG_EVENTS, 1);
ixp2000_uengine_csr_write(uengine, INDIRECT_CTX_WAKEUP_EVENTS, 1);
/*
* Set program counter.
*/
ixp2000_uengine_csr_write(uengine, INDIRECT_CTX_STS, pc);
}
EXPORT_SYMBOL(ixp2000_uengine_init_context);
void ixp2000_uengine_start_contexts(int uengine, u8 ctx_mask)
{
u32 mask;
/*
* Enable the specified context to go to Executing state.
*/
mask = ixp2000_uengine_csr_read(uengine, CTX_ENABLES);
mask |= ctx_mask << 8;
ixp2000_uengine_csr_write(uengine, CTX_ENABLES, mask);
}
EXPORT_SYMBOL(ixp2000_uengine_start_contexts);
void ixp2000_uengine_stop_contexts(int uengine, u8 ctx_mask)
{
u32 mask;
/*
* Disable the Ready->Executing transition. Note that this
* does not stop the context until it voluntarily yields.
*/
mask = ixp2000_uengine_csr_read(uengine, CTX_ENABLES);
mask &= ~(ctx_mask << 8);
ixp2000_uengine_csr_write(uengine, CTX_ENABLES, mask);
}
EXPORT_SYMBOL(ixp2000_uengine_stop_contexts);
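/*
 * Illustrative sketch, not in the original driver: restrict a
 * microengine to contexts 0 and 1.  CTX_ENABLES bits 8..15 gate the
 * Ready->Executing transition, which is why the helpers above shift
 * ctx_mask left by 8.
 */
static void __maybe_unused uengine_run_two_contexts(int uengine)
{
	ixp2000_uengine_stop_contexts(uengine, 0xff);	/* park all */
	ixp2000_uengine_start_contexts(uengine, 0x03);	/* run 0 and 1 */
}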
static int check_ixp_type(struct ixp2000_uengine_code *c)
{
u32 product_id;
u32 rev;
product_id = ixp2000_reg_read(IXP_PRODUCT_ID);
if (((product_id >> 16) & 0x1f) != 0)
return 0;
switch ((product_id >> 8) & 0xff) {
#ifdef CONFIG_ARCH_IXP2000
case 0: /* IXP2800 */
if (!(c->cpu_model_bitmask & 4))
return 0;
break;
case 1: /* IXP2850 */
if (!(c->cpu_model_bitmask & 8))
return 0;
break;
case 2: /* IXP2400 */
if (!(c->cpu_model_bitmask & 2))
return 0;
break;
#endif
#ifdef CONFIG_ARCH_IXP23XX
case 4: /* IXP23xx */
if (!(c->cpu_model_bitmask & 0x3f0))
return 0;
break;
#endif
default:
return 0;
}
rev = product_id & 0xff;
if (rev < c->cpu_min_revision || rev > c->cpu_max_revision)
return 0;
return 1;
}
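/*
 * Illustrative sketch: a minimal descriptor that check_ixp_type()
 * would accept on any revision of an IXP2400 (model bit 1 << 1).
 * Only the fields consulted above are shown; a real descriptor would
 * also carry instructions and initial register values.
 */
static struct ixp2000_uengine_code example_code __maybe_unused = {
	.cpu_model_bitmask	= 1 << 1,	/* IXP2400 */
	.cpu_min_revision	= 0,
	.cpu_max_revision	= 255,
};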
static void generate_ucode(u8 *ucode, u32 *gpr_a, u32 *gpr_b)
{
int offset;
int i;
offset = 0;
for (i = 0; i < 128; i++) {
u8 b3;
u8 b2;
u8 b1;
u8 b0;
b3 = (gpr_a[i] >> 24) & 0xff;
b2 = (gpr_a[i] >> 16) & 0xff;
b1 = (gpr_a[i] >> 8) & 0xff;
b0 = gpr_a[i] & 0xff;
// immed[@ai, (b1 << 8) | b0]
// 11110000 0000VVVV VVVV11VV VVVVVV00 1IIIIIII
ucode[offset++] = 0xf0;
ucode[offset++] = (b1 >> 4);
ucode[offset++] = (b1 << 4) | 0x0c | (b0 >> 6);
ucode[offset++] = (b0 << 2);
ucode[offset++] = 0x80 | i;
// immed_w1[@ai, (b3 << 8) | b2]
// 11110100 0100VVVV VVVV11VV VVVVVV00 1IIIIIII
ucode[offset++] = 0xf4;
ucode[offset++] = 0x40 | (b3 >> 4);
ucode[offset++] = (b3 << 4) | 0x0c | (b2 >> 6);
ucode[offset++] = (b2 << 2);
ucode[offset++] = 0x80 | i;
}
for (i = 0; i < 128; i++) {
u8 b3;
u8 b2;
u8 b1;
u8 b0;
b3 = (gpr_b[i] >> 24) & 0xff;
b2 = (gpr_b[i] >> 16) & 0xff;
b1 = (gpr_b[i] >> 8) & 0xff;
b0 = gpr_b[i] & 0xff;
// immed[@bi, (b1 << 8) | b0]
// 11110000 0000VVVV VVVV001I IIIIII11 VVVVVVVV
ucode[offset++] = 0xf0;
ucode[offset++] = (b1 >> 4);
ucode[offset++] = (b1 << 4) | 0x02 | (i >> 6);
ucode[offset++] = (i << 2) | 0x03;
ucode[offset++] = b0;
// immed_w1[@bi, (b3 << 8) | b2]
// 11110100 0100VVVV VVVV001I IIIIII11 VVVVVVVV
ucode[offset++] = 0xf4;
ucode[offset++] = 0x40 | (b3 >> 4);
ucode[offset++] = (b3 << 4) | 0x02 | (i >> 6);
ucode[offset++] = (i << 2) | 0x03;
ucode[offset++] = b2;
}
// ctx_arb[kill]
ucode[offset++] = 0xe0;
ucode[offset++] = 0x00;
ucode[offset++] = 0x01;
ucode[offset++] = 0x00;
ucode[offset++] = 0x00;
}
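/*
 * Worked example of the immed[] encoding above: loading the lower
 * halfword 0x1234 into A-bank register 5 means b1 = 0x12, b0 = 0x34,
 * i = 5, which packs to the five bytes f0 01 2c d0 85.
 */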
static int set_initial_registers(int uengine, struct ixp2000_uengine_code *c)
{
int per_ctx_regs;
u32 *gpr_a;
u32 *gpr_b;
u8 *ucode;
int i;
gpr_a = kzalloc(128 * sizeof(u32), GFP_KERNEL);
gpr_b = kzalloc(128 * sizeof(u32), GFP_KERNEL);
ucode = kmalloc(513 * 5, GFP_KERNEL);
if (gpr_a == NULL || gpr_b == NULL || ucode == NULL) {
kfree(ucode);
kfree(gpr_b);
kfree(gpr_a);
return 1;
}
per_ctx_regs = 16;
if (c->uengine_parameters & IXP2000_UENGINE_4_CONTEXTS)
per_ctx_regs = 32;
for (i = 0; i < 256; i++) {
struct ixp2000_reg_value *r = c->initial_reg_values + i;
u32 *bank;
int inc;
int j;
if (r->reg == -1)
break;
bank = (r->reg & 0x400) ? gpr_b : gpr_a;
inc = (r->reg & 0x80) ? 128 : per_ctx_regs;
j = r->reg & 0x7f;
while (j < 128) {
bank[j] = r->value;
j += inc;
}
}
generate_ucode(ucode, gpr_a, gpr_b);
ixp2000_uengine_load_microcode(uengine, ucode, 513);
ixp2000_uengine_init_context(uengine, 0, 0);
ixp2000_uengine_start_contexts(uengine, 0x01);
for (i = 0; i < 100; i++) {
u32 status;
status = ixp2000_uengine_csr_read(uengine, ACTIVE_CTX_STS);
if (!(status & 0x80000000))
break;
}
ixp2000_uengine_stop_contexts(uengine, 0x01);
kfree(ucode);
kfree(gpr_b);
kfree(gpr_a);
return !!(i == 100);
}
int ixp2000_uengine_load(int uengine, struct ixp2000_uengine_code *c)
{
int ctx;
if (!check_ixp_type(c))
return 1;
if (!(ixp2000_uengine_mask & (1 << uengine)))
return 1;
ixp2000_uengine_reset(1 << uengine);
ixp2000_uengine_set_mode(uengine, c->uengine_parameters);
if (set_initial_registers(uengine, c))
return 1;
ixp2000_uengine_load_microcode(uengine, c->insns, c->num_insns);
for (ctx = 0; ctx < 8; ctx++)
ixp2000_uengine_init_context(uengine, ctx, 0);
return 0;
}
EXPORT_SYMBOL(ixp2000_uengine_load);
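/*
 * Illustrative call sequence for a hypothetical driver (assuming
 * "code" is a fully populated struct ixp2000_uengine_code):
 *
 *	if (ixp2000_uengine_load(0, &code))
 *		return -ENODEV;
 *	ixp2000_uengine_start_contexts(0, 0x01);
 *
 * ixp2000_uengine_load() leaves all eight contexts initialised at
 * PC 0 but stopped; starting them is a separate, explicit step.
 */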
static int __init ixp2000_uengine_init(void)
{
int uengine;
u32 value;
/*
* Determine number of microengines present.
*/
switch ((ixp2000_reg_read(IXP_PRODUCT_ID) >> 8) & 0x1fff) {
#ifdef CONFIG_ARCH_IXP2000
case 0: /* IXP2800 */
case 1: /* IXP2850 */
ixp2000_uengine_mask = 0x00ff00ff;
break;
case 2: /* IXP2400 */
ixp2000_uengine_mask = 0x000f000f;
break;
#endif
#ifdef CONFIG_ARCH_IXP23XX
case 4: /* IXP23xx */
ixp2000_uengine_mask = (*IXP23XX_EXP_CFG_FUSE >> 8) & 0xf;
break;
#endif
default:
printk(KERN_INFO "Detected unknown IXP2000 model (%.8x)\n",
(unsigned int)ixp2000_reg_read(IXP_PRODUCT_ID));
ixp2000_uengine_mask = 0x00000000;
break;
}
/*
* Reset microengines.
*/
ixp2000_uengine_reset(ixp2000_uengine_mask);
/*
* Synchronise timestamp counters across all microengines.
*/
value = ixp2000_reg_read(IXP_MISC_CONTROL);
ixp2000_reg_wrb(IXP_MISC_CONTROL, value & ~0x80);
for (uengine = 0; uengine < 32; uengine++) {
if (ixp2000_uengine_mask & (1 << uengine)) {
ixp2000_uengine_csr_write(uengine, TIMESTAMP_LOW, 0);
ixp2000_uengine_csr_write(uengine, TIMESTAMP_HIGH, 0);
}
}
ixp2000_reg_wrb(IXP_MISC_CONTROL, value | 0x80);
return 0;
}
subsys_initcall(ixp2000_uengine_init);

View File

@@ -0,0 +1,92 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <asm/system.h>
#include <asm/mach/pci.h>
#define MAX_SLOTS 7
#define CONFIG_CMD(bus, devfn, where) (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3))
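/*
 * CONFIG_CMD() forms a PCI configuration Mechanism #1 address:
 * bit 31 = enable, bits 23:16 = bus, bits 15:8 = devfn (device and
 * function), bits 7:2 = dword offset.  For example, bus 0, devfn 0x08
 * (slot 1, function 0), where 0x10 (BAR0) yields 0x80000810.
 */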
static int
via82c505_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
	outl(CONFIG_CMD(bus, devfn, where), 0xCF8);
	switch (size) {
	case 1:
		*value = inb(0xCFC + (where & 3));
		break;
	case 2:
		*value = inw(0xCFC + (where & 2));
		break;
	case 4:
		*value = inl(0xCFC);
		break;
}
return PCIBIOS_SUCCESSFUL;
}
static int
via82c505_write_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 value)
{
	outl(CONFIG_CMD(bus, devfn, where), 0xCF8);
	switch (size) {
	case 1:
		outb(value, 0xCFC + (where & 3));
		break;
	case 2:
		outw(value, 0xCFC + (where & 2));
		break;
	case 4:
		outl(value, 0xCFC);
		break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops via82c505_ops = {
.read = via82c505_read_config,
.write = via82c505_write_config,
};
void __init via82c505_preinit(void)
{
printk(KERN_DEBUG "PCI: VIA 82c505\n");
	if (!request_region(0xA8, 2, "via config")) {
		printk(KERN_WARNING "VIA 82c505: Unable to request region 0xA8\n");
		return;
	}
	if (!request_region(0xCF8, 8, "pci config")) {
		printk(KERN_WARNING "VIA 82c505: Unable to request region 0xCF8\n");
		release_region(0xA8, 2);
		return;
	}

	/* Enable compatible mode */
	outb(0x96, 0xA8);
	outb(0x18, 0xA9);
	outb(0x93, 0xA8);
	outb(0xd0, 0xA9);
}
int __init via82c505_setup(int nr, struct pci_sys_data *sys)
{
return (nr == 0);
}
struct pci_bus * __init via82c505_scan_bus(int nr, struct pci_sys_data *sysdata)
{
if (nr == 0)
return pci_scan_bus(0, &via82c505_ops, sysdata);
return NULL;
}

View File

@@ -0,0 +1,402 @@
/*
* linux/arch/arm/common/vic.c
*
* Copyright (C) 1999 - 2003 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/sysdev.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <asm/mach/irq.h>
#include <asm/hardware/vic.h>
static void vic_ack_irq(unsigned int irq)
{
void __iomem *base = get_irq_chip_data(irq);
irq &= 31;
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
/* moreover, clear the soft-triggered, in case it was the reason */
writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
}
static void vic_mask_irq(unsigned int irq)
{
void __iomem *base = get_irq_chip_data(irq);
irq &= 31;
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
}
static void vic_unmask_irq(unsigned int irq)
{
void __iomem *base = get_irq_chip_data(irq);
irq &= 31;
writel(1 << irq, base + VIC_INT_ENABLE);
}
/**
* vic_init2 - common initialisation code
* @base: Base of the VIC.
*
 * Common initialisation code for registration
* and resume.
*/
static void vic_init2(void __iomem *base)
{
int i;
for (i = 0; i < 16; i++) {
void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
writel(VIC_VECT_CNTL_ENABLE | i, reg);
}
writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
#if defined(CONFIG_PM)
/**
* struct vic_device - VIC PM device
* @sysdev: The system device which is registered.
* @irq: The IRQ number for the base of the VIC.
* @base: The register base for the VIC.
* @resume_sources: A bitmask of interrupts for resume.
* @resume_irqs: The IRQs enabled for resume.
* @int_select: Save for VIC_INT_SELECT.
* @int_enable: Save for VIC_INT_ENABLE.
* @soft_int: Save for VIC_INT_SOFT.
* @protect: Save for VIC_PROTECT.
*/
struct vic_device {
struct sys_device sysdev;
void __iomem *base;
int irq;
u32 resume_sources;
u32 resume_irqs;
u32 int_select;
u32 int_enable;
u32 soft_int;
u32 protect;
};
/* we cannot allocate memory when VICs are initially registered */
static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
static inline struct vic_device *to_vic(struct sys_device *sys)
{
return container_of(sys, struct vic_device, sysdev);
}
static int vic_id;
static int vic_class_resume(struct sys_device *dev)
{
struct vic_device *vic = to_vic(dev);
void __iomem *base = vic->base;
printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);
/* re-initialise static settings */
vic_init2(base);
writel(vic->int_select, base + VIC_INT_SELECT);
writel(vic->protect, base + VIC_PROTECT);
/* set the enabled ints and then clear the non-enabled */
writel(vic->int_enable, base + VIC_INT_ENABLE);
writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR);
/* and the same for the soft-int register */
writel(vic->soft_int, base + VIC_INT_SOFT);
writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
return 0;
}
static int vic_class_suspend(struct sys_device *dev, pm_message_t state)
{
struct vic_device *vic = to_vic(dev);
void __iomem *base = vic->base;
printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);
vic->int_select = readl(base + VIC_INT_SELECT);
vic->int_enable = readl(base + VIC_INT_ENABLE);
vic->soft_int = readl(base + VIC_INT_SOFT);
vic->protect = readl(base + VIC_PROTECT);
/* set the interrupts (if any) that are used for
* resuming the system */
writel(vic->resume_irqs, base + VIC_INT_ENABLE);
writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
return 0;
}
struct sysdev_class vic_class = {
.name = "vic",
.suspend = vic_class_suspend,
.resume = vic_class_resume,
};
/**
* vic_pm_register - Register a VIC for later power management control
* @base: The base address of the VIC.
* @irq: The base IRQ for the VIC.
* @resume_sources: bitmask of interrupts allowed for resume sources.
*
* Register the VIC with the system device tree so that it can be notified
* of suspend and resume requests and ensure that the correct actions are
* taken to re-instate the settings on resume.
*/
static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 resume_sources)
{
struct vic_device *v;
if (vic_id >= ARRAY_SIZE(vic_devices))
printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
else {
v = &vic_devices[vic_id];
v->base = base;
v->resume_sources = resume_sources;
v->irq = irq;
vic_id++;
}
}
/**
 * vic_pm_init - initcall to register VIC pm
*
* This is called via late_initcall() to register
* the resources for the VICs due to the early
* nature of the VIC's registration.
*/
static int __init vic_pm_init(void)
{
struct vic_device *dev = vic_devices;
int err;
int id;
if (vic_id == 0)
return 0;
err = sysdev_class_register(&vic_class);
if (err) {
printk(KERN_ERR "%s: cannot register class\n", __func__);
return err;
}
for (id = 0; id < vic_id; id++, dev++) {
dev->sysdev.id = id;
dev->sysdev.cls = &vic_class;
err = sysdev_register(&dev->sysdev);
if (err) {
printk(KERN_ERR "%s: failed to register device\n",
__func__);
return err;
}
}
return 0;
}
late_initcall(vic_pm_init);
static struct vic_device *vic_from_irq(unsigned int irq)
{
struct vic_device *v = vic_devices;
unsigned int base_irq = irq & ~31;
int id;
for (id = 0; id < vic_id; id++, v++) {
if (v->irq == base_irq)
return v;
}
return NULL;
}
static int vic_set_wake(unsigned int irq, unsigned int on)
{
struct vic_device *v = vic_from_irq(irq);
unsigned int off = irq & 31;
u32 bit = 1 << off;
if (!v)
return -EINVAL;
if (!(bit & v->resume_sources))
return -EINVAL;
if (on)
v->resume_irqs |= bit;
else
v->resume_irqs &= ~bit;
return 0;
}
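/*
 * Example (illustrative): if bit 4 was set in resume_sources when the
 * VIC was registered, a driver can mark that interrupt as a wakeup
 * source with set_irq_wake(irq_base + 4, 1), which reaches this
 * handler through the irq_chip .set_wake hook.
 */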
#else
static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
#define vic_set_wake NULL
#endif /* CONFIG_PM */
static struct irq_chip vic_chip = {
.name = "VIC",
.ack = vic_ack_irq,
.mask = vic_mask_irq,
.unmask = vic_unmask_irq,
.set_wake = vic_set_wake,
};
/* The PL190 cell from ARM has been modified by ST, so handle both here */
static void vic_init_st(void __iomem *base, unsigned int irq_start,
u32 vic_sources);
/**
* vic_init - initialise a vectored interrupt controller
* @base: iomem base address
 * @irq_start: starting interrupt number, must be a multiple of 32
* @vic_sources: bitmask of interrupt sources to allow
* @resume_sources: bitmask of interrupt sources to allow for resume
*/
void __init vic_init(void __iomem *base, unsigned int irq_start,
u32 vic_sources, u32 resume_sources)
{
unsigned int i;
u32 cellid = 0;
enum amba_vendor vendor;
/* Identify which VIC cell this one is, by reading the ID */
for (i = 0; i < 4; i++) {
u32 addr = ((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
cellid |= (readl(addr) & 0xff) << (8 * i);
}
vendor = (cellid >> 12) & 0xff;
printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n",
base, cellid, vendor);
switch(vendor) {
case AMBA_VENDOR_ST:
		vic_init_st(base, irq_start, vic_sources);
return;
default:
printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
/* fall through */
case AMBA_VENDOR_ARM:
break;
}
/* Disable all interrupts initially. */
writel(0, base + VIC_INT_SELECT);
writel(0, base + VIC_INT_ENABLE);
writel(~0, base + VIC_INT_ENABLE_CLEAR);
writel(0, base + VIC_IRQ_STATUS);
writel(0, base + VIC_ITCR);
writel(~0, base + VIC_INT_SOFT_CLEAR);
/*
* Make sure we clear all existing interrupts
*/
writel(0, base + VIC_PL190_VECT_ADDR);
for (i = 0; i < 19; i++) {
unsigned int value;
value = readl(base + VIC_PL190_VECT_ADDR);
writel(value, base + VIC_PL190_VECT_ADDR);
}
vic_init2(base);
for (i = 0; i < 32; i++) {
if (vic_sources & (1 << i)) {
unsigned int irq = irq_start + i;
set_irq_chip(irq, &vic_chip);
set_irq_chip_data(irq, base);
set_irq_handler(irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
vic_pm_register(base, irq_start, resume_sources);
}
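/*
 * Typical board-level use (illustrative; the names are hypothetical):
 *
 *	vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0, 1 << IRQ_RTC);
 *
 * This registers all 32 sources starting at IRQ_VIC_START and allows
 * only the RTC interrupt as a wakeup source.
 */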
/*
 * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
 * The original cell has 32 interrupts, while the modified one has 64,
 * replicating the 0x00..0x1f register block at 0x20..0x3f.  In that case
 * this function is called twice, with base set to offset 0x00 and then
 * 0x20 within the page.  We call the latter the "second block".
 */
static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
u32 vic_sources)
{
unsigned int i;
int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
/* Disable all interrupts initially. */
writel(0, base + VIC_INT_SELECT);
writel(0, base + VIC_INT_ENABLE);
writel(~0, base + VIC_INT_ENABLE_CLEAR);
writel(0, base + VIC_IRQ_STATUS);
writel(0, base + VIC_ITCR);
writel(~0, base + VIC_INT_SOFT_CLEAR);
/*
* Make sure we clear all existing interrupts. The vector registers
* in this cell are after the second block of general registers,
* so we can address them using standard offsets, but only from
* the second base address, which is 0x20 in the page
*/
if (vic_2nd_block) {
writel(0, base + VIC_PL190_VECT_ADDR);
for (i = 0; i < 19; i++) {
unsigned int value;
value = readl(base + VIC_PL190_VECT_ADDR);
writel(value, base + VIC_PL190_VECT_ADDR);
}
		/* ST has 16 vectors as well, but we don't enable them for now */
for (i = 0; i < 16; i++) {
void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
writel(0, reg);
}
writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
for (i = 0; i < 32; i++) {
if (vic_sources & (1 << i)) {
unsigned int irq = irq_start + i;
set_irq_chip(irq, &vic_chip);
set_irq_chip_data(irq, base);
set_irq_handler(irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
}