add idl4k kernel firmware version 1.13.0.105

Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions

kernel/arch/sh/include/asm/.gitignore

@@ -0,0 +1 @@
machtypes.h


@@ -0,0 +1,8 @@
include include/asm-generic/Kbuild.asm
header-y += cachectl.h cpu-features.h
unifdef-y += unistd_32.h
unifdef-y += unistd_64.h
unifdef-y += posix_types_32.h
unifdef-y += posix_types_64.h


@@ -0,0 +1,13 @@
#ifndef __ASM_ADC_H
#define __ASM_ADC_H
#ifdef __KERNEL__
/*
* Copyright (C) 2004 Andriy Skulysh
*/
#include <cpu/adc.h>
int adc_single(unsigned int channel);
#endif /* __KERNEL__ */
#endif /* __ASM_ADC_H */


@@ -0,0 +1,64 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999 by Kaz Kojima
*
* Definitions for the address spaces of the SH CPUs.
*/
#ifndef __ASM_SH_ADDRSPACE_H
#define __ASM_SH_ADDRSPACE_H
#ifdef __KERNEL__
#include <cpu/addrspace.h>
/* If this CPU supports segmentation, hook up the helpers */
#ifdef P1SEG
/*
[ P0/U0 (virtual) ] 0x00000000 <------ User space
[ P1 (fixed) cached ] 0x80000000 <------ Kernel space
[ P2 (fixed) non-cachable] 0xA0000000 <------ Physical access
[ P3 (virtual) cached] 0xC0000000 <------ vmalloced area
[ P4 control ] 0xE0000000
*/
/* Returns the privileged segment base of a given address */
#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
/* Returns the physical address of a PnSEG (n=1,2) address */
#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
/*
* Map an address to a certain privileged segment
*/
#define P1SEGADDR(a) \
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P1SEG))
#define P2SEGADDR(a) \
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P2SEG))
#define P3SEGADDR(a) \
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
#define P4SEGADDR(a) \
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
#endif /* 29BIT || PMB_FIXED */
#endif /* P1SEG */
/* Check if an address can be reached in 29 bits */
#define IS_29BIT(a) (((unsigned long)(a)) < 0x20000000)
#ifdef CONFIG_SH_STORE_QUEUES
/*
* This is a special case for the SH-4 store queues, as pages for this
* space still need to be faulted in before it's possible to flush the
* store queue cache for writeout to the remapped region.
*/
#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX P4SEG
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_SH_ADDRSPACE_H */
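
For illustration, a small user-space sketch of the 29-bit segment arithmetic used by the macros above; the P1SEG/P2SEG base values here are assumptions taken from the layout comment, since the real definitions come from <cpu/addrspace.h>:

/* Illustrative only: re-derives the P1/P2 mapping shown in the layout above. */
#include <stdio.h>

#define DEMO_P1SEG 0x80000000UL	/* assumed: cached kernel segment base */
#define DEMO_P2SEG 0xa0000000UL	/* assumed: uncached kernel segment base */

int main(void)
{
	unsigned long phys = 0x0c000000UL;	/* example physical address */
	unsigned long p1 = (phys & 0x1fffffffUL) | DEMO_P1SEG;	/* like P1SEGADDR() */
	unsigned long p2 = (phys & 0x1fffffffUL) | DEMO_P2SEG;	/* like P2SEGADDR() */

	printf("phys %#lx -> P1 %#lx (cached), P2 %#lx (uncached)\n", phys, p1, p2);
	printf("PXSEG(p1) = %#lx, PHYSADDR(p1) = %#lx\n",
	       p1 & 0xe0000000UL, p1 & 0x1fffffffUL);
	return 0;
}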


@@ -0,0 +1,169 @@
#ifndef __ASM_SH_ATOMIC_GRB_H
#define __ASM_SH_ATOMIC_GRB_H
static inline void atomic_add(int i, atomic_t *v)
{
int tmp;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" add %2, %0 \n\t" /* add */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (i)
: "memory" , "r0", "r1");
}
static inline void atomic_sub(int i, atomic_t *v)
{
int tmp;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" sub %2, %0 \n\t" /* sub */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (i)
: "memory" , "r0", "r1");
}
static inline int atomic_add_return(int i, atomic_t *v)
{
int tmp;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" add %2, %0 \n\t" /* add */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (i)
: "memory" , "r0", "r1");
return tmp;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
int tmp;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" sub %2, %0 \n\t" /* sub */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (i)
: "memory", "r0", "r1");
return tmp;
}
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
int tmp;
unsigned int _mask = ~mask;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" and %2, %0 \n\t" /* add */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (_mask)
: "memory" , "r0", "r1");
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
int tmp;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" or %2, %0 \n\t" /* or */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (mask)
: "memory" , "r0", "r1");
}
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t"
" nop \n\t"
" mov r15, r1 \n\t"
" mov #-8, r15 \n\t"
" mov.l @%1, %0 \n\t"
" cmp/eq %2, %0 \n\t"
" bf 1f \n\t"
" mov.l %3, @%1 \n\t"
"1: mov r1, r15 \n\t"
: "=&r" (ret)
: "r" (v), "r" (old), "r" (new)
: "memory" , "r0", "r1" , "t");
return ret;
}
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long tmp;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t"
" nop \n\t"
" mov r15, r1 \n\t"
" mov #-12, r15 \n\t"
" mov.l @%2, %1 \n\t"
" mov %1, %0 \n\t"
" cmp/eq %4, %0 \n\t"
" bt/s 1f \n\t"
" add %3, %1 \n\t"
" mov.l %1, @%2 \n\t"
"1: mov r1, r15 \n\t"
: "=&r" (ret), "=&r" (tmp)
: "r" (v), "r" (a), "r" (u)
: "memory" , "r0", "r1" , "t");
return ret != u;
}
#endif /* __ASM_SH_ATOMIC_GRB_H */


@@ -0,0 +1,71 @@
#ifndef __ASM_SH_ATOMIC_IRQ_H
#define __ASM_SH_ATOMIC_IRQ_H
/*
* To get proper branch prediction for the main line, we must branch
* forward to code at the end of this object's .text section, then
* branch back to restart the operation.
*/
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long flags;
raw_local_irq_save(flags);
v->counter += i;
raw_local_irq_restore(flags);
}
static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long flags;
raw_local_irq_save(flags);
v->counter -= i;
raw_local_irq_restore(flags);
}
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long temp, flags;
raw_local_irq_save(flags);
temp = v->counter;
temp += i;
v->counter = temp;
raw_local_irq_restore(flags);
return temp;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long temp, flags;
raw_local_irq_save(flags);
temp = v->counter;
temp -= i;
v->counter = temp;
raw_local_irq_restore(flags);
return temp;
}
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
raw_local_irq_save(flags);
v->counter &= ~mask;
raw_local_irq_restore(flags);
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
raw_local_irq_save(flags);
v->counter |= mask;
raw_local_irq_restore(flags);
}
#endif /* __ASM_SH_ATOMIC_IRQ_H */


@@ -0,0 +1,134 @@
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H
/*
* To get proper branch prediction for the main line, we must branch
* forward to code at the end of this object's .text section, then
* branch back to restart the operation.
*/
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_add \n"
" add %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
: "=&z" (tmp)
: "r" (i), "r" (&v->counter)
: "t");
}
static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_sub \n"
" sub %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
: "=&z" (tmp)
: "r" (i), "r" (&v->counter)
: "t");
}
/*
* SH-4A note:
*
* We basically get atomic_xxx_return() for free compared with
* atomic_xxx(). movli.l/movco.l require r0 due to the instruction
* encoding, so the retval is automatically set without having to
* do any special work.
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long temp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_add_return \n"
" add %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
" synco \n"
: "=&z" (temp)
: "r" (i), "r" (&v->counter)
: "t");
return temp;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long temp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_sub_return \n"
" sub %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
" synco \n"
: "=&z" (temp)
: "r" (i), "r" (&v->counter)
: "t");
return temp;
}
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_clear_mask \n"
" and %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
: "=&z" (tmp)
: "r" (~mask), "r" (&v->counter)
: "t");
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_set_mask \n"
" or %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
: "=&z" (tmp)
: "r" (mask), "r" (&v->counter)
: "t");
}
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
/**
* atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#endif /* __ASM_SH_ATOMIC_LLSC_H */
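
The atomic_add_unless() above is the standard compare-and-swap retry loop; as a rough user-space analogue, here is the same algorithm with C11 atomics (illustrative only, not the kernel's atomic_t/cmpxchg API):

/* Illustrative C11 re-implementation of the add-unless retry loop above. */
#include <stdatomic.h>
#include <stdio.h>

static int demo_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u) {
		/* On failure, c is reloaded with the current value and we retry. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;
	}
	return c != u;		/* non-zero if the add was performed */
}

int main(void)
{
	atomic_int v = 1;

	printf("%d v=%d\n", demo_add_unless(&v, 1, 0), atomic_load(&v));	/* 1 v=2 */
	printf("%d v=%d\n", demo_add_unless(&v, 1, 2), atomic_load(&v));	/* 0 v=2 */
	return 0;
}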


@@ -0,0 +1,90 @@
#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
*/
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#if defined(CONFIG_GUSA_RB)
#include <asm/atomic-grb.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/atomic-llsc.h>
#else
#include <asm/atomic-irq.h>
#endif
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
local_irq_save(flags);
ret = v->counter;
if (likely(ret == old))
v->counter = new;
local_irq_restore(flags);
return ret;
}
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
local_irq_save(flags);
ret = v->counter;
if (ret != u)
v->counter += a;
local_irq_restore(flags);
return ret != u;
}
#endif /* !CONFIG_GUSA_RB && !CONFIG_CPU_SH4A */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic-long.h>
#include <asm-generic/atomic64.h>
#endif /* __ASM_SH_ATOMIC_H */


@@ -0,0 +1,36 @@
#ifndef __ASM_SH_AUXVEC_H
#define __ASM_SH_AUXVEC_H
/*
* Architecture-neutral AT_ values in 0-17, leave some room
* for more of them.
*/
/*
* This entry gives some information about the FPU initialization
* performed by the kernel.
*/
#define AT_FPUCW 18 /* Used FPU control word. */
#if defined(CONFIG_VSYSCALL) || !defined(__KERNEL__)
/*
* Only define this in the vsyscall case, the entry point to
* the vsyscall page gets placed here. The kernel will attempt
* to build a gate VMA we don't care about otherwise..
*/
#define AT_SYSINFO_EHDR 33
#endif
/*
* More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the
* value is -1, then the cache doesn't exist. Otherwise:
*
* bit 0-3: Cache set-associativity; 0 means fully associative.
* bit 4-7: Log2 of cacheline size.
* bit 8-31: Size of the entire cache >> 8.
*/
#define AT_L1I_CACHESHAPE 34
#define AT_L1D_CACHESHAPE 35
#define AT_L2_CACHESHAPE 36
#endif /* __ASM_SH_AUXVEC_H */
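
For illustration, a user-space sketch that decodes the cache-shape encoding described above via getauxval(); the AT_ constant is duplicated from this header as an assumption, and 0/-1 are treated as "not reported"/"no such cache":

/* Illustrative decoder for the AT_*_CACHESHAPE layout documented above. */
#include <stdio.h>
#include <sys/auxv.h>

#define DEMO_AT_L1D_CACHESHAPE 35	/* assumed to match AT_L1D_CACHESHAPE above */

int main(void)
{
	unsigned long shape = getauxval(DEMO_AT_L1D_CACHESHAPE);

	if (shape == 0 || shape == (unsigned long)-1) {
		puts("L1 D-cache shape not reported");
		return 0;
	}

	unsigned int assoc = shape & 0xf;		/* 0 = fully associative */
	unsigned int line  = 1u << ((shape >> 4) & 0xf);
	unsigned long size = (shape >> 8) << 8;		/* stored as size >> 8 */

	printf("L1D: %lu bytes, %u-byte lines, assoc=%u (0 = fully associative)\n",
	       size, line, assoc);
	return 0;
}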


@@ -0,0 +1,172 @@
#ifndef __ASM_SH_BITOPS_GRB_H
#define __ASM_SH_BITOPS_GRB_H
static inline void set_bit(int nr, volatile void * addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" or %2, %0 \n\t" /* or */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (a)
: "r" (mask)
: "memory" , "r0", "r1");
}
static inline void clear_bit(int nr, volatile void * addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = ~(1 << (nr & 0x1f));
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" and %2, %0 \n\t" /* and */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (a)
: "r" (mask)
: "memory" , "r0", "r1");
}
static inline void change_bit(int nr, volatile void * addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" xor %2, %0 \n\t" /* xor */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (a)
: "r" (mask)
: "memory" , "r0", "r1");
}
static inline int test_and_set_bit(int nr, volatile void * addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-14, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%2, %0 \n\t" /* load old value */
" mov %0, %1 \n\t"
" tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */
" mov #-1, %1 \n\t" /* retvat = -1 */
" negc %1, %1 \n\t" /* retval = (mask & *a) != 0 */
" or %3, %0 \n\t"
" mov.l %0, @%2 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"=&r" (retval),
"+r" (a)
: "r" (mask)
: "memory" , "r0", "r1" ,"t");
return retval;
}
static inline int test_and_clear_bit(int nr, volatile void * addr)
{
int mask, retval,not_mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
not_mask = ~mask;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-14, r15 \n\t" /* LOGIN */
" mov.l @%2, %0 \n\t" /* load old value */
" mov %0, %1 \n\t" /* %1 = *a */
" tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */
" mov #-1, %1 \n\t" /* retvat = -1 */
" negc %1, %1 \n\t" /* retval = (mask & *a) != 0 */
" and %4, %0 \n\t"
" mov.l %0, @%2 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"=&r" (retval),
"+r" (a)
: "r" (mask),
"r" (not_mask)
: "memory" , "r0", "r1", "t");
return retval;
}
static inline int test_and_change_bit(int nr, volatile void * addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-14, r15 \n\t" /* LOGIN */
" mov.l @%2, %0 \n\t" /* load old value */
" mov %0, %1 \n\t" /* %1 = *a */
" tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */
" mov #-1, %1 \n\t" /* retvat = -1 */
" negc %1, %1 \n\t" /* retval = (mask & *a) != 0 */
" xor %3, %0 \n\t"
" mov.l %0, @%2 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"=&r" (retval),
"+r" (a)
: "r" (mask)
: "memory" , "r0", "r1", "t");
return retval;
}
#include <asm-generic/bitops/non-atomic.h>
#endif /* __ASM_SH_BITOPS_GRB_H */


@@ -0,0 +1,146 @@
#ifndef __ASM_SH_BITOPS_LLSC_H
#define __ASM_SH_BITOPS_LLSC_H
static inline void set_bit(int nr, volatile void *addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%1, %0 ! set_bit \n\t"
"or %2, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
: "=&z" (tmp)
: "r" (a), "r" (mask)
: "t", "memory"
);
}
static inline void clear_bit(int nr, volatile void *addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%1, %0 ! clear_bit \n\t"
"and %2, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
: "=&z" (tmp)
: "r" (a), "r" (~mask)
: "t", "memory"
);
}
static inline void change_bit(int nr, volatile void *addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%1, %0 ! change_bit \n\t"
"xor %2, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
: "=&z" (tmp)
: "r" (a), "r" (mask)
: "t", "memory"
);
}
static inline int test_and_set_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! test_and_set_bit \n\t"
"mov %0, %1 \n\t"
"or %3, %0 \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"and %3, %1 \n\t"
: "=&z" (tmp), "=&r" (retval)
: "r" (a), "r" (mask)
: "t", "memory"
);
return retval != 0;
}
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! test_and_clear_bit \n\t"
"mov %0, %1 \n\t"
"and %4, %0 \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"and %3, %1 \n\t"
"synco \n\t"
: "=&z" (tmp), "=&r" (retval)
: "r" (a), "r" (mask), "r" (~mask)
: "t", "memory"
);
return retval != 0;
}
static inline int test_and_change_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! test_and_change_bit \n\t"
"mov %0, %1 \n\t"
"xor %3, %0 \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"and %3, %1 \n\t"
"synco \n\t"
: "=&z" (tmp), "=&r" (retval)
: "r" (a), "r" (mask)
: "t", "memory"
);
return retval != 0;
}
#include <asm-generic/bitops/non-atomic.h>
#endif /* __ASM_SH_BITOPS_LLSC_H */


@@ -0,0 +1,142 @@
#ifndef __ASM_SH_BITOPS_OP32_H
#define __ASM_SH_BITOPS_OP32_H
/*
* The bit modifying instructions on SH-2A are only capable of working
* with a 3-bit immediate, which signifies the shift position for the bit
* being worked on.
*/
#if defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
#define BYTE_NUMBER(nr) ((nr ^ BITOP_LE_SWIZZLE) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr) ((nr ^ BITOP_LE_SWIZZLE) % BITS_PER_BYTE)
#else
#define BYTE_NUMBER(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr) ((nr) % BITS_PER_BYTE)
#endif
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
__asm__ __volatile__ (
"bset.b %1, @(%O2,%0) ! __set_bit\n\t"
: "+r" (addr)
: "i" (BYTE_OFFSET(nr)), "i" (BYTE_NUMBER(nr))
: "t", "memory"
);
} else {
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p |= mask;
}
}
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
__asm__ __volatile__ (
"bclr.b %1, @(%O2,%0) ! __clear_bit\n\t"
: "+r" (addr)
: "i" (BYTE_OFFSET(nr)),
"i" (BYTE_NUMBER(nr))
: "t", "memory"
);
} else {
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p &= ~mask;
}
}
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
__asm__ __volatile__ (
"bxor.b %1, @(%O2,%0) ! __change_bit\n\t"
: "+r" (addr)
: "i" (BYTE_OFFSET(nr)),
"i" (BYTE_NUMBER(nr))
: "t", "memory"
);
} else {
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p ^= mask;
}
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old | mask;
return (old & mask) != 0;
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old & ~mask;
return (old & mask) != 0;
}
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr,
volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old ^ mask;
return (old & mask) != 0;
}
/**
* test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
#endif /* __ASM_SH_BITOPS_OP32_H */
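
As a plain C illustration of the fallback path above (taken when the SH-2A bset.b/bclr.b immediates cannot be used), this is the word-index/mask split that BIT_WORD()/BIT_MASK() perform; 32-bit longs are assumed for the example:

/* Illustrative: the BIT_WORD/BIT_MASK arithmetic used by the non-immediate path. */
#include <stdio.h>

#define DEMO_BITS_PER_LONG 32	/* assumption for this sketch */
#define DEMO_BIT_WORD(nr)   ((nr) / DEMO_BITS_PER_LONG)
#define DEMO_BIT_MASK(nr)   (1UL << ((nr) % DEMO_BITS_PER_LONG))

int main(void)
{
	unsigned long map[2] = { 0, 0 };
	int nr = 37;					/* word 1, bit 5 */

	map[DEMO_BIT_WORD(nr)] |= DEMO_BIT_MASK(nr);	/* like __set_bit()   */
	printf("word %d = %#lx\n", DEMO_BIT_WORD(nr), map[DEMO_BIT_WORD(nr)]);

	map[DEMO_BIT_WORD(nr)] &= ~DEMO_BIT_MASK(nr);	/* like __clear_bit() */
	printf("word %d = %#lx\n", DEMO_BIT_WORD(nr), map[DEMO_BIT_WORD(nr)]);
	return 0;
}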


@@ -0,0 +1,106 @@
#ifndef __ASM_SH_BITOPS_H
#define __ASM_SH_BITOPS_H
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <asm/system.h>
/* For __swab32 */
#include <asm/byteorder.h>
#ifdef CONFIG_GUSA_RB
#include <asm/bitops-grb.h>
#elif defined(CONFIG_CPU_SH2A)
#include <asm-generic/bitops/atomic.h>
#include <asm/bitops-op32.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/bitops-llsc.h>
#else
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
#endif
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
#ifdef CONFIG_SUPERH32
static inline unsigned long ffz(unsigned long word)
{
unsigned long result;
__asm__("1:\n\t"
"shlr %1\n\t"
"bt/s 1b\n\t"
" add #1, %0"
: "=r" (result), "=r" (word)
: "0" (~0L), "1" (word)
: "t");
return result;
}
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
unsigned long result;
__asm__("1:\n\t"
"shlr %1\n\t"
"bf/s 1b\n\t"
" add #1, %0"
: "=r" (result), "=r" (word)
: "0" (~0L), "1" (word)
: "t");
return result;
}
#else
static inline unsigned long ffz(unsigned long word)
{
unsigned long result, __d2, __d3;
__asm__("gettr tr0, %2\n\t"
"pta $+32, tr0\n\t"
"andi %1, 1, %3\n\t"
"beq %3, r63, tr0\n\t"
"pta $+4, tr0\n"
"0:\n\t"
"shlri.l %1, 1, %1\n\t"
"addi %0, 1, %0\n\t"
"andi %1, 1, %3\n\t"
"beqi %3, 1, tr0\n"
"1:\n\t"
"ptabs %2, tr0\n\t"
: "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
: "0" (0L), "1" (word));
return result;
}
#include <asm-generic/bitops/__ffs.h>
#endif
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* __KERNEL__ */
#endif /* __ASM_SH_BITOPS_H */
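
For reference, a portable sketch of what the ffz()/__ffs() routines above compute — the index, counting from bit 0, of the first zero bit and the first set bit respectively; illustrative only, not the hand-written SH versions:

/* Portable equivalents of ffz()/__ffs(), for illustration. */
#include <stdio.h>

static unsigned long demo_ffz(unsigned long word)	/* first zero bit */
{
	unsigned long i = 0;

	while (word & 1) { word >>= 1; i++; }
	return i;
}

static unsigned long demo_ffs0(unsigned long word)	/* first set bit; undefined for 0 */
{
	unsigned long i = 0;

	while (!(word & 1)) { word >>= 1; i++; }
	return i;
}

int main(void)
{
	printf("ffz(0x0b)   = %lu\n", demo_ffz(0x0bUL));	/* bits 0,1 set, bit 2 clear -> 2 */
	printf("__ffs(0x58) = %lu\n", demo_ffs0(0x58UL));	/* 0b1011000 -> 3 */
	return 0;
}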


@@ -0,0 +1 @@
#include <asm-generic/bitsperlong.h>


@@ -0,0 +1,110 @@
#ifndef __ASM_SH_BUG_H
#define __ASM_SH_BUG_H
#define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */
#define BUGFLAG_UNWINDER (1 << 1)
#ifdef CONFIG_GENERIC_BUG
#define HAVE_ARCH_BUG
#define HAVE_ARCH_WARN_ON
/**
* _EMIT_BUG_ENTRY
* %1 - __FILE__
* %2 - __LINE__
* %3 - trap type
* %4 - sizeof(struct bug_entry)
*
* The trapa opcode itself sits in %0.
* The %O notation is used to avoid # generation.
*
* The offending file and line are encoded in the __bug_table section.
*/
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _EMIT_BUG_ENTRY \
"\t.pushsection __bug_table,\"a\"\n" \
"2:\t.long 1b, %O1\n" \
"\t.short %O2, %O3\n" \
"\t.org 2b+%O4\n" \
"\t.popsection\n"
#else
#define _EMIT_BUG_ENTRY \
"\t.pushsection __bug_table,\"a\"\n" \
"2:\t.long 1b\n" \
"\t.short %O3\n" \
"\t.org 2b+%O4\n" \
"\t.popsection\n"
#endif
#define BUG() \
do { \
__asm__ __volatile__ ( \
"1:\t.short %O0\n" \
_EMIT_BUG_ENTRY \
: \
: "n" (TRAPA_BUG_OPCODE), \
"i" (__FILE__), \
"i" (__LINE__), "i" (0), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#define __WARN() \
do { \
__asm__ __volatile__ ( \
"1:\t.short %O0\n" \
_EMIT_BUG_ENTRY \
: \
: "n" (TRAPA_BUG_OPCODE), \
"i" (__FILE__), \
"i" (__LINE__), \
"i" (BUGFLAG_WARNING), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#define WARN_ON(x) ({ \
int __ret_warn_on = !!(x); \
if (__builtin_constant_p(__ret_warn_on)) { \
if (__ret_warn_on) \
__WARN(); \
} else { \
if (unlikely(__ret_warn_on)) \
__WARN(); \
} \
unlikely(__ret_warn_on); \
})
#define UNWINDER_BUG() \
do { \
__asm__ __volatile__ ( \
"1:\t.short %O0\n" \
_EMIT_BUG_ENTRY \
: \
: "n" (TRAPA_BUG_OPCODE), \
"i" (__FILE__), \
"i" (__LINE__), \
"i" (BUGFLAG_UNWINDER), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#define UNWINDER_BUG_ON(x) ({ \
int __ret_unwinder_on = !!(x); \
if (__builtin_constant_p(__ret_unwinder_on)) { \
if (__ret_unwinder_on) \
UNWINDER_BUG(); \
} else { \
if (unlikely(__ret_unwinder_on)) \
UNWINDER_BUG(); \
} \
unlikely(__ret_unwinder_on); \
})
#else
#define UNWINDER_BUG BUG
#define UNWINDER_BUG_ON BUG_ON
#endif /* CONFIG_GENERIC_BUG */
#include <asm-generic/bug.h>
#endif /* __ASM_SH_BUG_H */


@@ -0,0 +1,73 @@
#ifndef __ASM_SH_BUGS_H
#define __ASM_SH_BUGS_H
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
/*
* I don't know of any Super-H bugs yet.
*/
#include <asm/processor.h>
static void __init check_bugs(void)
{
extern unsigned long loops_per_jiffy;
char *p = &init_utsname()->machine[2]; /* "sh" */
current_cpu_data.loops_per_jiffy = loops_per_jiffy;
switch (current_cpu_data.family) {
case CPU_FAMILY_SH2:
*p++ = '2';
break;
case CPU_FAMILY_SH2A:
*p++ = '2';
*p++ = 'a';
break;
case CPU_FAMILY_SH3:
*p++ = '3';
break;
case CPU_FAMILY_SH4:
*p++ = '4';
break;
case CPU_FAMILY_SH4A:
*p++ = '4';
*p++ = 'a';
break;
case CPU_FAMILY_SH4AL_DSP:
*p++ = '4';
*p++ = 'a';
*p++ = 'l';
*p++ = '-';
*p++ = 'd';
*p++ = 's';
*p++ = 'p';
break;
case CPU_FAMILY_SH5:
*p++ = '6';
*p++ = '4';
break;
case CPU_FAMILY_UNKNOWN:
/*
* Specifically use CPU_FAMILY_UNKNOWN rather than
* default:, so we're able to have the compiler whine
* about unhandled enumerations.
*/
break;
}
printk("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
#ifndef __LITTLE_ENDIAN__
/* 'eb' means 'Endian Big' */
*p++ = 'e';
*p++ = 'b';
#endif
*p = '\0';
}
#endif /* __ASM_SH_BUGS_H */


@@ -0,0 +1,10 @@
#ifndef __ASM_SH_BYTEORDER_H
#define __ASM_SH_BYTEORDER_H
#ifdef __LITTLE_ENDIAN__
#include <linux/byteorder/little_endian.h>
#else
#include <linux/byteorder/big_endian.h>
#endif
#endif /* __ASM_SH_BYTEORDER_H */


@@ -0,0 +1,51 @@
/* $Id: cache.h,v 1.6 2004/03/11 18:08:05 lethal Exp $
*
* include/asm-sh/cache.h
*
* Copyright 1999 (C) Niibe Yutaka
* Copyright 2002, 2003 (C) Paul Mundt
*/
#ifndef __ASM_SH_CACHE_H
#define __ASM_SH_CACHE_H
#ifdef __KERNEL__
#include <linux/init.h>
#include <cpu/cache.h>
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#ifndef __ASSEMBLY__
struct cache_info {
unsigned int ways; /* Number of cache ways */
unsigned int sets; /* Number of cache sets */
unsigned int linesz; /* Cache line size (bytes) */
unsigned int way_size; /* sets * line size */
/*
* way_incr is the address offset for accessing the next way
* in memory mapped cache array ops.
*/
unsigned int way_incr;
unsigned int entry_shift;
unsigned int entry_mask;
/*
* Compute a mask which selects the address bits which overlap between
* 1. those used to select the cache set during indexing
* 2. those in the physical page number.
*/
unsigned int alias_mask;
unsigned int n_aliases; /* Number of aliases */
unsigned long flags;
};
int __init detect_cpu_and_cache_system(void);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */


@@ -0,0 +1,23 @@
#ifndef _SH_CACHECTL_H
#define _SH_CACHECTL_H
/* Definitions for the cacheflush system call. */
#define CACHEFLUSH_D_INVAL 0x1 /* invalidate (without write back) */
#define CACHEFLUSH_D_WB 0x2 /* write back (without invalidate) */
#define CACHEFLUSH_D_PURGE 0x3 /* writeback and invalidate */
#define CACHEFLUSH_D_MASK 0x3
#define CACHEFLUSH_I 0x4
#define CACHEFLUSH_D_L2 0x8
/*
* Options for cacheflush system call
*/
#define ICACHE CACHEFLUSH_I /* flush instruction cache */
#define DCACHE CACHEFLUSH_D_PURGE /* writeback and flush data cache */
#define BCACHE (ICACHE|DCACHE) /* flush both caches */
#endif /* _SH_CACHECTL_H */
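
A hedged user-space sketch of how these flags are typically consumed (for example by a JIT that has just written instructions): it assumes the SH cacheflush(addr, len, op) system call and the __NR_cacheflush number from <asm/unistd.h>, so verify both against your kernel and libc before relying on it:

/* Illustrative only: flush a freshly written code buffer before executing it.
 * Assumes SH exposes cacheflush(addr, len, op) as __NR_cacheflush. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define DEMO_ICACHE 0x4		/* assumed to match ICACHE above */
#define DEMO_DCACHE 0x3		/* assumed to match DCACHE above */

static int demo_flush_code(void *addr, unsigned long len)
{
	/* write back the D-cache and invalidate the I-cache over the buffer */
	return syscall(__NR_cacheflush, addr, len, DEMO_ICACHE | DEMO_DCACHE);
}

int main(void)
{
	static unsigned short code[16];	/* pretend we just emitted code here */

	if (demo_flush_code(code, sizeof(code)) != 0)
		perror("cacheflush");
	return 0;
}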


@@ -0,0 +1,121 @@
#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H
#ifdef __KERNEL__
#include <linux/mm.h>
#include <asm/l2_cacheflush.h>
/*
* Cache flushing:
*
* - flush_cache_all() flushes entire cache
* - flush_cache_mm(mm) flushes the specified mm context's cache lines
* - flush_cache_dup_mm(mm) handles cache flushing when forking
* - flush_cache_page(mm, vmaddr, pfn) flushes a single page
* - flush_cache_range(vma, start, end) flushes a range of pages
*
* - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
* - flush_icache_range(start, end) flushes(invalidates) a range for icache
* - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
* - flush_cache_sigtramp(vaddr) flushes the signal trampoline
*/
extern void (*local_flush_cache_all)(void *args);
extern void (*local_flush_cache_mm)(void *args);
extern void (*local_flush_cache_dup_mm)(void *args);
extern void (*local_flush_cache_page)(void *args);
extern void (*local_flush_cache_range)(void *args);
extern void (*local_flush_dcache_page)(void *args);
extern void (*local_flush_icache_range)(void *args);
extern void (*local_flush_icache_page)(void *args);
extern void (*local_flush_cache_sigtramp)(void *args);
static inline void cache_noop(void *args) { }
extern void (*__flush_wback_region)(void *start, int size);
extern void (*__flush_purge_region)(void *start, int size);
extern void (*__flush_invalidate_region)(void *start, int size);
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_dup_mm(struct mm_struct *mm);
extern void flush_cache_page(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn);
extern void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void flush_dcache_page(struct page *page);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma,
struct page *page);
extern void flush_cache_sigtramp(unsigned long address);
struct flusher_data {
struct vm_area_struct *vma;
unsigned long addr1, addr2;
};
#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *page, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long vmaddr)
{
if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
__flush_anon_page(page, vmaddr);
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(unsigned long addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
flush_kernel_dcache_page_addr((unsigned long)page_address(page));
}
extern void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len);
extern void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
unsigned offset, size_t size)
{
void *start = (void __force *)virt + offset;
__flush_purge_region(start, size);
__l2_flush_purge_phys(phys + offset, size);
}
static inline void writeback_ioremap_region(unsigned long phys, void __iomem *virt,
unsigned offset, size_t size)
{
void *start = (void __force *)virt + offset;
__flush_wback_region(start, size);
__l2_flush_wback_phys(phys + offset, size);
}
static inline void invalidate_ioremap_region(unsigned long phys, void __iomem *virt,
unsigned offset, size_t size)
{
void *start = (void __force *)virt + offset;
__flush_invalidate_region(start, size);
__l2_flush_invalidate_phys(phys + offset, size);
}
void kmap_coherent_init(void);
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void *kvaddr);
#define PG_dcache_dirty PG_arch_1
void cpu_cache_init(void);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */
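
A brief driver-style sketch (not a standalone program) of the ioremap flush helpers declared above, for the case where the CPU fills a descriptor in a cached ioremap() window and must push it out before the device reads it; everything except the flush helper itself is hypothetical:

/* Driver-style sketch: publish CPU-written data behind an ioremap()ed window. */
#include <linux/io.h>
#include <asm/cacheflush.h>

static void demo_publish_desc(unsigned long phys, void __iomem *virt,
			      unsigned int offset, size_t len)
{
	/* ... CPU has just written the descriptor at virt + offset ... */

	/* Write back L1 (and L2, via the helper) so the device sees the data. */
	writeback_ioremap_region(phys, virt, offset, len);
}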


@@ -0,0 +1,5 @@
#ifdef CONFIG_SUPERH32
# include "checksum_32.h"
#else
# include <asm-generic/checksum.h>
#endif


@@ -0,0 +1,215 @@
#ifndef __ASM_SH_CHECKSUM_H
#define __ASM_SH_CHECKSUM_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka
*/
#include <linux/in6.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* access_ok().
*/
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
{
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
}
/*
* Fold a partial checksum
*/
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int __dummy;
__asm__("swap.w %0, %1\n\t"
"extu.w %0, %0\n\t"
"extu.w %1, %1\n\t"
"add %1, %0\n\t"
"swap.w %0, %1\n\t"
"add %1, %0\n\t"
"not %0, %0\n\t"
: "=r" (sum), "=&r" (__dummy)
: "0" (sum)
: "t");
return (__force __sum16)sum;
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
* for linux by Arnt Gulbrandsen.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum, __dummy0, __dummy1;
__asm__ __volatile__(
"mov.l @%1+, %0\n\t"
"mov.l @%1+, %3\n\t"
"add #-2, %2\n\t"
"clrt\n\t"
"1:\t"
"addc %3, %0\n\t"
"movt %4\n\t"
"mov.l @%1+, %3\n\t"
"dt %2\n\t"
"bf/s 1b\n\t"
" cmp/eq #1, %4\n\t"
"addc %3, %0\n\t"
"addc %2, %0" /* Here %2 is 0, add carry-bit */
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
: "1" (iph), "2" (ihl)
: "t", "memory");
return csum_fold(sum);
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
#ifdef __LITTLE_ENDIAN__
unsigned long len_proto = (proto + len) << 8;
#else
unsigned long len_proto = proto + len;
#endif
__asm__("clrt\n\t"
"addc %0, %1\n\t"
"addc %2, %1\n\t"
"addc %3, %1\n\t"
"movt %0\n\t"
"add %1, %0"
: "=r" (sum), "=r" (len_proto)
: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
: "t");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
unsigned int __dummy;
__asm__("clrt\n\t"
"mov.l @(0,%2), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(4,%2), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(8,%2), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(12,%2), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(0,%3), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(4,%3), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(8,%3), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(12,%3), %1\n\t"
"addc %1, %0\n\t"
"addc %4, %0\n\t"
"addc %5, %0\n\t"
"movt %1\n\t"
"add %1, %0\n"
: "=r" (sum), "=&r" (__dummy)
: "r" (saddr), "r" (daddr),
"r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
: "t");
return csum_fold(sum);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
int len, __wsum sum,
int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len))
return csum_partial_copy_generic((__force const void *)src,
dst, len, sum, NULL, err_ptr);
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
}
#endif /* __ASM_SH_CHECKSUM_H */
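
For reference, a portable sketch of the Internet-checksum arithmetic that csum_partial()/csum_fold() implement above — sum 16-bit words, fold the carries back in, complement at the end. It works in network byte order for clarity rather than mirroring the little-endian-optimized SH assembly:

/* Reference Internet checksum (RFC 1071 style), for illustration only. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t demo_ip_compute_csum(const void *buff, size_t len)
{
	const uint8_t *p = buff;
	uint32_t sum = 0;

	while (len > 1) {				/* sum 16-bit words */
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)					/* odd trailing byte */
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)				/* fold carries (end-around carry) */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;				/* one's complement */
}

int main(void)
{
	/* Example IPv4 header with its checksum field (bytes 10-11) zeroed. */
	uint8_t hdr[20] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
			    0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
			    0xac, 0x10, 0x0a, 0x0c };

	printf("header checksum = 0x%04x\n", demo_ip_compute_csum(hdr, sizeof(hdr)));
	/* prints 0xb1e6 for this well-known example header */
	return 0;
}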


@@ -0,0 +1,31 @@
/*
* Copyright (C) 2010 Paul Mundt <lethal@linux-sh.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Helper for the clk API to assist looking up a struct clk.
*/
#ifndef __CLKDEV__H_
#define __CLKDEV__H_
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/clock.h>
static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
{
if (!slab_is_available())
return alloc_bootmem_low_pages(size);
else
return kzalloc(size, GFP_KERNEL);
}
#define __clk_put(clk)
#define __clk_get(clk) ({ 1; })
#endif /* __CLKDEV_H__ */


@@ -0,0 +1,21 @@
#ifndef __ASM_SH_CLOCK_H
#define __ASM_SH_CLOCK_H
#ifdef CONFIG_SH_CLK
#include <linux/sh_clk.h>
#endif
#ifdef CONFIG_STM_DRIVERS
#include <linux/stm/clk.h>
#endif
/* Should be defined by processor-specific code */
void __deprecated arch_init_clk_ops(struct clk_ops **, int type);
int __init arch_clk_init(void);
/* arch/sh/kernel/cpu/clock-cpg.c */
int __init __deprecated cpg_clk_init(void);
/* arch/sh/kernel/cpu/clock.c */
int clk_init(void);
#endif /* __ASM_SH_CLOCK_H */


@@ -0,0 +1,76 @@
#ifndef __ASM_SH_CMPXCHG_GRB_H
#define __ASM_SH_CMPXCHG_GRB_H
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
unsigned long retval;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" nop \n\t"
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-4, r15 \n\t" /* LOGIN */
" mov.l @%1, %0 \n\t" /* load old value */
" mov.l %2, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (m),
"+r" (val) /* when val == r15 this function will not work as expected.
* So val is added to output constriants */
:
: "memory", "r0", "r1");
return retval;
}
static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
unsigned long retval;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN */
" mov.b @%1, %0 \n\t" /* load old value */
" extu.b %0, %0 \n\t" /* extend as unsigned */
" mov.b %2, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (m),
"+r" (val) /* If val == r15 code doesn't work.
* So make val input/output constraint
* preventing compiler using r15 directly */
:
: "memory" , "r0", "r1");
return retval;
}
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
unsigned long new)
{
unsigned long retval;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" nop \n\t"
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-8, r15 \n\t" /* LOGIN */
" mov.l @%3, %0 \n\t" /* load old value */
" cmp/eq %0, %1 \n\t"
" bf 1f \n\t" /* if not equal */
" mov.l %2, @%3 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (old), "+r" (new) /* old or new can be r15 so
* force into temporary register */
: "r" (m)
: "memory" , "r0", "r1", "t");
return retval;
}
#endif /* __ASM_SH_CMPXCHG_GRB_H */


@@ -0,0 +1,40 @@
#ifndef __ASM_SH_CMPXCHG_IRQ_H
#define __ASM_SH_CMPXCHG_IRQ_H
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
unsigned long flags, retval;
local_irq_save(flags);
retval = *m;
*m = val;
local_irq_restore(flags);
return retval;
}
static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
unsigned long flags, retval;
local_irq_save(flags);
retval = *m;
*m = val & 0xff;
local_irq_restore(flags);
return retval;
}
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
unsigned long new)
{
__u32 retval;
unsigned long flags;
local_irq_save(flags);
retval = *m;
if (retval == old)
*m = new;
local_irq_restore(flags); /* implies memory barrier */
return retval;
}
#endif /* __ASM_SH_CMPXCHG_IRQ_H */


@@ -0,0 +1,71 @@
#ifndef __ASM_SH_CMPXCHG_LLSC_H
#define __ASM_SH_CMPXCHG_LLSC_H
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
unsigned long retval;
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! xchg_u32 \n\t"
"mov %0, %1 \n\t"
"mov %3, %0 \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"synco \n\t"
: "=&z"(tmp), "=&r" (retval)
: "r" (m), "r" (val)
: "t", "memory"
);
return retval;
}
static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
unsigned long retval;
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! xchg_u8 \n\t"
"mov %0, %1 \n\t"
"mov %3, %0 \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"synco \n\t"
: "=&z"(tmp), "=&r" (retval)
: "r" (m), "r" (val & 0xff)
: "t", "memory"
);
return retval;
}
static inline unsigned long
__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
{
unsigned long retval;
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! __cmpxchg_u32 \n\t"
"mov %0, %1 \n\t"
"cmp/eq %1, %3 \n\t"
"bf 2f \n\t"
"mov %4, %0 \n\t"
"2: \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"synco \n\t"
: "=&z" (tmp), "=&r" (retval)
: "r" (m), "r" (old), "r" (new)
: "t", "memory"
);
return retval;
}
#endif /* __ASM_SH_CMPXCHG_LLSC_H */


@@ -0,0 +1,29 @@
#ifndef __ASM_SH_CPU_FEATURES_H
#define __ASM_SH_CPU_FEATURES_H
/*
* Processor flags
*
* Note: When adding a new flag, keep cpu_flags[] in
* arch/sh/kernel/setup.c in sync so symbolic name
* mapping of the processor flags has a chance of being
* reasonably accurate.
*
* These flags are also available through the ELF
* auxiliary vector as AT_HWCAP.
*/
#define CPU_HAS_FPU 0x0001 /* Hardware FPU support */
#define CPU_HAS_P2_FLUSH_BUG 0x0002 /* Need to flush the cache in P2 area */
#define CPU_HAS_MMU_PAGE_ASSOC 0x0004 /* SH3: TLB way selection bit support */
#define CPU_HAS_DSP 0x0008 /* SH-DSP: DSP support */
#define CPU_HAS_PERF_COUNTER 0x0010 /* Hardware performance counters */
#define CPU_HAS_PTEA 0x0020 /* PTEA register */
#define CPU_HAS_LLSC 0x0040 /* movli.l/movco.l */
#define CPU_HAS_L2_CACHE 0x0080 /* Secondary cache / URAM */
#define CPU_HAS_OP32 0x0100 /* 32-bit instruction support */
#define CPU_HAS_PTEAEX 0x0200 /* PTE ASID Extension support */
#define CPU_HAS_ICBI 0x0400 /* icbi instruction */
#define CPU_HAS_SYNCO 0x0800 /* synco instruction */
#define CPU_HAS_FPCHG 0x1000 /* fpchg instruction */
#endif /* __ASM_SH_CPU_FEATURES_H */
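
Since the comment notes that these flags are exported to user space through the ELF auxiliary vector as AT_HWCAP, here is a small sketch of testing one of them with getauxval(); the flag value is duplicated from this header as an assumption:

/* Illustrative: check AT_HWCAP for the movli.l/movco.l (LL/SC) feature bit. */
#include <stdio.h>
#include <sys/auxv.h>

#define DEMO_CPU_HAS_LLSC 0x0040	/* assumed to match CPU_HAS_LLSC above */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("AT_HWCAP = 0x%lx, LL/SC %s\n", hwcap,
	       (hwcap & DEMO_CPU_HAS_LLSC) ? "available" : "not reported");
	return 0;
}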


@@ -0,0 +1,6 @@
#ifndef __SH_CPUTIME_H
#define __SH_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __SH_CPUTIME_H */


@@ -0,0 +1 @@
#include <asm-generic/current.h>


@@ -0,0 +1,26 @@
#ifndef __ASM_SH_DELAY_H
#define __ASM_SH_DELAY_H
/*
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines calling functions in arch/sh/lib/delay.c
*/
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
#define udelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
__udelay(n))
#define ndelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
__ndelay(n))
#endif /* __ASM_SH_DELAY_H */
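
The 0x10c6 and 5 multipliers above are fixed-point scale factors: udelay() hands __const_udelay() the delay pre-converted to a 32-bit fraction of a second (2^32 / 10^6 ≈ 0x10c6 per microsecond, 2^32 / 10^9 ≈ 4.3, rounded up to 5, per nanosecond). A quick check of that arithmetic:

/* Sanity-check the fixed-point scale factors used by udelay()/ndelay() above. */
#include <stdio.h>

int main(void)
{
	double per_usec = 4294967296.0 / 1e6;	/* 2^32 / 10^6 */
	double per_nsec = 4294967296.0 / 1e9;	/* 2^32 / 10^9 */

	printf("2^32/1e6 = %.3f  (header uses 0x10c6 = %d)\n", per_usec, 0x10c6);
	printf("2^32/1e9 = %.3f  (header uses 5)\n", per_nsec);
	return 0;
}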


@@ -0,0 +1,28 @@
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
struct dev_archdata {
};
struct platform_device;
/* allocate contiguous memory chunk and fill in struct resource */
int platform_resource_setup_memory(struct platform_device *pdev,
char *name, unsigned long memsize);
void plat_early_device_setup(void);
#define PDEV_ARCHDATA_FLAG_INIT 0
#define PDEV_ARCHDATA_FLAG_IDLE 1
#define PDEV_ARCHDATA_FLAG_SUSP 2
struct pdev_archdata {
int hwblk_id;
#ifdef CONFIG_PM_RUNTIME
unsigned long flags;
struct list_head entry;
struct mutex mutex;
#endif
};


@@ -0,0 +1 @@
#include <asm-generic/div64.h>


@@ -0,0 +1,216 @@
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm-generic/dma-coherent.h>
extern struct bus_type pci_bus_type;
#define dma_supported(dev, mask) (1)
static inline int dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
static inline dma_addr_t dma_map_single(struct device *dev,
void *ptr, size_t size,
enum dma_data_direction dir)
{
dma_addr_t addr = virt_to_phys(ptr);
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
if (dev->bus == &pci_bus_type)
return addr;
#endif
dma_cache_sync(dev, ptr, size, dir);
debug_dma_map_page(dev, virt_to_page(ptr),
(unsigned long)ptr & ~PAGE_MASK, size,
dir, addr, true);
return addr;
}
static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
debug_dma_unmap_page(dev, addr, size, dir, true);
}
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
int i;
for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
sg[i].dma_address = sg_phys(&sg[i]);
sg[i].dma_length = sg[i].length;
}
debug_dma_map_sg(dev, sg, nents, i, dir);
return nents;
}
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
debug_dma_unmap_sg(dev, sg, nents, dir);
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
return dma_map_single(dev, page_address(page) + offset, size, dir);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction dir)
{
dma_unmap_single(dev, dma_address, size, dir);
}
static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
if (dev->bus == &pci_bus_type)
return;
#endif
dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}
static inline void dma_sync_single_range(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
if (dev->bus == &pci_bus_type)
return;
#endif
dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}
static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
int i;
for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
sg[i].dma_address = sg_phys(&sg[i]);
sg[i].dma_length = sg[i].length;
}
}
static inline void dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size,
enum dma_data_direction dir)
{
__dma_sync_single(dev, dma_handle, size, dir);
debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction direction)
{
debug_dma_sync_single_range_for_cpu(dev, dma_handle,
offset, size, direction);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction direction)
{
__dma_sync_single(dev, dma_handle+offset, size, direction);
debug_dma_sync_single_range_for_device(dev, dma_handle,
offset, size, direction);
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
static inline void dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
__dma_sync_sg(dev, sg, nelems, dir);
debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
static inline int dma_get_cache_alignment(void)
{
/*
* Each processor family defines its own L1_CACHE_SHIFT;
* L1_CACHE_BYTES wraps to this, so this is always safe.
*/
return L1_CACHE_BYTES;
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == 0;
}
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dma_addr_t device_addr, size_t size, int flags);
extern void
dma_release_declared_memory(struct device *dev);
extern void *
dma_mark_declared_memory_occupied(struct device *dev,
dma_addr_t device_addr, size_t size);
#endif /* __ASM_SH_DMA_MAPPING_H */
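
A minimal driver-style sketch (not a standalone program) of the streaming-DMA calling sequence for the API declared above; demo_tx(), its device and its buffer are hypothetical:

/* Driver-style sketch: map a buffer for device reads, then unmap when done. */
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

static int demo_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand 'handle' to the DMA controller and start the transfer ... */

	/* The CPU must not touch 'buf' until the device is finished; then: */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}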


@@ -0,0 +1,132 @@
/*
* arch/sh/include/asm/dma-sh.h
*
* Copyright (C) 2000 Takashi YOSHII
* Copyright (C) 2003 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __DMA_SH_H
#define __DMA_SH_H
#include <asm/dma.h>
#include <cpu/dma.h>
/* DMAOR control: the DMAOR access size differs between CPUs. */
#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
defined(CONFIG_CPU_SUBTYPE_SH7724) || \
defined(CONFIG_CPU_SUBTYPE_SH7780) || \
defined(CONFIG_CPU_SUBTYPE_SH7785)
#define dmaor_read_reg(n) \
(n ? ctrl_inw(SH_DMAC_BASE1 + DMAOR) \
: ctrl_inw(SH_DMAC_BASE0 + DMAOR))
#define dmaor_write_reg(n, data) \
(n ? ctrl_outw(data, SH_DMAC_BASE1 + DMAOR) \
: ctrl_outw(data, SH_DMAC_BASE0 + DMAOR))
#else /* Other CPU */
#define dmaor_read_reg(n) ctrl_inw(SH_DMAC_BASE0 + DMAOR)
#define dmaor_write_reg(n, data) ctrl_outw(data, SH_DMAC_BASE0 + DMAOR)
#endif
static int dmte_irq_map[] __maybe_unused = {
#if (MAX_DMA_CHANNELS >= 4)
DMTE0_IRQ,
DMTE0_IRQ + 1,
DMTE0_IRQ + 2,
DMTE0_IRQ + 3,
#endif
#if (MAX_DMA_CHANNELS >= 6)
DMTE4_IRQ,
DMTE4_IRQ + 1,
#endif
#if (MAX_DMA_CHANNELS >= 8)
DMTE6_IRQ,
DMTE6_IRQ + 1,
#endif
#if (MAX_DMA_CHANNELS >= 12)
DMTE8_IRQ,
DMTE9_IRQ,
DMTE10_IRQ,
DMTE11_IRQ,
#endif
};
/* Definitions for the SuperH DMAC */
#define REQ_L 0x00000000
#define REQ_E 0x00080000
#define RACK_H 0x00000000
#define RACK_L 0x00040000
#define ACK_R 0x00000000
#define ACK_W 0x00020000
#define ACK_H 0x00000000
#define ACK_L 0x00010000
#define DM_INC 0x00004000
#define DM_DEC 0x00008000
#define SM_INC 0x00001000
#define SM_DEC 0x00002000
#define RS_IN 0x00000200
#define RS_OUT 0x00000300
#define TS_BLK 0x00000040
#define TM_BUR 0x00000020
#define CHCR_DE 0x00000001
#define CHCR_TE 0x00000002
#define CHCR_IE 0x00000004
/* DMAOR definitions */
#define DMAOR_AE 0x00000004
#define DMAOR_NMIF 0x00000002
#define DMAOR_DME 0x00000001
/*
* Define the default configuration for dual address memory-memory transfer.
* The 0x400 value represents auto-request, external->external.
*/
#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_32)
/* DMA base address */
static u32 dma_base_addr[] __maybe_unused = {
#if (MAX_DMA_CHANNELS >= 4)
SH_DMAC_BASE0 + 0x00, /* channel 0 */
SH_DMAC_BASE0 + 0x10,
SH_DMAC_BASE0 + 0x20,
SH_DMAC_BASE0 + 0x30,
#endif
#if (MAX_DMA_CHANNELS >= 6)
SH_DMAC_BASE0 + 0x50,
SH_DMAC_BASE0 + 0x60,
#endif
#if (MAX_DMA_CHANNELS >= 8)
SH_DMAC_BASE1 + 0x00,
SH_DMAC_BASE1 + 0x10,
#endif
#if (MAX_DMA_CHANNELS >= 12)
SH_DMAC_BASE1 + 0x20,
SH_DMAC_BASE1 + 0x30,
SH_DMAC_BASE1 + 0x50,
SH_DMAC_BASE1 + 0x60, /* channel 11 */
#endif
};
/* DMA register */
#define SAR 0x00
#define DAR 0x04
#define TCR 0x08
#define CHCR 0x0C
#define DMAOR 0x40
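/*
 * Illustrative sketch only (not part of this header): with the register
 * offsets and flag bits defined above, a driver could program channel 0
 * for a dual-address memory-to-memory transfer roughly as follows. It
 * assumes ctrl_outl() from <asm/io.h>, TS_32 from <cpu/dma.h>, and
 * physical source/destination addresses src_phys/dst_phys:
 *
 *	ctrl_outl(src_phys, dma_base_addr[0] + SAR);
 *	ctrl_outl(dst_phys, dma_base_addr[0] + DAR);
 *	ctrl_outl(len >> 2, dma_base_addr[0] + TCR);	// count in 32-bit units
 *	ctrl_outl(RS_DUAL | CHCR_IE | CHCR_DE, dma_base_addr[0] + CHCR);
 *	dmaor_write_reg(0, dmaor_read_reg(0) | DMAOR_DME);
 */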
/*
* for dma engine
*
* SuperH DMA mode
*/
#define SHDMA_MIX_IRQ (1 << 1)
#define SHDMA_DMAOR1 (1 << 2)
#define SHDMA_DMAE1 (1 << 3)
struct sh_dmae_pdata {
unsigned int mode;
};
#endif /* __DMA_SH_H */

View File

@@ -0,0 +1,167 @@
/*
* include/asm-sh/dma.h
*
* Copyright (C) 2003, 2004 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_SH_DMA_H
#define __ASM_SH_DMA_H
#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/sysdev.h>
#include <cpu/dma.h>
#include <asm-generic/dma.h>
#ifdef CONFIG_NR_DMA_CHANNELS
# define MAX_DMA_CHANNELS (CONFIG_NR_DMA_CHANNELS)
#else
# define MAX_DMA_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS)
#endif
/*
* Read and write modes can mean drastically different things depending on the
* channel configuration. Consult your DMAC documentation and module
* implementation for further clues.
*/
#define DMA_MODE_READ 0x00
#define DMA_MODE_WRITE 0x01
#define DMA_MODE_MASK 0x01
#define DMA_AUTOINIT 0x10
/*
* DMAC (dma_info) flags
*/
enum {
DMAC_CHANNELS_CONFIGURED = 0x01,
DMAC_CHANNELS_TEI_CAPABLE = 0x02, /* Transfer end interrupt */
};
/*
* DMA channel capabilities / flags
*/
enum {
DMA_CONFIGURED = 0x01,
/*
* Transfer end interrupt, inherited from DMAC.
* wait_queue used in dma_wait_for_completion.
*/
DMA_TEI_CAPABLE = 0x02,
};
extern spinlock_t dma_spin_lock;
struct dma_channel;
struct dma_ops {
int (*request)(struct dma_channel *chan);
void (*free)(struct dma_channel *chan);
int (*get_residue)(struct dma_channel *chan);
int (*xfer)(struct dma_channel *chan, unsigned long sar,
unsigned long dar, size_t count, unsigned int mode);
int (*configure)(struct dma_channel *chan, unsigned long flags);
int (*extend)(struct dma_channel *chan, unsigned long op, void *param);
};
struct dma_info;
struct dma_channel {
char dev_id[16]; /* unique per-DMAC channel name */
struct dma_info *info; /* SIM: can this be simply dma_ops? */
unsigned int chan; /* DMAC channel number */
unsigned int vchan; /* Virtual channel number */
unsigned int mode;
unsigned int count;
unsigned long sar;
unsigned long dar;
const char **caps;
unsigned long flags;
atomic_t busy;
wait_queue_head_t wait_queue;
struct sys_device dev;
void *priv_data;
};
struct dma_info {
struct platform_device *pdev;
const char *name;
unsigned int nr_channels;
unsigned long flags;
struct dma_ops *ops;
struct dma_channel *channels;
struct list_head list;
int first_channel_nr;
int first_vchannel_nr;
};
struct dma_chan_caps {
int ch_num;
const char **caplist;
};
#define to_dma_channel(channel) container_of(channel, struct dma_channel, dev)
/* arch/sh/drivers/dma/dma-api.c */
extern int dma_xfer(unsigned int chan, unsigned long from,
unsigned long to, size_t size, unsigned int mode);
#define dma_write(chan, from, to, size) \
dma_xfer(chan, from, to, size, DMA_MODE_WRITE)
#define dma_write_page(chan, from, to) \
dma_write(chan, from, to, PAGE_SIZE)
#define dma_read(chan, from, to, size) \
dma_xfer(chan, from, to, size, DMA_MODE_READ)
#define dma_read_page(chan, from, to) \
dma_read(chan, from, to, PAGE_SIZE)
extern int request_dma_bycap(const char **dmac, const char **caps,
const char *dev_id);
extern int get_dma_residue(unsigned int chan);
extern struct dma_info *get_dma_info(unsigned int chan);
extern struct dma_channel *get_dma_channel(unsigned int chan);
extern void dma_wait_for_completion(unsigned int chan);
extern void dma_configure_channel(unsigned int chan, unsigned long flags);
extern int register_dmac(struct dma_info *info);
extern void unregister_dmac(struct dma_info *info);
extern struct dma_info *get_dma_info_by_name(const char *dmac_name);
extern int dma_extend(unsigned int chan, unsigned long op, void *param);
extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist);
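/*
 * Example only, not part of the API above: a driver might allocate a
 * channel by capability and run a one-shot transfer roughly as follows.
 * The DMAC name, capability strings and src_phys/dst_phys are
 * hypothetical and depend on what the platform's DMAC driver registers:
 *
 *	static const char *dmac[] = { "sh-dmac", NULL };
 *	static const char *caps[] = { "mem-to-mem", NULL };
 *	int chan = request_dma_bycap(dmac, caps, "mydriver");
 *
 *	if (chan >= 0) {
 *		dma_write(chan, src_phys, dst_phys, PAGE_SIZE);
 *		dma_wait_for_completion(chan);
 *	}
 */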
/* arch/sh/drivers/dma/dma-sysfs.c */
extern int dma_create_sysfs_files(struct dma_channel *, struct dma_info *);
extern void dma_remove_sysfs_files(struct dma_channel *, struct dma_info *);
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#ifdef CONFIG_STM_DMA
#define DMA_REQ_ANY_CHANNEL 0xf0f0f0f0
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_SH_DMA_H */

View File

@@ -0,0 +1,23 @@
/*
* SH7760 DMABRG (USB/Audio) support
*/
#ifndef _DMABRG_H_
#define _DMABRG_H_
/* IRQ sources */
#define DMABRGIRQ_USBDMA 0
#define DMABRGIRQ_USBDMAERR 1
#define DMABRGIRQ_A0TXF 2
#define DMABRGIRQ_A0TXH 3
#define DMABRGIRQ_A0RXF 4
#define DMABRGIRQ_A0RXH 5
#define DMABRGIRQ_A1TXF 6
#define DMABRGIRQ_A1TXH 7
#define DMABRGIRQ_A1RXF 8
#define DMABRGIRQ_A1RXH 9
extern int dmabrg_request_irq(unsigned int, void(*)(void *), void *);
extern void dmabrg_free_irq(unsigned int);
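/*
 * Usage sketch (illustrative only): a driver interested in the audio
 * channel 0 TX half event would hook it with a callback of its own, e.g.
 *
 *	dmabrg_request_irq(DMABRGIRQ_A0TXH, my_tx_callback, my_data);
 *	...
 *	dmabrg_free_irq(DMABRGIRQ_A0TXH);
 *
 * where my_tx_callback() and my_data are driver-provided placeholders.
 */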
#endif

View File

@@ -0,0 +1,398 @@
/*
* Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
#ifndef __ASM_SH_DWARF_H
#define __ASM_SH_DWARF_H
#ifdef CONFIG_DWARF_UNWINDER
/*
* DWARF expression operations
*/
#define DW_OP_addr 0x03
#define DW_OP_deref 0x06
#define DW_OP_const1u 0x08
#define DW_OP_const1s 0x09
#define DW_OP_const2u 0x0a
#define DW_OP_const2s 0x0b
#define DW_OP_const4u 0x0c
#define DW_OP_const4s 0x0d
#define DW_OP_const8u 0x0e
#define DW_OP_const8s 0x0f
#define DW_OP_constu 0x10
#define DW_OP_consts 0x11
#define DW_OP_dup 0x12
#define DW_OP_drop 0x13
#define DW_OP_over 0x14
#define DW_OP_pick 0x15
#define DW_OP_swap 0x16
#define DW_OP_rot 0x17
#define DW_OP_xderef 0x18
#define DW_OP_abs 0x19
#define DW_OP_and 0x1a
#define DW_OP_div 0x1b
#define DW_OP_minus 0x1c
#define DW_OP_mod 0x1d
#define DW_OP_mul 0x1e
#define DW_OP_neg 0x1f
#define DW_OP_not 0x20
#define DW_OP_or 0x21
#define DW_OP_plus 0x22
#define DW_OP_plus_uconst 0x23
#define DW_OP_shl 0x24
#define DW_OP_shr 0x25
#define DW_OP_shra 0x26
#define DW_OP_xor 0x27
#define DW_OP_skip 0x2f
#define DW_OP_bra 0x28
#define DW_OP_eq 0x29
#define DW_OP_ge 0x2a
#define DW_OP_gt 0x2b
#define DW_OP_le 0x2c
#define DW_OP_lt 0x2d
#define DW_OP_ne 0x2e
#define DW_OP_lit0 0x30
#define DW_OP_lit1 0x31
#define DW_OP_lit2 0x32
#define DW_OP_lit3 0x33
#define DW_OP_lit4 0x34
#define DW_OP_lit5 0x35
#define DW_OP_lit6 0x36
#define DW_OP_lit7 0x37
#define DW_OP_lit8 0x38
#define DW_OP_lit9 0x39
#define DW_OP_lit10 0x3a
#define DW_OP_lit11 0x3b
#define DW_OP_lit12 0x3c
#define DW_OP_lit13 0x3d
#define DW_OP_lit14 0x3e
#define DW_OP_lit15 0x3f
#define DW_OP_lit16 0x40
#define DW_OP_lit17 0x41
#define DW_OP_lit18 0x42
#define DW_OP_lit19 0x43
#define DW_OP_lit20 0x44
#define DW_OP_lit21 0x45
#define DW_OP_lit22 0x46
#define DW_OP_lit23 0x47
#define DW_OP_lit24 0x48
#define DW_OP_lit25 0x49
#define DW_OP_lit26 0x4a
#define DW_OP_lit27 0x4b
#define DW_OP_lit28 0x4c
#define DW_OP_lit29 0x4d
#define DW_OP_lit30 0x4e
#define DW_OP_lit31 0x4f
#define DW_OP_reg0 0x50
#define DW_OP_reg1 0x51
#define DW_OP_reg2 0x52
#define DW_OP_reg3 0x53
#define DW_OP_reg4 0x54
#define DW_OP_reg5 0x55
#define DW_OP_reg6 0x56
#define DW_OP_reg7 0x57
#define DW_OP_reg8 0x58
#define DW_OP_reg9 0x59
#define DW_OP_reg10 0x5a
#define DW_OP_reg11 0x5b
#define DW_OP_reg12 0x5c
#define DW_OP_reg13 0x5d
#define DW_OP_reg14 0x5e
#define DW_OP_reg15 0x5f
#define DW_OP_reg16 0x60
#define DW_OP_reg17 0x61
#define DW_OP_reg18 0x62
#define DW_OP_reg19 0x63
#define DW_OP_reg20 0x64
#define DW_OP_reg21 0x65
#define DW_OP_reg22 0x66
#define DW_OP_reg23 0x67
#define DW_OP_reg24 0x68
#define DW_OP_reg25 0x69
#define DW_OP_reg26 0x6a
#define DW_OP_reg27 0x6b
#define DW_OP_reg28 0x6c
#define DW_OP_reg29 0x6d
#define DW_OP_reg30 0x6e
#define DW_OP_reg31 0x6f
#define DW_OP_breg0 0x70
#define DW_OP_breg1 0x71
#define DW_OP_breg2 0x72
#define DW_OP_breg3 0x73
#define DW_OP_breg4 0x74
#define DW_OP_breg5 0x75
#define DW_OP_breg6 0x76
#define DW_OP_breg7 0x77
#define DW_OP_breg8 0x78
#define DW_OP_breg9 0x79
#define DW_OP_breg10 0x7a
#define DW_OP_breg11 0x7b
#define DW_OP_breg12 0x7c
#define DW_OP_breg13 0x7d
#define DW_OP_breg14 0x7e
#define DW_OP_breg15 0x7f
#define DW_OP_breg16 0x80
#define DW_OP_breg17 0x81
#define DW_OP_breg18 0x82
#define DW_OP_breg19 0x83
#define DW_OP_breg20 0x84
#define DW_OP_breg21 0x85
#define DW_OP_breg22 0x86
#define DW_OP_breg23 0x87
#define DW_OP_breg24 0x88
#define DW_OP_breg25 0x89
#define DW_OP_breg26 0x8a
#define DW_OP_breg27 0x8b
#define DW_OP_breg28 0x8c
#define DW_OP_breg29 0x8d
#define DW_OP_breg30 0x8e
#define DW_OP_breg31 0x8f
#define DW_OP_regx 0x90
#define DW_OP_fbreg 0x91
#define DW_OP_bregx 0x92
#define DW_OP_piece 0x93
#define DW_OP_deref_size 0x94
#define DW_OP_xderef_size 0x95
#define DW_OP_nop 0x96
#define DW_OP_push_object_address 0x97
#define DW_OP_call2 0x98
#define DW_OP_call4 0x99
#define DW_OP_call_ref 0x9a
#define DW_OP_form_tls_address 0x9b
#define DW_OP_call_frame_cfa 0x9c
#define DW_OP_bit_piece 0x9d
#define DW_OP_lo_user 0xe0
#define DW_OP_hi_user 0xff
/*
* Addresses used in FDE entries in the .eh_frame section may be encoded
* using one of the following encodings.
*/
#define DW_EH_PE_absptr 0x00
#define DW_EH_PE_omit 0xff
#define DW_EH_PE_uleb128 0x01
#define DW_EH_PE_udata2 0x02
#define DW_EH_PE_udata4 0x03
#define DW_EH_PE_udata8 0x04
#define DW_EH_PE_sleb128 0x09
#define DW_EH_PE_sdata2 0x0a
#define DW_EH_PE_sdata4 0x0b
#define DW_EH_PE_sdata8 0x0c
#define DW_EH_PE_signed 0x09
#define DW_EH_PE_pcrel 0x10
/*
* The architecture-specific register number that contains the return
* address in the .debug_frame table.
*/
#define DWARF_ARCH_RA_REG 17
#ifndef __ASSEMBLY__
/*
* Read either the frame pointer (r14) or the stack pointer (r15).
* NOTE: this MUST be inlined.
*/
static __always_inline unsigned long dwarf_read_arch_reg(unsigned int reg)
{
unsigned long value = 0;
switch (reg) {
case 14:
__asm__ __volatile__("mov r14, %0\n" : "=r" (value));
break;
case 15:
__asm__ __volatile__("mov r15, %0\n" : "=r" (value));
break;
default:
BUG();
}
return value;
}
/**
* dwarf_cie - Common Information Entry
*/
struct dwarf_cie {
unsigned long length;
unsigned long cie_id;
unsigned char version;
const char *augmentation;
unsigned int code_alignment_factor;
int data_alignment_factor;
/* Which column in the rule table represents return addr of func. */
unsigned int return_address_reg;
unsigned char *initial_instructions;
unsigned char *instructions_end;
unsigned char encoding;
unsigned long cie_pointer;
struct list_head link;
unsigned long flags;
#define DWARF_CIE_Z_AUGMENTATION (1 << 0)
};
/**
* dwarf_fde - Frame Description Entry
*/
struct dwarf_fde {
unsigned long length;
unsigned long cie_pointer;
struct dwarf_cie *cie;
unsigned long initial_location;
unsigned long address_range;
unsigned char *instructions;
unsigned char *end;
struct list_head link;
};
/**
* dwarf_frame - DWARF information for a frame in the call stack
*/
struct dwarf_frame {
struct dwarf_frame *prev, *next;
unsigned long pc;
struct list_head reg_list;
unsigned long cfa;
/* Valid when DW_FRAME_CFA_REG_OFFSET is set in flags */
unsigned int cfa_register;
unsigned int cfa_offset;
/* Valid when DW_FRAME_CFA_REG_EXP is set in flags */
unsigned char *cfa_expr;
unsigned int cfa_expr_len;
unsigned long flags;
#define DWARF_FRAME_CFA_REG_OFFSET (1 << 0)
#define DWARF_FRAME_CFA_REG_EXP (1 << 1)
unsigned long return_addr;
};
/**
* dwarf_reg - DWARF register
* @flags: Describes how to calculate the value of this register
*/
struct dwarf_reg {
struct list_head link;
unsigned int number;
unsigned long addr;
unsigned long flags;
#define DWARF_REG_OFFSET (1 << 0)
#define DWARF_VAL_OFFSET (1 << 1)
#define DWARF_UNDEFINED (1 << 2)
};
/*
* Call Frame instruction opcodes.
*/
#define DW_CFA_advance_loc 0x40
#define DW_CFA_offset 0x80
#define DW_CFA_restore 0xc0
#define DW_CFA_nop 0x00
#define DW_CFA_set_loc 0x01
#define DW_CFA_advance_loc1 0x02
#define DW_CFA_advance_loc2 0x03
#define DW_CFA_advance_loc4 0x04
#define DW_CFA_offset_extended 0x05
#define DW_CFA_restore_extended 0x06
#define DW_CFA_undefined 0x07
#define DW_CFA_same_value 0x08
#define DW_CFA_register 0x09
#define DW_CFA_remember_state 0x0a
#define DW_CFA_restore_state 0x0b
#define DW_CFA_def_cfa 0x0c
#define DW_CFA_def_cfa_register 0x0d
#define DW_CFA_def_cfa_offset 0x0e
#define DW_CFA_def_cfa_expression 0x0f
#define DW_CFA_expression 0x10
#define DW_CFA_offset_extended_sf 0x11
#define DW_CFA_def_cfa_sf 0x12
#define DW_CFA_def_cfa_offset_sf 0x13
#define DW_CFA_val_offset 0x14
#define DW_CFA_val_offset_sf 0x15
#define DW_CFA_val_expression 0x16
#define DW_CFA_lo_user 0x1c
#define DW_CFA_hi_user 0x3f
/* GNU extension opcodes */
#define DW_CFA_GNU_args_size 0x2e
#define DW_CFA_GNU_negative_offset_extended 0x2f
/*
* Some call frame instructions encode their operands in the opcode. We
* need some helper functions to extract both the opcode and operands
* from an instruction.
*/
static inline unsigned int DW_CFA_opcode(unsigned long insn)
{
return (insn & 0xc0);
}
static inline unsigned int DW_CFA_operand(unsigned long insn)
{
return (insn & 0x3f);
}
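/*
 * Worked example: the single byte 0x45 has the 0x40 bit set in its top
 * two bits, so DW_CFA_opcode(0x45) == DW_CFA_advance_loc and
 * DW_CFA_operand(0x45) == 5, i.e. "advance the location counter by
 * 5 * code_alignment_factor".
 */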
#define DW_EH_FRAME_CIE 0 /* .eh_frame CIE IDs are 0 */
#define DW_CIE_ID 0xffffffff
#define DW64_CIE_ID 0xffffffffffffffffULL
/*
* DWARF FDE/CIE length field values.
*/
#define DW_EXT_LO 0xfffffff0
#define DW_EXT_HI 0xffffffff
#define DW_EXT_DWARF64 DW_EXT_HI
extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
struct dwarf_frame *);
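/*
 * Illustrative sketch of how the unwinder is typically driven (see
 * arch/sh/kernel/dwarf.c for the real users): start from a PC with no
 * previous frame, then keep feeding the return address back in:
 *
 *	struct dwarf_frame *frame = dwarf_unwind_stack(pc, NULL);
 *
 *	while (frame && frame->return_addr) {
 *		// record frame->return_addr here
 *		frame = dwarf_unwind_stack(frame->return_addr, frame);
 *	}
 */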
#endif /* !__ASSEMBLY__ */
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_REGISTER .cfi_register
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_UNDEFINED .cfi_undefined
#else
/*
* Use the asm comment character to ignore the rest of the line.
*/
#define CFI_IGNORE !
#define CFI_STARTPROC CFI_IGNORE
#define CFI_ENDPROC CFI_IGNORE
#define CFI_DEF_CFA CFI_IGNORE
#define CFI_REGISTER CFI_IGNORE
#define CFI_REL_OFFSET CFI_IGNORE
#define CFI_UNDEFINED CFI_IGNORE
#ifndef __ASSEMBLY__
static inline void dwarf_unwinder_init(void)
{
}
#endif
#endif /* CONFIG_DWARF_UNWINDER */
#endif /* __ASM_SH_DWARF_H */

View File

@@ -0,0 +1,246 @@
#ifndef __ASM_SH_ELF_H
#define __ASM_SH_ELF_H
#include <linux/utsname.h>
#include <asm/auxvec.h>
#include <asm/ptrace.h>
#include <asm/user.h>
/* ELF header e_flags defines */
#define EF_SH_PIC 0x100 /* -fpic */
#define EF_SH_FDPIC 0x8000 /* -mfdpic */
/* SH (particularly SHcompact) relocation types */
#define R_SH_NONE 0
#define R_SH_DIR32 1
#define R_SH_REL32 2
#define R_SH_DIR8WPN 3
#define R_SH_IND12W 4
#define R_SH_DIR8WPL 5
#define R_SH_DIR8WPZ 6
#define R_SH_DIR8BP 7
#define R_SH_DIR8W 8
#define R_SH_DIR8L 9
#define R_SH_SWITCH16 25
#define R_SH_SWITCH32 26
#define R_SH_USES 27
#define R_SH_COUNT 28
#define R_SH_ALIGN 29
#define R_SH_CODE 30
#define R_SH_DATA 31
#define R_SH_LABEL 32
#define R_SH_SWITCH8 33
#define R_SH_GNU_VTINHERIT 34
#define R_SH_GNU_VTENTRY 35
#define R_SH_TLS_GD_32 144
#define R_SH_TLS_LD_32 145
#define R_SH_TLS_LDO_32 146
#define R_SH_TLS_IE_32 147
#define R_SH_TLS_LE_32 148
#define R_SH_TLS_DTPMOD32 149
#define R_SH_TLS_DTPOFF32 150
#define R_SH_TLS_TPOFF32 151
#define R_SH_GOT32 160
#define R_SH_PLT32 161
#define R_SH_COPY 162
#define R_SH_GLOB_DAT 163
#define R_SH_JMP_SLOT 164
#define R_SH_RELATIVE 165
#define R_SH_GOTOFF 166
#define R_SH_GOTPC 167
/* FDPIC relocs */
#define R_SH_GOT20 70
#define R_SH_GOTOFF20 71
#define R_SH_GOTFUNCDESC 72
#define R_SH_GOTFUNCDESC20 73
#define R_SH_GOTOFFFUNCDESC 74
#define R_SH_GOTOFFFUNCDESC20 75
#define R_SH_FUNCDESC 76
#define R_SH_FUNCDESC_VALUE 77
#if 0 /* XXX - later .. */
#define R_SH_GOT20 198
#define R_SH_GOTOFF20 199
#define R_SH_GOTFUNCDESC 200
#define R_SH_GOTFUNCDESC20 201
#define R_SH_GOTOFFFUNCDESC 202
#define R_SH_GOTOFFFUNCDESC20 203
#define R_SH_FUNCDESC 204
#define R_SH_FUNCDESC_VALUE 205
#endif
/* SHmedia relocs */
#define R_SH_IMM_LOW16 246
#define R_SH_IMM_LOW16_PCREL 247
#define R_SH_IMM_MEDLOW16 248
#define R_SH_IMM_MEDLOW16_PCREL 249
/* Keep this the last entry. */
#define R_SH_NUM 256
/*
* ELF register definitions..
*/
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fpu_struct elf_fpregset_t;
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#ifdef __LITTLE_ENDIAN__
#define ELF_DATA ELFDATA2LSB
#else
#define ELF_DATA ELFDATA2MSB
#endif
#define ELF_ARCH EM_SH
#ifdef __KERNEL__
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_SH)
#define elf_check_fdpic(x) ((x)->e_flags & EF_SH_FDPIC)
#define elf_check_const_displacement(x) ((x)->e_flags & EF_SH_PIC)
/*
* Enable dump using regset.
* This covers all of general/DSP/FPU regs.
*/
#define CORE_DUMP_USE_REGSET
#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
#define ELF_CORE_COPY_REGS(_dest,_regs) \
memcpy((char *) &_dest, (char *) _regs, \
sizeof(struct pt_regs));
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (boot_cpu_data.flags)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define ELF_PLATFORM (utsname()->machine)
#ifdef __SH5__
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
_r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
_r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
_r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
_r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
_r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
_r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
_r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
_r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
_r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
_r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
_r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
_r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
_r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
_r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
_r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
_r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
_r->sr = SR_FD | SR_MMU; } while (0)
#else
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
_r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
_r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; \
_r->sr = SR_FD; } while (0)
#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, \
_dynamic_addr) \
do { \
_r->regs[0] = 0; \
_r->regs[1] = 0; \
_r->regs[2] = 0; \
_r->regs[3] = 0; \
_r->regs[4] = 0; \
_r->regs[5] = 0; \
_r->regs[6] = 0; \
_r->regs[7] = 0; \
_r->regs[8] = _exec_map_addr; \
_r->regs[9] = _interp_map_addr; \
_r->regs[10] = _dynamic_addr; \
_r->regs[11] = 0; \
_r->regs[12] = 0; \
_r->regs[13] = 0; \
_r->regs[14] = 0; \
_r->sr = SR_FD; \
} while (0)
#endif
#define SET_PERSONALITY(ex) set_personality(PER_LINUX_32BIT)
#ifdef CONFIG_VSYSCALL
/* vDSO has arch_setup_additional_pages */
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
extern unsigned int vdso_enabled;
extern void __kernel_vsyscall;
#define VDSO_BASE ((unsigned long)current->mm->context.vdso)
#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))
#define VSYSCALL_AUX_ENT \
if (vdso_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
else \
NEW_AUX_ENT(AT_IGNORE, 0);
#else
#define VSYSCALL_AUX_ENT
#endif /* CONFIG_VSYSCALL */
#ifdef CONFIG_SH_FPU
#define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
#else
#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
#endif
extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
/* Optional FPU initialization */ \
FPU_AUX_ENT; \
\
/* Optional vsyscall entry */ \
VSYSCALL_AUX_ENT; \
\
/* Cache desc */ \
NEW_AUX_ENT(AT_L1I_CACHESHAPE, l1i_cache_shape); \
NEW_AUX_ENT(AT_L1D_CACHESHAPE, l1d_cache_shape); \
NEW_AUX_ENT(AT_L2_CACHESHAPE, l2_cache_shape); \
} while (0)
#endif /* __KERNEL__ */
#endif /* __ASM_SH_ELF_H */

View File

@@ -0,0 +1,6 @@
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* _ASM_EMERGENCY_RESTART_H */

View File

@@ -0,0 +1,120 @@
! entry.S macro define
.macro cli
stc sr, r0
or #0xf0, r0
ldc r0, sr
.endm
.macro sti
mov #0xfffffff0, r11
extu.b r11, r11
not r11, r11
stc sr, r10
and r11, r10
#ifdef CONFIG_CPU_HAS_SR_RB
stc k_g_imask, r11
or r11, r10
#endif
ldc r10, sr
.endm
.macro get_current_thread_info, ti, tmp
#ifdef CONFIG_CPU_HAS_SR_RB
stc r7_bank, \ti
#else
mov #((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp
shll8 \tmp
shll2 \tmp
mov r15, \ti
and \tmp, \ti
#endif
.endm
#ifdef CONFIG_TRACE_IRQFLAGS
.macro TRACE_IRQS_ON
mov.l r0, @-r15
mov.l r1, @-r15
mov.l r2, @-r15
mov.l r3, @-r15
mov.l r4, @-r15
mov.l r5, @-r15
mov.l r6, @-r15
mov.l r7, @-r15
mov.l 7834f, r0
jsr @r0
nop
mov.l @r15+, r7
mov.l @r15+, r6
mov.l @r15+, r5
mov.l @r15+, r4
mov.l @r15+, r3
mov.l @r15+, r2
mov.l @r15+, r1
mov.l @r15+, r0
bra 7835f
nop
.balign 4
7834: .long trace_hardirqs_on
7835:
.endm
.macro TRACE_IRQS_OFF
mov.l r0, @-r15
mov.l r1, @-r15
mov.l r2, @-r15
mov.l r3, @-r15
mov.l r4, @-r15
mov.l r5, @-r15
mov.l r6, @-r15
mov.l r7, @-r15
mov.l 7834f, r0
jsr @r0
nop
mov.l @r15+, r7
mov.l @r15+, r6
mov.l @r15+, r5
mov.l @r15+, r4
mov.l @r15+, r3
mov.l @r15+, r2
mov.l @r15+, r1
mov.l @r15+, r0
bra 7835f
nop
.balign 4
7834: .long trace_hardirqs_off
7835:
.endm
#else
.macro TRACE_IRQS_ON
.endm
.macro TRACE_IRQS_OFF
.endm
#endif
#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
# define PREF(x) pref @x
#else
# define PREF(x) nop
#endif
/*
* Macro for use within assembly. Because the DWARF unwinder
* needs to use the frame register to unwind the stack, we
* need to setup r14 with the value of the stack pointer as
* the return address is usually on the stack somewhere.
*/
.macro setup_frame_reg
#ifdef CONFIG_DWARF_UNWINDER
mov r15, r14
#endif
.endm

View File

@@ -0,0 +1,6 @@
#ifndef __ASM_SH_ERRNO_H
#define __ASM_SH_ERRNO_H
#include <asm-generic/errno.h>
#endif /* __ASM_SH_ERRNO_H */

View File

@@ -0,0 +1,19 @@
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */

View File

@@ -0,0 +1 @@
#include <asm-generic/fcntl.h>

View File

@@ -0,0 +1,123 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#include <linux/kernel.h>
#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of P3 backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* These 'compile-time allocated' memory buffers are
* fixed-size 4k pages (or larger if used with an increment
* higher than 1). Use set_fixmap(idx, phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
/*
* on UP currently we will have no trace of the fixmap mechanism,
* no page table allocations, etc. This might change in the
* future, say framebuffers for the console driver(s) could be
* fix-mapped?
*/
enum fixed_addresses {
/*
* The FIX_CMAP entries are used by kmap_coherent() to get virtual
* addresses which are of a known color, and so their values are
* important. __fix_to_virt(FIX_CMAP_END - n) must give an address
* which is the same color as a page (n<<PAGE_SHIFT).
*/
#define FIX_N_COLOURS 16
FIX_CMAP_BEGIN,
FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS - 1,
FIX_UNCACHED,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
__end_of_fixed_addresses
};
extern void __set_fixmap(enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
/*
* Some hardware wants to get fixmapped without caching.
*/
#define set_fixmap_nocache(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
/*
* used by vmalloc.c.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
#ifdef CONFIG_SUPERH32
#define FIXADDR_TOP (P4SEG - PAGE_SIZE)
#else
#define FIXADDR_TOP (0xff000000 - PAGE_SIZE)
#endif
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
extern void __this_fixmap_does_not_exist(void);
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without translation, we catch the bug with a NULL-dereference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* this branch gets completely eliminated after inlining,
* except when someone tries to use fixaddr indices in an
* illegal way. (such as mixing up address types or using
* out-of-range indices).
*
* If it doesn't get removed, the linker will complain
* loudly with a reasonably clear error message..
*/
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
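/*
 * Illustrative example (assumes a page-aligned physical address 'phys'):
 * a driver needing an uncached mapping of one page could use the
 * FIX_UNCACHED slot declared above:
 *
 *	set_fixmap_nocache(FIX_UNCACHED, phys);
 *	void __iomem *p = (void __iomem *)fix_to_virt(FIX_UNCACHED);
 *
 * The virtual address is a compile-time constant; only the physical
 * backing is established at run time.
 */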
#endif

View File

@@ -0,0 +1,30 @@
/*
* include/asm-sh/flat.h
*
* uClinux flat-format executables
*
* Copyright (C) 2003 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive for
* more details.
*/
#ifndef __ASM_SH_FLAT_H
#define __ASM_SH_FLAT_H
#define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp)
#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
#define flat_get_relocate_addr(rel) (rel)
#define flat_set_persistent(relval, p) ({ (void)p; 0; })
#define FLAT_PLAT_INIT(_r) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
_r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
_r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; \
_r->sr = SR_FD; } while (0)
#endif /* __ASM_SH_FLAT_H */

View File

@@ -0,0 +1,84 @@
#ifndef __ASM_SH_FPU_H
#define __ASM_SH_FPU_H
#ifndef __ASSEMBLY__
#include <linux/preempt.h>
#include <asm/ptrace.h>
#ifdef CONFIG_SH_FPU
static inline void release_fpu(struct pt_regs *regs)
{
regs->sr |= SR_FD;
}
static inline void grab_fpu(struct pt_regs *regs)
{
regs->sr &= ~SR_FD;
}
struct task_struct;
extern void save_fpu(struct task_struct *__tsk);
void fpu_state_restore(struct pt_regs *regs);
#else
#define release_fpu(regs) do { } while (0)
#define grab_fpu(regs) do { } while (0)
static inline void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
clear_tsk_thread_flag(tsk, TIF_USEDFPU);
}
#endif
struct user_regset;
extern int do_fpu_inst(unsigned short, struct pt_regs *);
extern int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf);
static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
if (task_thread_info(tsk)->status & TS_USEDFPU) {
task_thread_info(tsk)->status &= ~TS_USEDFPU;
save_fpu(tsk);
release_fpu(regs);
} else
tsk->fpu_counter = 0;
}
static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
preempt_disable();
__unlazy_fpu(tsk, regs);
preempt_enable();
}
static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
preempt_disable();
if (task_thread_info(tsk)->status & TS_USEDFPU) {
task_thread_info(tsk)->status &= ~TS_USEDFPU;
release_fpu(regs);
}
preempt_enable();
}
static inline int init_fpu(struct task_struct *tsk)
{
if (tsk_used_math(tsk)) {
if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
unlazy_fpu(tsk, task_pt_regs(tsk));
return 0;
}
set_stopped_child_used_math(tsk);
return 0;
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_FPU_H */

View File

@@ -0,0 +1,18 @@
/*
* include/asm-sh/freq.h
*
* Copyright (C) 2002, 2003 Paul Mundt
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __ASM_SH_FREQ_H
#define __ASM_SH_FREQ_H
#ifdef __KERNEL__
#include <cpu/freq.h>
#endif /* __KERNEL__ */
#endif /* __ASM_SH_FREQ_H */

View File

@@ -0,0 +1,38 @@
#ifndef __ASM_SH_FTRACE_H
#define __ASM_SH_FTRACE_H
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#define FTRACE_SYSCALL_MAX NR_syscalls
#ifndef __ASSEMBLY__
extern void mcount(void);
#define MCOUNT_ADDR ((long)(mcount))
#ifdef CONFIG_DYNAMIC_FTRACE
#define CALL_ADDR ((long)(ftrace_call))
#define STUB_ADDR ((long)(ftrace_stub))
#define GRAPH_ADDR ((long)(ftrace_graph_call))
#define CALLER_ADDR ((long)(ftrace_caller))
#define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALL_ADDR) - 4)
#define GRAPH_INSN_OFFSET ((CALLER_ADDR - GRAPH_ADDR) - 4)
struct dyn_arch_ftrace {
/* No extra data needed on sh */
};
#endif /* CONFIG_DYNAMIC_FTRACE */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/* 'addr' is the memory table address. */
return addr;
}
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* __ASM_SH_FTRACE_H */

View File

@@ -0,0 +1,111 @@
#ifndef __ASM_SH_FUTEX_IRQ_H
#define __ASM_SH_FUTEX_IRQ_H
#include <asm/system.h>
static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr,
int *oldval)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = get_user(*oldval, uaddr);
if (!ret)
ret = put_user(oparg, uaddr);
local_irq_restore(flags);
return ret;
}
static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr,
int *oldval)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = get_user(*oldval, uaddr);
if (!ret)
ret = put_user(*oldval + oparg, uaddr);
local_irq_restore(flags);
return ret;
}
static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr,
int *oldval)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = get_user(*oldval, uaddr);
if (!ret)
ret = put_user(*oldval | oparg, uaddr);
local_irq_restore(flags);
return ret;
}
static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr,
int *oldval)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = get_user(*oldval, uaddr);
if (!ret)
ret = put_user(*oldval & oparg, uaddr);
local_irq_restore(flags);
return ret;
}
static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr,
int *oldval)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = get_user(*oldval, uaddr);
if (!ret)
ret = put_user(*oldval ^ oparg, uaddr);
local_irq_restore(flags);
return ret;
}
static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr,
int oldval, int newval)
{
unsigned long flags;
int ret, prev = 0;
local_irq_save(flags);
ret = get_user(prev, uaddr);
if (!ret && oldval == prev)
ret = put_user(newval, uaddr);
local_irq_restore(flags);
if (ret)
return ret;
return prev;
}
#endif /* __ASM_SH_FUTEX_IRQ_H */

View File

@@ -0,0 +1,77 @@
#ifndef __ASM_SH_FUTEX_H
#define __ASM_SH_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
/* XXX: UP variants, fix for SH-4A and SMP.. */
#include <asm/futex-irq.h>
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval);
break;
case FUTEX_OP_ADD:
ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval);
break;
case FUTEX_OP_OR:
ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval);
break;
case FUTEX_OP_ANDN:
ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval);
break;
case FUTEX_OP_XOR:
ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval);
break;
default:
ret = -ENOSYS;
break;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
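/*
 * Worked example of the encoding handled above: an encoded_op of
 * 0x10001000 decodes to op = 1 (FUTEX_OP_ADD), cmp = 0 (FUTEX_OP_CMP_EQ),
 * oparg = 1 and cmparg = 0, i.e. "atomically add 1 to *uaddr and return
 * whether the old value was equal to 0".
 */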
static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
return atomic_futex_op_cmpxchg_inatomic(uaddr, oldval, newval);
}
#endif /* __KERNEL__ */
#endif /* __ASM_SH_FUTEX_H */

View File

@@ -0,0 +1,142 @@
/*
* include/asm-sh/gpio.h
*
* Generic GPIO API and pinmux table support for SuperH.
*
* Copyright (c) 2008 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_SH_GPIO_H
#define __ASM_SH_GPIO_H
#include <linux/kernel.h>
#include <linux/errno.h>
#if defined(CONFIG_STM_DRIVERS)
#include <linux/stm/gpio.h>
#else
#if defined(CONFIG_CPU_SH3)
#include <cpu/gpio.h>
#endif
#define ARCH_NR_GPIOS 512
#include <asm-generic/gpio.h>
#ifdef CONFIG_GPIOLIB
static inline int gpio_get_value(unsigned gpio)
{
return __gpio_get_value(gpio);
}
static inline void gpio_set_value(unsigned gpio, int value)
{
__gpio_set_value(gpio, value);
}
static inline int gpio_cansleep(unsigned gpio)
{
return __gpio_cansleep(gpio);
}
static inline int gpio_to_irq(unsigned gpio)
{
WARN_ON(1);
return -ENOSYS;
}
static inline int irq_to_gpio(unsigned int irq)
{
WARN_ON(1);
return -EINVAL;
}
#endif /* CONFIG_GPIOLIB */
typedef unsigned short pinmux_enum_t;
typedef unsigned short pinmux_flag_t;
#define PINMUX_TYPE_NONE 0
#define PINMUX_TYPE_FUNCTION 1
#define PINMUX_TYPE_GPIO 2
#define PINMUX_TYPE_OUTPUT 3
#define PINMUX_TYPE_INPUT 4
#define PINMUX_TYPE_INPUT_PULLUP 5
#define PINMUX_TYPE_INPUT_PULLDOWN 6
#define PINMUX_FLAG_TYPE (0x7)
#define PINMUX_FLAG_WANT_PULLUP (1 << 3)
#define PINMUX_FLAG_WANT_PULLDOWN (1 << 4)
#define PINMUX_FLAG_DBIT_SHIFT 5
#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT)
#define PINMUX_FLAG_DREG_SHIFT 10
#define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT)
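/*
 * The flag word above packs the pin type in bits [2:0], pull-up/down
 * requests in bits [4:3], a bit number within a data register in
 * bits [9:5] and the data register index in bits [15:10]. For example,
 * a flags value of
 *
 *	(2 << PINMUX_FLAG_DREG_SHIFT) | (7 << PINMUX_FLAG_DBIT_SHIFT) |
 *	PINMUX_TYPE_GPIO
 *
 * describes a GPIO routed to bit 7 of data register 2.
 */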
struct pinmux_gpio {
pinmux_enum_t enum_id;
pinmux_flag_t flags;
};
#define PINMUX_GPIO(gpio, data_or_mark) [gpio] = { data_or_mark }
#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
struct pinmux_cfg_reg {
unsigned long reg, reg_width, field_width;
unsigned long *cnt;
pinmux_enum_t *enum_ids;
};
#define PINMUX_CFG_REG(name, r, r_width, f_width) \
.reg = r, .reg_width = r_width, .field_width = f_width, \
.cnt = (unsigned long [r_width / f_width]) {}, \
.enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \
struct pinmux_data_reg {
unsigned long reg, reg_width, reg_shadow;
pinmux_enum_t *enum_ids;
};
#define PINMUX_DATA_REG(name, r, r_width) \
.reg = r, .reg_width = r_width, \
.enum_ids = (pinmux_enum_t [r_width]) \
struct pinmux_range {
pinmux_enum_t begin;
pinmux_enum_t end;
pinmux_enum_t force;
};
struct pinmux_info {
char *name;
pinmux_enum_t reserved_id;
struct pinmux_range data;
struct pinmux_range input;
struct pinmux_range input_pd;
struct pinmux_range input_pu;
struct pinmux_range output;
struct pinmux_range mark;
struct pinmux_range function;
unsigned first_gpio, last_gpio;
struct pinmux_gpio *gpios;
struct pinmux_cfg_reg *cfg_regs;
struct pinmux_data_reg *data_regs;
pinmux_enum_t *gpio_data;
unsigned int gpio_data_size;
unsigned long *gpio_in_use;
struct gpio_chip chip;
};
int register_pinmux(struct pinmux_info *pip);
#endif /* CONFIG_STM_DRIVERS */
#endif /* __ASM_SH_GPIO_H */

View File

@@ -0,0 +1,9 @@
#ifndef __ASM_SH_HARDIRQ_H
#define __ASM_SH_HARDIRQ_H
extern void ack_bad_irq(unsigned int irq);
#define ack_bad_irq ack_bad_irq
#include <asm-generic/hardirq.h>
#endif /* __ASM_SH_HARDIRQ_H */

View File

@@ -0,0 +1,251 @@
#ifndef __ASM_SH_HD64461
#define __ASM_SH_HD64461
/*
* Copyright (C) 2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
* Copyright (C) 2004 Paul Mundt
* Copyright (C) 2000 YAEGASHI Takeshi
*
* Hitachi HD64461 companion chip support
* (please note manual reference 0x10000000 = 0xb0000000)
*/
/* Constants for PCMCIA mappings */
#define HD64461_PCC_WINDOW 0x01000000
/* Area 6 - Slot 0 - memory and/or IO card */
#define HD64461_IOBASE 0xb0000000
#define HD64461_IO_OFFSET(x) (HD64461_IOBASE + (x))
#define HD64461_PCC0_BASE HD64461_IO_OFFSET(0x8000000)
#define HD64461_PCC0_ATTR (HD64461_PCC0_BASE) /* 0xb8000000 */
#define HD64461_PCC0_COMM (HD64461_PCC0_BASE+HD64461_PCC_WINDOW) /* 0xb9000000 */
#define HD64461_PCC0_IO (HD64461_PCC0_BASE+2*HD64461_PCC_WINDOW) /* 0xba000000 */
/* Area 5 - Slot 1 - memory card only */
#define HD64461_PCC1_BASE HD64461_IO_OFFSET(0x4000000)
#define HD64461_PCC1_ATTR (HD64461_PCC1_BASE) /* 0xb4000000 */
#define HD64461_PCC1_COMM (HD64461_PCC1_BASE+HD64461_PCC_WINDOW) /* 0xb5000000 */
/* Standby Control Register for HD64461 */
#define HD64461_STBCR HD64461_IO_OFFSET(0x00000000)
#define HD64461_STBCR_CKIO_STBY 0x2000
#define HD64461_STBCR_SAFECKE_IST 0x1000
#define HD64461_STBCR_SLCKE_IST 0x0800
#define HD64461_STBCR_SAFECKE_OST 0x0400
#define HD64461_STBCR_SLCKE_OST 0x0200
#define HD64461_STBCR_SMIAST 0x0100
#define HD64461_STBCR_SLCDST 0x0080
#define HD64461_STBCR_SPC0ST 0x0040
#define HD64461_STBCR_SPC1ST 0x0020
#define HD64461_STBCR_SAFEST 0x0010
#define HD64461_STBCR_STM0ST 0x0008
#define HD64461_STBCR_STM1ST 0x0004
#define HD64461_STBCR_SIRST 0x0002
#define HD64461_STBCR_SURTST 0x0001
/* System Configuration Register */
#define HD64461_SYSCR HD64461_IO_OFFSET(0x02)
/* CPU Data Bus Control Register */
#define HD64461_SCPUCR HD64461_IO_OFFSET(0x04)
/* Base Address Register */
#define HD64461_LCDCBAR HD64461_IO_OFFSET(0x1000)
/* Line increment address */
#define HD64461_LCDCLOR HD64461_IO_OFFSET(0x1002)
/* Controls LCD controller */
#define HD64461_LCDCCR HD64461_IO_OFFSET(0x1004)
/* LCCDR control bits */
#define HD64461_LCDCCR_STBACK 0x0400 /* Standby Back */
#define HD64461_LCDCCR_STREQ 0x0100 /* Standby Request */
#define HD64461_LCDCCR_MOFF 0x0080 /* Memory Off */
#define HD64461_LCDCCR_REFSEL 0x0040 /* Refresh Select */
#define HD64461_LCDCCR_EPON 0x0020 /* End Power On */
#define HD64461_LCDCCR_SPON 0x0010 /* Start Power On */
/* Controls LCD (1) */
#define HD64461_LDR1 HD64461_IO_OFFSET(0x1010)
#define HD64461_LDR1_DON 0x01 /* Display On */
#define HD64461_LDR1_DINV 0x80 /* Display Invert */
/* Controls LCD (2) */
#define HD64461_LDR2 HD64461_IO_OFFSET(0x1012)
#define HD64461_LDHNCR HD64461_IO_OFFSET(0x1014) /* Number of horizontal characters */
#define HD64461_LDHNSR HD64461_IO_OFFSET(0x1016) /* Specify output start position + width of CL1 */
#define HD64461_LDVNTR HD64461_IO_OFFSET(0x1018) /* Specify total vertical lines */
#define HD64461_LDVNDR HD64461_IO_OFFSET(0x101a) /* specify number of display vertical lines */
#define HD64461_LDVSPR HD64461_IO_OFFSET(0x101c) /* specify vertical synchronization pos and AC nr */
/* Controls LCD (3) */
#define HD64461_LDR3 HD64461_IO_OFFSET(0x101e)
/* Palette Registers */
#define HD64461_CPTWAR HD64461_IO_OFFSET(0x1030) /* Color Palette Write Address Register */
#define HD64461_CPTWDR HD64461_IO_OFFSET(0x1032) /* Color Palette Write Data Register */
#define HD64461_CPTRAR HD64461_IO_OFFSET(0x1034) /* Color Palette Read Address Register */
#define HD64461_CPTRDR HD64461_IO_OFFSET(0x1036) /* Color Palette Read Data Register */
#define HD64461_GRDOR HD64461_IO_OFFSET(0x1040) /* Display Resolution Offset Register */
#define HD64461_GRSCR HD64461_IO_OFFSET(0x1042) /* Solid Color Register */
#define HD64461_GRCFGR HD64461_IO_OFFSET(0x1044) /* Accelerator Configuration Register */
#define HD64461_GRCFGR_ACCSTATUS 0x10 /* Accelerator Status */
#define HD64461_GRCFGR_ACCRESET 0x08 /* Accelerator Reset */
#define HD64461_GRCFGR_ACCSTART_BITBLT 0x06 /* Accelerator Start BITBLT */
#define HD64461_GRCFGR_ACCSTART_LINE 0x04 /* Accelerator Start Line Drawing */
#define HD64461_GRCFGR_COLORDEPTH16 0x01 /* Sets Colordepth 16 for Accelerator */
#define HD64461_GRCFGR_COLORDEPTH8 0x01 /* Sets Colordepth 8 for Accelerator */
/* Line Drawing Registers */
#define HD64461_LNSARH HD64461_IO_OFFSET(0x1046) /* Line Start Address Register (H) */
#define HD64461_LNSARL HD64461_IO_OFFSET(0x1048) /* Line Start Address Register (L) */
#define HD64461_LNAXLR HD64461_IO_OFFSET(0x104a) /* Axis Pixel Length Register */
#define HD64461_LNDGR HD64461_IO_OFFSET(0x104c) /* Diagonal Register */
#define HD64461_LNAXR HD64461_IO_OFFSET(0x104e) /* Axial Register */
#define HD64461_LNERTR HD64461_IO_OFFSET(0x1050) /* Start Error Term Register */
#define HD64461_LNMDR HD64461_IO_OFFSET(0x1052) /* Line Mode Register */
/* BitBLT Registers */
#define HD64461_BBTSSARH HD64461_IO_OFFSET(0x1054) /* Source Start Address Register (H) */
#define HD64461_BBTSSARL HD64461_IO_OFFSET(0x1056) /* Source Start Address Register (L) */
#define HD64461_BBTDSARH HD64461_IO_OFFSET(0x1058) /* Destination Start Address Register (H) */
#define HD64461_BBTDSARL HD64461_IO_OFFSET(0x105a) /* Destination Start Address Register (L) */
#define HD64461_BBTDWR HD64461_IO_OFFSET(0x105c) /* Destination Block Width Register */
#define HD64461_BBTDHR HD64461_IO_OFFSET(0x105e) /* Destination Block Height Register */
#define HD64461_BBTPARH HD64461_IO_OFFSET(0x1060) /* Pattern Start Address Register (H) */
#define HD64461_BBTPARL HD64461_IO_OFFSET(0x1062) /* Pattern Start Address Register (L) */
#define HD64461_BBTMARH HD64461_IO_OFFSET(0x1064) /* Mask Start Address Register (H) */
#define HD64461_BBTMARL HD64461_IO_OFFSET(0x1066) /* Mask Start Address Register (L) */
#define HD64461_BBTROPR HD64461_IO_OFFSET(0x1068) /* ROP Register */
#define HD64461_BBTMDR HD64461_IO_OFFSET(0x106a) /* BitBLT Mode Register */
/* PC Card Controller Registers */
/* Maps to Physical Area 6 */
#define HD64461_PCC0ISR HD64461_IO_OFFSET(0x2000) /* socket 0 interface status */
#define HD64461_PCC0GCR HD64461_IO_OFFSET(0x2002) /* socket 0 general control */
#define HD64461_PCC0CSCR HD64461_IO_OFFSET(0x2004) /* socket 0 card status change */
#define HD64461_PCC0CSCIER HD64461_IO_OFFSET(0x2006) /* socket 0 card status change interrupt enable */
#define HD64461_PCC0SCR HD64461_IO_OFFSET(0x2008) /* socket 0 software control */
/* Maps to Physical Area 5 */
#define HD64461_PCC1ISR HD64461_IO_OFFSET(0x2010) /* socket 1 interface status */
#define HD64461_PCC1GCR HD64461_IO_OFFSET(0x2012) /* socket 1 general control */
#define HD64461_PCC1CSCR HD64461_IO_OFFSET(0x2014) /* socket 1 card status change */
#define HD64461_PCC1CSCIER HD64461_IO_OFFSET(0x2016) /* socket 1 card status change interrupt enable */
#define HD64461_PCC1SCR HD64461_IO_OFFSET(0x2018) /* socket 1 software control */
/* PCC Interface Status Register */
#define HD64461_PCCISR_READY 0x80 /* card ready */
#define HD64461_PCCISR_MWP 0x40 /* card write-protected */
#define HD64461_PCCISR_VS2 0x20 /* voltage select pin 2 */
#define HD64461_PCCISR_VS1 0x10 /* voltage select pin 1 */
#define HD64461_PCCISR_CD2 0x08 /* card detect 2 */
#define HD64461_PCCISR_CD1 0x04 /* card detect 1 */
#define HD64461_PCCISR_BVD2 0x02 /* battery voltage detect 2 */
#define HD64461_PCCISR_BVD1 0x01 /* battery voltage detect 1 */
#define HD64461_PCCISR_PCD_MASK 0x0c /* card detect */
#define HD64461_PCCISR_BVD_MASK 0x03 /* battery voltage */
#define HD64461_PCCISR_BVD_BATGOOD 0x03 /* battery good */
#define HD64461_PCCISR_BVD_BATWARN 0x01 /* battery low warning */
#define HD64461_PCCISR_BVD_BATDEAD1 0x02 /* battery dead */
#define HD64461_PCCISR_BVD_BATDEAD2 0x00 /* battery dead */
/* PCC General Control Register */
#define HD64461_PCCGCR_DRVE 0x80 /* output drive */
#define HD64461_PCCGCR_PCCR 0x40 /* PC card reset */
#define HD64461_PCCGCR_PCCT 0x20 /* PC card type, 1=IO&mem, 0=mem */
#define HD64461_PCCGCR_VCC0 0x10 /* voltage control pin VCC0SEL0 */
#define HD64461_PCCGCR_PMMOD 0x08 /* memory mode */
#define HD64461_PCCGCR_PA25 0x04 /* pin A25 */
#define HD64461_PCCGCR_PA24 0x02 /* pin A24 */
#define HD64461_PCCGCR_REG 0x01 /* pin PCC0REG# */
/* PCC Card Status Change Register */
#define HD64461_PCCCSCR_SCDI 0x80 /* sw card detect intr */
#define HD64461_PCCCSCR_SRV1 0x40 /* reserved */
#define HD64461_PCCCSCR_IREQ 0x20 /* IREQ intr req */
#define HD64461_PCCCSCR_SC 0x10 /* STSCHG (status change) pin */
#define HD64461_PCCCSCR_CDC 0x08 /* CD (card detect) change */
#define HD64461_PCCCSCR_RC 0x04 /* READY change */
#define HD64461_PCCCSCR_BW 0x02 /* battery warning change */
#define HD64461_PCCCSCR_BD 0x01 /* battery dead change */
/* PCC Card Status Change Interrupt Enable Register */
#define HD64461_PCCCSCIER_CRE 0x80 /* change reset enable */
#define HD64461_PCCCSCIER_IREQE_MASK 0x60 /* IREQ enable */
#define HD64461_PCCCSCIER_IREQE_DISABLED 0x00 /* IREQ disabled */
#define HD64461_PCCCSCIER_IREQE_LEVEL 0x20 /* IREQ level-triggered */
#define HD64461_PCCCSCIER_IREQE_FALLING 0x40 /* IREQ falling-edge-trig */
#define HD64461_PCCCSCIER_IREQE_RISING 0x60 /* IREQ rising-edge-trig */
#define HD64461_PCCCSCIER_SCE 0x10 /* status change enable */
#define HD64461_PCCCSCIER_CDE 0x08 /* card detect change enable */
#define HD64461_PCCCSCIER_RE 0x04 /* ready change enable */
#define HD64461_PCCCSCIER_BWE 0x02 /* battery warn change enable */
#define HD64461_PCCCSCIER_BDE 0x01 /* battery dead change enable*/
/* PCC Software Control Register */
#define HD64461_PCCSCR_VCC1 0x02 /* voltage control pin 1 */
#define HD64461_PCCSCR_SWP 0x01 /* write protect */
/* PCC0 Output Pins Control Register */
#define HD64461_P0OCR HD64461_IO_OFFSET(0x202a)
/* PCC1 Output Pins Control Register */
#define HD64461_P1OCR HD64461_IO_OFFSET(0x202c)
/* PC Card General Control Register */
#define HD64461_PGCR HD64461_IO_OFFSET(0x202e)
/* Port Control Registers */
#define HD64461_GPACR HD64461_IO_OFFSET(0x4000) /* Port A - Handles IRDA/TIMER */
#define HD64461_GPBCR HD64461_IO_OFFSET(0x4002) /* Port B - Handles UART */
#define HD64461_GPCCR HD64461_IO_OFFSET(0x4004) /* Port C - Handles PCMCIA 1 */
#define HD64461_GPDCR HD64461_IO_OFFSET(0x4006) /* Port D - Handles PCMCIA 1 */
/* Port Control Data Registers */
#define HD64461_GPADR HD64461_IO_OFFSET(0x4010) /* A */
#define HD64461_GPBDR HD64461_IO_OFFSET(0x4012) /* B */
#define HD64461_GPCDR HD64461_IO_OFFSET(0x4014) /* C */
#define HD64461_GPDDR HD64461_IO_OFFSET(0x4016) /* D */
/* Interrupt Control Registers */
#define HD64461_GPAICR HD64461_IO_OFFSET(0x4020) /* A */
#define HD64461_GPBICR HD64461_IO_OFFSET(0x4022) /* B */
#define HD64461_GPCICR HD64461_IO_OFFSET(0x4024) /* C */
#define HD64461_GPDICR HD64461_IO_OFFSET(0x4026) /* D */
/* Interrupt Status Registers */
#define HD64461_GPAISR HD64461_IO_OFFSET(0x4040) /* A */
#define HD64461_GPBISR HD64461_IO_OFFSET(0x4042) /* B */
#define HD64461_GPCISR HD64461_IO_OFFSET(0x4044) /* C */
#define HD64461_GPDISR HD64461_IO_OFFSET(0x4046) /* D */
/* Interrupt Request Register & Interrupt Mask Register */
#define HD64461_NIRR HD64461_IO_OFFSET(0x5000)
#define HD64461_NIMR HD64461_IO_OFFSET(0x5002)
#define HD64461_IRQBASE OFFCHIP_IRQ_BASE
#define OFFCHIP_IRQ_BASE 64
#define HD64461_IRQ_NUM 16
#define HD64461_IRQ_UART (HD64461_IRQBASE+5)
#define HD64461_IRQ_IRDA (HD64461_IRQBASE+6)
#define HD64461_IRQ_TMU1 (HD64461_IRQBASE+9)
#define HD64461_IRQ_TMU0 (HD64461_IRQBASE+10)
#define HD64461_IRQ_GPIO (HD64461_IRQBASE+11)
#define HD64461_IRQ_AFE (HD64461_IRQBASE+12)
#define HD64461_IRQ_PCC1 (HD64461_IRQBASE+13)
#define HD64461_IRQ_PCC0 (HD64461_IRQBASE+14)
#define __IO_PREFIX hd64461
#include <asm/io_generic.h>
/* arch/sh/cchips/hd6446x/hd64461/setup.c */
void hd64461_register_irq_demux(int irq,
int (*demux) (int irq, void *dev), void *dev);
void hd64461_unregister_irq_demux(int irq);
#endif

View File

@@ -0,0 +1,18 @@
#ifndef __ASM_SH_HEARTBEAT_H
#define __ASM_SH_HEARTBEAT_H
#include <linux/timer.h>
#define HEARTBEAT_INVERTED (1 << 0)
struct heartbeat_data {
void __iomem *base;
unsigned char *bit_pos;
unsigned int nr_bits;
struct timer_list timer;
unsigned int regsize;
unsigned int mask;
unsigned long flags;
};
#endif /* __ASM_SH_HEARTBEAT_H */

View File

@@ -0,0 +1,92 @@
#ifndef _ASM_SH_HUGETLB_H
#define _ASM_SH_HUGETLB_H
#include <asm/page.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
unsigned long len) {
return 0;
}
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
return 0;
}
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor,
unsigned long ceiling)
{
free_pgd_range(tlb, addr, end, floor, ceiling);
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte_at(mm, addr, ptep, pte);
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return ptep_get_and_clear(mm, addr, ptep);
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
}
static inline int huge_pte_none(pte_t pte)
{
return pte_none(pte);
}
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
ptep_set_wrprotect(mm, addr, ptep);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
static inline pte_t huge_ptep_get(pte_t *ptep)
{
return *ptep;
}
static inline int arch_prepare_hugepage(struct page *page)
{
return 0;
}
static inline void arch_release_hugepage(struct page *page)
{
}
#endif /* _ASM_SH_HUGETLB_H */

View File

@@ -0,0 +1,35 @@
#ifndef __ASM_SH_HW_IRQ_H
#define __ASM_SH_HW_IRQ_H
#include <linux/init.h>
#include <linux/sh_intc.h>
#include <asm/atomic.h>
extern atomic_t irq_err_count;
struct ipr_data {
unsigned char irq;
unsigned char ipr_idx; /* Index of the IPR register */
unsigned char shift; /* Number of bits to shift the data */
unsigned char priority; /* The priority */
};
struct ipr_desc {
unsigned long *ipr_offsets;
unsigned int nr_offsets;
struct ipr_data *ipr_data;
unsigned int nr_irqs;
struct irq_chip chip;
};
void register_ipr_controller(struct ipr_desc *);
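/*
 * Illustrative only: a CPU setup file would typically describe its IPR
 * interrupts with static tables along these lines (the register offsets,
 * IRQ numbers and priorities here are made up):
 *
 *	static unsigned long ipr_offsets[] = { 0xffd00004, 0xffd00008 };
 *
 *	static struct ipr_data ipr_irq_table[] = {
 *		{ 16, 0, 12, 2 },	// irq, ipr_idx, shift, priority
 *	};
 *
 *	static struct ipr_desc ipr_irq_desc = {
 *		.ipr_offsets	= ipr_offsets,
 *		.nr_offsets	= ARRAY_SIZE(ipr_offsets),
 *		.ipr_data	= ipr_irq_table,
 *		.nr_irqs	= ARRAY_SIZE(ipr_irq_table),
 *		.chip		= { .name = "IPR-example" },
 *	};
 *
 * and then call register_ipr_controller(&ipr_irq_desc) at init time.
 */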
void __init plat_irq_setup(void);
void __init plat_irq_setup_sh3(void);
void __init plat_irq_setup_pins(int mode);
enum { IRQ_MODE_IRQ, IRQ_MODE_IRQ7654, IRQ_MODE_IRQ3210,
IRQ_MODE_IRL7654_MASK, IRQ_MODE_IRL3210_MASK,
IRQ_MODE_IRL7654, IRQ_MODE_IRL3210 };
#endif /* __ASM_SH_HW_IRQ_H */

View File

@@ -0,0 +1,83 @@
#ifndef __ASM_SH_HWBLK_H
#define __ASM_SH_HWBLK_H
#ifdef CONFIG_HWBLK
#include <asm/clock.h>
#include <asm/io.h>
#define HWBLK_CNT_USAGE 0
#define HWBLK_CNT_IDLE 1
#define HWBLK_CNT_DEVICES 2
#define HWBLK_CNT_NR 3
#define HWBLK_AREA_FLAG_PARENT (1 << 0) /* valid parent */
#define HWBLK_AREA(_flags, _parent) \
{ \
.flags = _flags, \
.parent = _parent, \
}
struct hwblk_area {
int cnt[HWBLK_CNT_NR];
unsigned char parent;
unsigned char flags;
};
#define HWBLK(_mstp, _bit, _area) \
{ \
.mstp = (void __iomem *)_mstp, \
.bit = _bit, \
.area = _area, \
}
struct hwblk {
void __iomem *mstp;
unsigned char bit;
unsigned char area;
int cnt[HWBLK_CNT_NR];
};
struct hwblk_info {
struct hwblk_area *areas;
int nr_areas;
struct hwblk *hwblks;
int nr_hwblks;
};
/* Should be defined by processor-specific code */
int arch_hwblk_init(void);
int arch_hwblk_sleep_mode(void);
int hwblk_register(struct hwblk_info *info);
int hwblk_init(void);
void hwblk_enable(struct hwblk_info *info, int hwblk);
void hwblk_disable(struct hwblk_info *info, int hwblk);
void hwblk_cnt_inc(struct hwblk_info *info, int hwblk, int cnt);
void hwblk_cnt_dec(struct hwblk_info *info, int hwblk, int cnt);
/* allow clocks to enable and disable hardware blocks */
#define SH_HWBLK_CLK(_name, _id, _parent, _hwblk, _flags) \
{ \
.name = _name, \
.id = _id, \
.parent = _parent, \
.arch_flags = _hwblk, \
.flags = _flags, \
}
int sh_hwblk_clk_register(struct clk *clks, int nr);
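/*
 * Example only (the clock name, parent and hardware block index are
 * hypothetical): processor-specific clock code could wrap a hardware
 * block in a clock and hand it to sh_hwblk_clk_register() like this:
 *
 *	static struct clk mstp_clks[] = {
 *		// name, id, parent clk, hardware block index, flags
 *		SH_HWBLK_CLK("veu0", -1, NULL, 3, 0),
 *	};
 *
 *	sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */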
#else /* CONFIG_HWBLK */
inline int hwblk_init(void)
{
return 0;
}
#endif /* CONFIG_HWBLK */
#endif /* __ASM_SH_HWBLK_H */

View File

@@ -0,0 +1,22 @@
/*
* MMIO/IRQ and platform data for SH7760 I2C channels
*/
#ifndef _I2C_SH7760_H_
#define _I2C_SH7760_H_
#define SH7760_I2C_DEVNAME "sh7760-i2c"
#define SH7760_I2C0_MMIO 0xFE140000
#define SH7760_I2C0_MMIOEND 0xFE14003B
#define SH7760_I2C0_IRQ 62
#define SH7760_I2C1_MMIO 0xFE150000
#define SH7760_I2C1_MMIOEND 0xFE15003B
#define SH7760_I2C1_IRQ 63
struct sh7760_i2c_platdata {
unsigned int speed_khz;
};
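/*
 * Board code would typically instantiate a channel as a platform device,
 * roughly like this (illustrative only):
 *
 *	static struct sh7760_i2c_platdata i2c0_pdata = { .speed_khz = 100 };
 *
 *	static struct resource i2c0_res[] = {
 *		{ .start = SH7760_I2C0_MMIO, .end = SH7760_I2C0_MMIOEND,
 *		  .flags = IORESOURCE_MEM },
 *		{ .start = SH7760_I2C0_IRQ, .end = SH7760_I2C0_IRQ,
 *		  .flags = IORESOURCE_IRQ },
 *	};
 *
 * registered under the name SH7760_I2C_DEVNAME with i2c0_pdata as
 * platform data.
 */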
#endif

View File

@@ -0,0 +1,45 @@
#ifndef __ASM_SH_ILSEL_H
#define __ASM_SH_ILSEL_H
typedef enum {
ILSEL_NONE,
ILSEL_LAN,
ILSEL_USBH_I,
ILSEL_USBH_S,
ILSEL_USBH_V,
ILSEL_RTC,
ILSEL_USBP_I,
ILSEL_USBP_S,
ILSEL_USBP_V,
ILSEL_KEY,
/*
* ILSEL Aliases - corner cases for interleaved level tables.
*
* Someone thought this was a good idea and less hassle than
* demuxing a shared vector, really.
*/
/* ILSEL0 and 2 */
ILSEL_FPGA0,
ILSEL_FPGA1,
ILSEL_EX1,
ILSEL_EX2,
ILSEL_EX3,
ILSEL_EX4,
/* ILSEL1 and 3 */
ILSEL_FPGA2 = ILSEL_FPGA0,
ILSEL_FPGA3 = ILSEL_FPGA1,
ILSEL_EX5 = ILSEL_EX1,
ILSEL_EX6 = ILSEL_EX2,
ILSEL_EX7 = ILSEL_EX3,
ILSEL_EX8 = ILSEL_EX4,
} ilsel_source_t;
/* arch/sh/boards/renesas/x3proto/ilsel.c */
int ilsel_enable(ilsel_source_t set);
int ilsel_enable_fixed(ilsel_source_t set, unsigned int level);
void ilsel_disable(unsigned int irq);
#endif /* __ASM_SH_ILSEL_H */
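
ilsel_enable() picks a slot for the given source and returns the IRQ it was routed to, so board code typically feeds the return value straight into a device resource. A rough sketch under that assumption; example_eth_resources is a hypothetical NIC resource array, and ilsel_disable(irq) undoes the routing on teardown.

static int __init example_net_setup(void)
{
	int irq = ilsel_enable(ILSEL_LAN);	/* route the LAN source */

	if (unlikely(irq < 0))
		return irq;

	example_eth_resources[1].start = irq;	/* hand the vector to the device */
	example_eth_resources[1].end   = irq;

	return 0;
}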

View File

@@ -0,0 +1,322 @@
#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H
/*
* Convention:
* read{b,w,l,q}/write{b,w,l,q} are for PCI,
* while in{b,w,l}/out{b,w,l} are for ISA
*
* In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
* and 'string' versions: ins{b,w,l}/outs{b,w,l}
*
* While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
* automatically, there are also __raw versions, which do not.
*
* Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
* SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
* these have the same semantics as the __raw variants, and as such, all
* new code should be using the __raw versions.
*
* All ISA I/O routines are wrapped through the machine vector. If a
* board does not provide overrides, a generic set copied in from the
* default machine vector is used instead. These exist largely for old
* compat code doing I/O offsetting to SuperIOs, all of which is better
* handled through the machvec ioport mapping routines these days.
*/
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>
#ifdef __KERNEL__
/*
* Depending on which platform we are running on, we need different
* I/O functions.
*/
#define __IO_PREFIX generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#define inb(p) sh_mv.mv_inb((p))
#define inw(p) sh_mv.mv_inw((p))
#define inl(p) sh_mv.mv_inl((p))
#define outb(x,p) sh_mv.mv_outb((x),(p))
#define outw(x,p) sh_mv.mv_outw((x),(p))
#define outl(x,p) sh_mv.mv_outl((x),(p))
#define inb_p(p) sh_mv.mv_inb_p((p))
#define inw_p(p) sh_mv.mv_inw_p((p))
#define inl_p(p) sh_mv.mv_inl_p((p))
#define outb_p(x,p) sh_mv.mv_outb_p((x),(p))
#define outw_p(x,p) sh_mv.mv_outw_p((x),(p))
#define outl_p(x,p) sh_mv.mv_outl_p((x),(p))
#define insb(p,b,c) sh_mv.mv_insb((p), (b), (c))
#define insw(p,b,c) sh_mv.mv_insw((p), (b), (c))
#define insl(p,b,c) sh_mv.mv_insl((p), (b), (c))
#define outsb(p,b,c) sh_mv.mv_outsb((p), (b), (c))
#define outsw(p,b,c) sh_mv.mv_outsw((p), (b), (c))
#define outsl(p,b,c) sh_mv.mv_outsl((p), (b), (c))
#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))
#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a) (__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))
#define __raw_readb(a) (__chk_io_ptr(a), *(volatile u8 __force *)(a))
#define __raw_readw(a) (__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a) (__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a) (__chk_io_ptr(a), *(volatile u64 __force *)(a))
#define readb(a) ({ u8 r_ = __raw_readb(a); mb(); r_; })
#define readw(a) ({ u16 r_ = __raw_readw(a); mb(); r_; })
#define readl(a) ({ u32 r_ = __raw_readl(a); mb(); r_; })
#define readq(a) ({ u64 r_ = __raw_readq(a); mb(); r_; })
#define writeb(v,a) ({ __raw_writeb((v),(a)); mb(); })
#define writew(v,a) ({ __raw_writew((v),(a)); mb(); })
#define writel(v,a) ({ __raw_writel((v),(a)); mb(); })
#define writeq(v,a) ({ __raw_writeq((v),(a)); mb(); })
/* SuperH on-chip I/O functions */
#define ctrl_inb __raw_readb
#define ctrl_inw __raw_readw
#define ctrl_inl __raw_readl
#define ctrl_inq __raw_readq
#define ctrl_outb __raw_writeb
#define ctrl_outw __raw_writew
#define ctrl_outl __raw_writel
#define ctrl_outq __raw_writeq
static inline void ctrl_delay(void)
{
#ifdef CONFIG_CPU_SH4
__raw_readw(CCN_PVR);
#elif defined(P2SEG)
__raw_readw(P2SEG);
#else
#error "Need a dummy address for delay"
#endif
}
#define __BUILD_MEMORY_STRING(bwlq, type) \
\
static inline void __raw_writes##bwlq(volatile void __iomem *mem, \
const void *addr, unsigned int count) \
{ \
const volatile type *__addr = addr; \
\
while (count--) { \
__raw_write##bwlq(*__addr, mem); \
__addr++; \
} \
} \
\
static inline void __raw_reads##bwlq(volatile void __iomem *mem, \
void *addr, unsigned int count) \
{ \
volatile type *__addr = addr; \
\
while (count--) { \
*__addr = __raw_read##bwlq(mem); \
__addr++; \
} \
}
__BUILD_MEMORY_STRING(b, u8)
__BUILD_MEMORY_STRING(w, u16)
#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(l, u32)
#endif
__BUILD_MEMORY_STRING(q, u64)
#define writesb __raw_writesb
#define writesw __raw_writesw
#define writesl __raw_writesl
#define readsb __raw_readsb
#define readsw __raw_readsw
#define readsl __raw_readsl
#define readb_relaxed(a) readb(a)
#define readw_relaxed(a) readw(a)
#define readl_relaxed(a) readl(a)
#define readq_relaxed(a) readq(a)
#ifndef CONFIG_GENERIC_IOMAP
/* Simple MMIO */
#define ioread8(a) __raw_readb(a)
#define ioread16(a) __raw_readw(a)
#define ioread16be(a) be16_to_cpu(__raw_readw((a)))
#define ioread32(a) __raw_readl(a)
#define ioread32be(a) be32_to_cpu(__raw_readl((a)))
#define iowrite8(v,a) __raw_writeb((v),(a))
#define iowrite16(v,a) __raw_writew((v),(a))
#define iowrite16be(v,a) __raw_writew(cpu_to_be16((v)),(a))
#define iowrite32(v,a) __raw_writel((v),(a))
#define iowrite32be(v,a) __raw_writel(cpu_to_be32((v)),(a))
#define ioread8_rep(a, d, c) __raw_readsb((a), (d), (c))
#define ioread16_rep(a, d, c) __raw_readsw((a), (d), (c))
#define ioread32_rep(a, d, c) __raw_readsl((a), (d), (c))
#define iowrite8_rep(a, s, c) __raw_writesb((a), (s), (c))
#define iowrite16_rep(a, s, c) __raw_writesw((a), (s), (c))
#define iowrite32_rep(a, s, c) __raw_writesl((a), (s), (c))
#endif
/*
* Use __raw_readsl and __raw_writesl rather than the generic versions from
* iomap.c because we have optimised versions for the SH.
*/
#define mmio_insb(p,d,c) __raw_readsb(p,d,c)
#define mmio_insw(p,d,c) __raw_readsw(p,d,c)
#define mmio_insl(p,d,c) __raw_readsl(p,d,c)
#define mmio_outsb(p,s,c) __raw_writesb(p,s,c)
#define mmio_outsw(p,s,c) __raw_writesw(p,s,c)
#define mmio_outsl(p,s,c) __raw_writesl(p,s,c)
/* synco on SH-4A, otherwise a nop */
#define mmiowb() wmb()
#define IO_SPACE_LIMIT 0xffffffff
extern unsigned long generic_io_base;
/*
* This function provides a method for the generic case where a
* board-specific ioport_map simply needs to return the port + some
* arbitrary port base.
*
* We use this at board setup time to implicitly set the port base, and
* as a result, we can use the generic ioport_map.
*/
static inline void __set_io_port_base(unsigned long pbase)
{
generic_io_base = pbase;
}
#define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n))
/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);
/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
unsigned long long val);
#if !defined(CONFIG_MMU)
#define virt_to_phys(address) ((unsigned long)(address))
#define phys_to_virt(address) ((void *)(address))
#else
#define virt_to_phys(address) (__pa(address))
#define phys_to_virt(address) (__va(address))
#endif
/*
* On 32-bit SH, we traditionally have the whole physical address space
* mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
* not need to do anything but place the address in the proper segment.
* This is true for P1 and P2 addresses, as well as some P3 ones.
* However, most of the P3 addresses and newer cores using extended
* addressing need to map through page tables, so the ioremap()
* implementation becomes a bit more complicated.
*
* See arch/sh/mm/ioremap.c for additional notes on this.
*
* We cheat a bit and always return uncachable areas until we've fixed
* the drivers to handle caching properly.
*
* On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
* doesn't exist, so everything must go through page tables.
*/
#ifdef CONFIG_MMU
void __iomem *__ioremap(unsigned long offset, unsigned long size,
unsigned long flags);
void __iounmap(void __iomem *addr);
static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
#ifdef CONFIG_29BIT
unsigned long last_addr = offset + size - 1;
#endif
void __iomem *ret;
ret = __ioremap_trapped(offset, size);
if (ret)
return ret;
#ifdef CONFIG_29BIT
/*
* For P1 and P2 space this is trivial, as everything is already
* mapped. Uncached access for P1 addresses is done through P2.
* In the P3 case or for addresses outside of the 29-bit space,
* mapping must be done by the PMB or by using page tables.
*/
if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
if (unlikely(flags & _PAGE_CACHABLE))
return (void __iomem *)P1SEGADDR(offset);
return (void __iomem *)P2SEGADDR(offset);
}
/* P4 addresses above the store queues are always mapped. */
if (unlikely(offset >= P3_ADDR_MAX))
return (void __iomem *)P4SEGADDR(offset);
#endif
return __ioremap(offset, size, flags);
}
#else
#define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset))
#define __iounmap(addr) do { } while (0)
#endif /* CONFIG_MMU */
#define ioremap(offset, size) \
__ioremap_mode((offset), (size), 0)
#define ioremap_nocache(offset, size) \
__ioremap_mode((offset), (size), 0)
#define ioremap_cache(offset, size) \
__ioremap_mode((offset), (size), _PAGE_CACHABLE)
#define p3_ioremap(offset, size, flags) \
__ioremap((offset), (size), (flags))
#define ioremap_prot(offset, size, flags) \
__ioremap_mode((offset), (size), (flags))
#define iounmap(addr) \
__iounmap((addr))
#define maybebadio(port) \
printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
__func__, __LINE__, (port), (u32)__builtin_return_address(0))
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(unsigned long addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_IO_H */
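
Per the comment at the top of this header, new on-chip peripheral accesses should use the __raw_* accessors (no barriers) or the barrier-inserting read*/write* variants rather than the legacy ctrl_* names. A minimal driver-style sketch; the register block address and offsets are made up.

static int __init example_probe(void)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap_nocache(0xfe450000, 0x100);	/* hypothetical peripheral */
	if (!regs)
		return -ENOMEM;

	status = __raw_readl(regs + 0x08);		/* raw access, no barrier */
	writel(status | 0x1, regs + 0x08);		/* barrier-inserting variant */

	iounmap(regs);
	return 0;
}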

View File

@@ -0,0 +1,42 @@
/*
* Trivial I/O routine definitions, intentionally meant to be included
* multiple times. Ugly I/O routine concatenation helpers taken from
* alpha. Must be included _before_ io.h to avoid preprocessor-induced
* routine mismatch.
*/
#define IO_CONCAT(a,b) _IO_CONCAT(a,b)
#define _IO_CONCAT(a,b) a ## _ ## b
#ifndef __IO_PREFIX
#error "Don't include this header without a valid system prefix"
#endif
u8 IO_CONCAT(__IO_PREFIX,inb)(unsigned long);
u16 IO_CONCAT(__IO_PREFIX,inw)(unsigned long);
u32 IO_CONCAT(__IO_PREFIX,inl)(unsigned long);
void IO_CONCAT(__IO_PREFIX,outb)(u8, unsigned long);
void IO_CONCAT(__IO_PREFIX,outw)(u16, unsigned long);
void IO_CONCAT(__IO_PREFIX,outl)(u32, unsigned long);
u8 IO_CONCAT(__IO_PREFIX,inb_p)(unsigned long);
u16 IO_CONCAT(__IO_PREFIX,inw_p)(unsigned long);
u32 IO_CONCAT(__IO_PREFIX,inl_p)(unsigned long);
void IO_CONCAT(__IO_PREFIX,outb_p)(u8, unsigned long);
void IO_CONCAT(__IO_PREFIX,outw_p)(u16, unsigned long);
void IO_CONCAT(__IO_PREFIX,outl_p)(u32, unsigned long);
void IO_CONCAT(__IO_PREFIX,insb)(unsigned long, void *dst, unsigned long count);
void IO_CONCAT(__IO_PREFIX,insw)(unsigned long, void *dst, unsigned long count);
void IO_CONCAT(__IO_PREFIX,insl)(unsigned long, void *dst, unsigned long count);
void IO_CONCAT(__IO_PREFIX,outsb)(unsigned long, const void *src, unsigned long count);
void IO_CONCAT(__IO_PREFIX,outsw)(unsigned long, const void *src, unsigned long count);
void IO_CONCAT(__IO_PREFIX,outsl)(unsigned long, const void *src, unsigned long count);
void *IO_CONCAT(__IO_PREFIX,ioremap)(unsigned long offset, unsigned long size);
void IO_CONCAT(__IO_PREFIX,iounmap)(void *addr);
void __iomem *IO_CONCAT(__IO_PREFIX,ioport_map)(unsigned long addr, unsigned int size);
void IO_CONCAT(__IO_PREFIX,ioport_unmap)(void __iomem *addr);
#undef __IO_PREFIX
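
The IO_CONCAT() trick means a single set of prototypes can be stamped out for any prefix. A board or machvec header simply defines __IO_PREFIX before pulling this file in; the "myboard" prefix below is purely illustrative.

/* In a hypothetical board header, before <asm/io.h> is included: */
#define __IO_PREFIX	myboard
#include <asm/io_generic.h>

/*
 * The include above now declares, among others:
 *   u8   myboard_inb(unsigned long);
 *   void myboard_outb(u8, unsigned long);
 *   void __iomem *myboard_ioport_map(unsigned long addr, unsigned int size);
 * which the board's machine vector can point its mv_* hooks at.
 */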

View File

@@ -0,0 +1,58 @@
#ifndef __ASM_SH_IO_TRAPPED_H
#define __ASM_SH_IO_TRAPPED_H
#include <linux/list.h>
#include <linux/ioport.h>
#include <asm/page.h>
#define IO_TRAPPED_MAGIC 0xfeedbeef
struct trapped_io {
unsigned int magic;
struct resource *resource;
unsigned int num_resources;
unsigned int minimum_bus_width;
struct list_head list;
void __iomem *virt_base;
} __aligned(PAGE_SIZE);
#ifdef CONFIG_IO_TRAPPED
int register_trapped_io(struct trapped_io *tiop);
int handle_trapped_io(struct pt_regs *regs, unsigned long address);
void __iomem *match_trapped_io_handler(struct list_head *list,
unsigned long offset,
unsigned long size);
#ifdef CONFIG_HAS_IOMEM
extern struct list_head trapped_mem;
static inline void __iomem *
__ioremap_trapped(unsigned long offset, unsigned long size)
{
return match_trapped_io_handler(&trapped_mem, offset, size);
}
#else
#define __ioremap_trapped(offset, size) NULL
#endif
#ifdef CONFIG_HAS_IOPORT
extern struct list_head trapped_io;
static inline void __iomem *
__ioport_map_trapped(unsigned long offset, unsigned long size)
{
return match_trapped_io_handler(&trapped_io, offset, size);
}
#else
#define __ioport_map_trapped(offset, size) NULL
#endif
#else
#define register_trapped_io(tiop) (-1)
#define handle_trapped_io(tiop, address) 0
#define __ioremap_trapped(offset, size) NULL
#define __ioport_map_trapped(offset, size) NULL
#endif
#endif /* __ASM_SH_IO_TRAPPED_H */
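
Boards use this by describing the emulated window with ordinary resources and registering a struct trapped_io at setup time; accesses to that region then fault and are fixed up via handle_trapped_io(). A hedged sketch, with the resource contents left to board code.

static struct resource example_cf_resources[2];	/* filled in by board code */

static struct trapped_io example_cf_trapped_io = {
	.resource		= example_cf_resources,
	.num_resources		= 2,
	.minimum_bus_width	= 16,
};

static int __init example_devices_setup(void)
{
	if (register_trapped_io(&example_cf_trapped_io) == 0)
		pr_info("trapped I/O window registered\n");
	return 0;
}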

View File

@@ -0,0 +1 @@
#include <asm-generic/ioctl.h>

View File

@@ -0,0 +1,103 @@
#ifndef __ASM_SH_IOCTLS_H
#define __ASM_SH_IOCTLS_H
#include <asm/ioctl.h>
#define FIOCLEX _IO('f', 1)
#define FIONCLEX _IO('f', 2)
#define FIOASYNC _IOW('f', 125, int)
#define FIONBIO _IOW('f', 126, int)
#define FIONREAD _IOR('f', 127, int)
#define TIOCINQ FIONREAD
#define FIOQSIZE _IOR('f', 128, loff_t)
#define TCGETS 0x5401
#define TCSETS 0x5402
#define TCSETSW 0x5403
#define TCSETSF 0x5404
#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */
#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */
#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */
#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */
#define TCSBRK _IO('t', 29)
#define TCXONC _IO('t', 30)
#define TCFLSH _IO('t', 31)
#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */
#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
#define TIOCSPGRP _IOW('t', 118, int)
#define TIOCGPGRP _IOR('t', 119, int)
#define TIOCEXCL _IO('T', 12) /* 0x540C */
#define TIOCNXCL _IO('T', 13) /* 0x540D */
#define TIOCSCTTY _IO('T', 14) /* 0x540E */
#define TIOCSTI _IOW('T', 18, char) /* 0x5412 */
#define TIOCMGET _IOR('T', 21, unsigned int) /* 0x5415 */
#define TIOCMBIS _IOW('T', 22, unsigned int) /* 0x5416 */
#define TIOCMBIC _IOW('T', 23, unsigned int) /* 0x5417 */
#define TIOCMSET _IOW('T', 24, unsigned int) /* 0x5418 */
# define TIOCM_LE 0x001
# define TIOCM_DTR 0x002
# define TIOCM_RTS 0x004
# define TIOCM_ST 0x008
# define TIOCM_SR 0x010
# define TIOCM_CTS 0x020
# define TIOCM_CAR 0x040
# define TIOCM_RNG 0x080
# define TIOCM_DSR 0x100
# define TIOCM_CD TIOCM_CAR
# define TIOCM_RI TIOCM_RNG
#define TIOCGSOFTCAR _IOR('T', 25, unsigned int) /* 0x5419 */
#define TIOCSSOFTCAR _IOW('T', 26, unsigned int) /* 0x541A */
#define TIOCLINUX _IOW('T', 28, char) /* 0x541C */
#define TIOCCONS _IO('T', 29) /* 0x541D */
#define TIOCGSERIAL 0x803C541E /* _IOR('T', 30, struct serial_struct) 0x541E */
#define TIOCSSERIAL 0x403C541F /* _IOW('T', 31, struct serial_struct) 0x541F */
#define TIOCPKT _IOW('T', 32, int) /* 0x5420 */
# define TIOCPKT_DATA 0
# define TIOCPKT_FLUSHREAD 1
# define TIOCPKT_FLUSHWRITE 2
# define TIOCPKT_STOP 4
# define TIOCPKT_START 8
# define TIOCPKT_NOSTOP 16
# define TIOCPKT_DOSTOP 32
#define TIOCNOTTY _IO('T', 34) /* 0x5422 */
#define TIOCSETD _IOW('T', 35, int) /* 0x5423 */
#define TIOCGETD _IOR('T', 36, int) /* 0x5424 */
#define TCSBRKP _IOW('T', 37, int) /* 0x5425 */ /* Needed for POSIX tcsendbreak() */
#define TIOCSBRK _IO('T', 39) /* 0x5427 */ /* BSD compatibility */
#define TIOCCBRK _IO('T', 40) /* 0x5428 */ /* BSD compatibility */
#define TIOCGSID _IOR('T', 41, pid_t) /* 0x5429 */ /* Return the session ID of FD */
#define TCGETS2 _IOR('T', 42, struct termios2)
#define TCSETS2 _IOW('T', 43, struct termios2)
#define TCSETSW2 _IOW('T', 44, struct termios2)
#define TCSETSF2 _IOW('T', 45, struct termios2)
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
#define TIOCSERCONFIG _IO('T', 83) /* 0x5453 */
#define TIOCSERGWILD _IOR('T', 84, int) /* 0x5454 */
#define TIOCSERSWILD _IOW('T', 85, int) /* 0x5455 */
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x80d85458 /* _IOR('T', 88, struct async_struct) 0x5458 */ /* For debugging only */
#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* 0x5459 */ /* Get line status register */
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#define TIOCSERGETMULTI 0x80A8545A /* _IOR('T', 90, struct serial_multiport_struct) 0x545A */ /* Get multiport config */
#define TIOCSERSETMULTI 0x40A8545B /* _IOW('T', 91, struct serial_multiport_struct) 0x545B */ /* Set multiport config */
#define TIOCMIWAIT _IO('T', 92) /* 0x545C */ /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
#endif /* __ASM_SH_IOCTLS_H */

View File

@@ -0,0 +1 @@
#include <asm-generic/ipcbuf.h>

View File

@@ -0,0 +1,21 @@
/*
* include/asm-sh/irl.h
*
* Copyright (c) STMicroelectronics 2007
*
* IRL interrupt lines
*/
#ifndef _ASM_SH_IRL_H
#define _ASM_SH_IRL_H
/*
* Interrupts which are generated by IRL lines when in individual interrupt
* mode. See plat_irq_setup_pins().
*/
#define IRL0_IRQ 2
#define IRL1_IRQ 5
#define IRL2_IRQ 8
#define IRL3_IRQ 11
#endif /* _ASM_SH_IRL_H */

View File

@@ -0,0 +1,66 @@
/*
* Copyright (C) 2008 STMicroelectronics Limited
* Author: Stuart Menefy <stuart.menefy@st.com>
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*/
#ifndef __ASM_SH_IRQ_ILC_H
#define __ASM_SH_IRQ_ILC_H
#include <linux/platform_device.h>
#include <linux/hardirq.h>
#if defined(CONFIG_CPU_SUBTYPE_FLI7510)
#define ILC_FIRST_IRQ 44
#define ILC_NR_IRQS 136
#define ILC_IRQ(x) (ILC_FIRST_IRQ + (x))
#elif defined(CONFIG_CPU_SUBTYPE_STX5197)
#define ILC_FIRST_IRQ 33
#define ILC_NR_IRQS 72
#define ILC_IRQ(x) (ILC_FIRST_IRQ + (x))
#define ILC_EXT_IRQ(x) (ILC_FIRST_IRQ + 64 + (x))
#elif defined(CONFIG_CPU_SUBTYPE_STX5206)
#define ILC_FIRST_IRQ 176
#define ILC_NR_IRQS (64 + 30)
#define ILC_INT_IRQ(x) (ILC_FIRST_IRQ + (x))
#define ILC_EXT_IRQ(x) (ILC_FIRST_IRQ + 64 + (x))
#define ILC_IRQ(x) ILC_INT_IRQ(x)
#elif defined(CONFIG_CPU_SUBTYPE_STX7105)
#define ILC_FIRST_IRQ 176
#define ILC_NR_IRQS (64 + 42)
#define ILC_INT_IRQ(x) (ILC_FIRST_IRQ + (x))
#define ILC_EXT_IRQ(x) (ILC_FIRST_IRQ + 64 + (x))
#define ILC_IRQ(x) ILC_INT_IRQ(x)
#elif defined(CONFIG_CPU_SUBTYPE_STX7108)
/* set this to 65 to allow 64 (INTEVT 0xa00) to demux */
#define ILC_FIRST_IRQ 65
#define ILC_NR_IRQS 190
#define ILC_IRQ(x) (ILC_FIRST_IRQ + (x))
#elif defined(CONFIG_CPU_SUBTYPE_STX7111)
#define ILC_FIRST_IRQ 176
#define ILC_NR_IRQS (64+36)
#define ILC_INT_IRQ(x) (ILC_FIRST_IRQ + (x))
#define ILC_EXT_IRQ(x) (ILC_FIRST_IRQ + 64 + (x))
#define ILC_IRQ(x) ILC_INT_IRQ(x)
#elif defined(CONFIG_CPU_SUBTYPE_STX7141)
/* set this to 65 to allow 64 (INTEVT 0xa00) to demux */
#define ILC_FIRST_IRQ 65
#define ILC_NR_IRQS 180
#define ILC_IRQ(x) (ILC_FIRST_IRQ + (x))
#define COMMS_ILC_FIRST_IRQ (ILC_FIRST_IRQ + ILC_NR_IRQS)
#define COMMS_ILC_NR_IRQS (64 + 96)
#define COMMS_ILC_IRQ(x) (COMMS_ILC_FIRST_IRQ + (x))
#define COMMS_ILC_EXT_IRQ(x) (COMMS_ILC_FIRST_IRQ + 64 + (x))
#elif defined(CONFIG_CPU_SUBTYPE_STX7200)
#define ILC_FIRST_IRQ 44
#define ILC_NR_IRQS 150
#define ILC_IRQ(x) (ILC_FIRST_IRQ + (x))
#endif
void __init ilc_early_init(struct platform_device* pdev);
int ilc2irq(unsigned int evtcode);
#endif

View File

@@ -0,0 +1,9 @@
void ilc_route_external(int ilc_irq, int ext_out, int invert);
#define ILC_EXT_IRQ0 64
#define ILC_EXT_IRQ1 65
#define ILC_EXT_IRQ2 66
#define ILC_EXT_IRQ3 67
#define ILC_EXT_IRB_WAKEUP 68
#define ILC_EXT_NMI 69
#define ILC_EXT_MDINT 70
#define ILC_EXT_LOWPOWEROUT 71

View File

@@ -0,0 +1,61 @@
#ifndef __ASM_SH_IRQ_H
#define __ASM_SH_IRQ_H
#include <asm/machvec.h>
/*
* A sane default based on a reasonable vector table size; platforms are
* advised to cap this at the hard limit that they're interested in
* through the machvec.
*/
#define NR_IRQS 500
#define NR_IRQS_LEGACY 8 /* Legacy external IRQ0-7 */
/*
* Convert back and forth between INTEVT and IRQ values.
*/
#ifdef CONFIG_CPU_HAS_INTEVT
#define evt2irq(evt) (((evt) >> 5) - 16)
#define irq2evt(irq) (((irq) + 16) << 5)
#else
#define evt2irq(evt) (evt)
#define irq2evt(irq) (irq)
#endif
/*
* Simple Mask Register Support
*/
extern void make_maskreg_irq(unsigned int irq);
extern unsigned short *irq_mask_register;
/*
* PINT IRQs
*/
void init_IRQ_pint(void);
void make_imask_irq(unsigned int irq);
static inline int generic_irq_demux(int irq)
{
return irq;
}
#define irq_demux(irq) sh_mv.mv_irq_demux(irq)
void init_IRQ(void);
asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
#ifdef CONFIG_IRQSTACKS
extern void irq_ctx_init(int cpu);
extern void irq_ctx_exit(int cpu);
# define __ARCH_HAS_DO_SOFTIRQ
#else
# define irq_ctx_init(cpu) do { } while (0)
# define irq_ctx_exit(cpu) do { } while (0)
#endif
#include <asm-generic/irq.h>
#ifdef CONFIG_CPU_SH5
#include <cpu/irq.h>
#endif
#endif /* __ASM_SH_IRQ_H */
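
The INTEVT<->IRQ conversion is just a shift and an offset; a quick worked example (assuming CONFIG_CPU_HAS_INTEVT) makes the mapping concrete.

/*
 * evt2irq(0x420) == ((0x420 >> 5) - 16) == 33 - 16 == 17
 * irq2evt(17)    == ((17 + 16) << 5)    == 33 << 5  == 0x420
 */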

View File

@@ -0,0 +1 @@
#include <asm-generic/irq_regs.h>

View File

@@ -0,0 +1,34 @@
#ifndef __ASM_SH_IRQFLAGS_H
#define __ASM_SH_IRQFLAGS_H
#ifdef CONFIG_SUPERH32
#include "irqflags_32.h"
#else
#include "irqflags_64.h"
#endif
#define raw_local_save_flags(flags) \
do { (flags) = __raw_local_save_flags(); } while (0)
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return (flags != 0);
}
static inline int raw_irqs_disabled(void)
{
unsigned long flags = __raw_local_save_flags();
return raw_irqs_disabled_flags(flags);
}
#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)
static inline void raw_local_irq_restore(unsigned long flags)
{
if ((flags & 0xf0) != 0xf0)
raw_local_irq_enable();
}
#endif /* __ASM_SH_IRQFLAGS_H */
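
These raw helpers follow the usual save/restore pattern: the saved flags are just the SR.IMASK nibble, so restore only re-enables interrupts if they were enabled at save time. A minimal sketch of a critical section built directly on this raw API.

static void example_critical_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* flags == 0xf0 if IRQs were already masked */

	/* ... code that must not be interrupted ... */

	raw_local_irq_restore(flags);	/* re-enables only if (flags & 0xf0) != 0xf0 */
}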

View File

@@ -0,0 +1,99 @@
#ifndef __ASM_SH_IRQFLAGS_32_H
#define __ASM_SH_IRQFLAGS_32_H
static inline void raw_local_irq_enable(void)
{
unsigned long __dummy0, __dummy1;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"and %1, %0\n\t"
#ifdef CONFIG_CPU_HAS_SR_RB
"stc r6_bank, %1\n\t"
"or %1, %0\n\t"
#endif
"ldc %0, sr\n\t"
: "=&r" (__dummy0), "=r" (__dummy1)
: "1" (~0x000000f0)
: "memory"
);
}
static inline void raw_local_irq_disable(void)
{
unsigned long flags;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"or #0xf0, %0\n\t"
"ldc %0, sr\n\t"
: "=&z" (flags)
: /* no inputs */
: "memory"
);
}
static inline void set_bl_bit(void)
{
unsigned long __dummy0, __dummy1;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"or %2, %0\n\t"
"and %3, %0\n\t"
"ldc %0, sr\n\t"
: "=&r" (__dummy0), "=r" (__dummy1)
: "r" (0x10000000), "r" (0xffffff0f)
: "memory"
);
}
static inline void clear_bl_bit(void)
{
unsigned long __dummy0, __dummy1;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"and %2, %0\n\t"
"ldc %0, sr\n\t"
: "=&r" (__dummy0), "=r" (__dummy1)
: "1" (~0x10000000)
: "memory"
);
}
static inline unsigned long __raw_local_save_flags(void)
{
unsigned long flags;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"and #0xf0, %0\n\t"
: "=&z" (flags)
: /* no inputs */
: "memory"
);
return flags;
}
static inline unsigned long __raw_local_irq_save(void)
{
unsigned long flags, __dummy;
__asm__ __volatile__ (
"stc sr, %1\n\t"
"mov %1, %0\n\t"
"or #0xf0, %0\n\t"
"ldc %0, sr\n\t"
"mov %1, %0\n\t"
"and #0xf0, %0\n\t"
: "=&z" (flags), "=&r" (__dummy)
: /* no inputs */
: "memory"
);
return flags;
}
#endif /* __ASM_SH_IRQFLAGS_32_H */

View File

@@ -0,0 +1,85 @@
#ifndef __ASM_SH_IRQFLAGS_64_H
#define __ASM_SH_IRQFLAGS_64_H
#include <cpu/registers.h>
#define SR_MASK_LL 0x00000000000000f0LL
#define SR_BL_LL 0x0000000010000000LL
static inline void raw_local_irq_enable(void)
{
unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL;
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
static inline void raw_local_irq_disable(void)
{
unsigned long long __dummy0, __dummy1 = SR_MASK_LL;
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
static inline void set_bl_bit(void)
{
unsigned long long __dummy0, __dummy1 = SR_BL_LL;
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
static inline void clear_bl_bit(void)
{
unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
static inline unsigned long __raw_local_save_flags(void)
{
unsigned long long __dummy = SR_MASK_LL;
unsigned long flags;
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"and %0, %1, %0"
: "=&r" (flags)
: "r" (__dummy));
return flags;
}
static inline unsigned long __raw_local_irq_save(void)
{
unsigned long long __dummy0, __dummy1 = SR_MASK_LL;
unsigned long flags;
__asm__ __volatile__ (
"getcon " __SR ", %1\n\t"
"or %1, r63, %0\n\t"
"or %1, %2, %1\n\t"
"putcon %1, " __SR "\n\t"
"and %0, %2, %0"
: "=&r" (flags), "=&r" (__dummy0)
: "r" (__dummy1));
return flags;
}
#endif /* __ASM_SH_IRQFLAGS_64_H */

View File

@@ -0,0 +1,11 @@
#ifndef __ASM_SH_KDEBUG_H
#define __ASM_SH_KDEBUG_H
/* Grossly misnamed. */
enum die_val {
DIE_TRAP,
DIE_NMI,
DIE_OOPS,
};
#endif /* __ASM_SH_KDEBUG_H */

View File

@@ -0,0 +1,62 @@
#ifndef __ASM_SH_KEXEC_H
#define __ASM_SH_KEXEC_H
#include <asm/ptrace.h>
#include <asm/string.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
* i.e. the maximum page that is mapped directly into kernel memory
* and for which kmap is not required.
*
* Someone correct me if FIXADDR_START - PAGEOFFSET is not the correct
* calculation for the amount of memory directly mappable into the
* kernel memory space.
*/
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
#define KEXEC_CONTROL_PAGE_SIZE 4096
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_SH
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
else {
__asm__ __volatile__ ("mov r0, %0" : "=r" (newregs->regs[0]));
__asm__ __volatile__ ("mov r1, %0" : "=r" (newregs->regs[1]));
__asm__ __volatile__ ("mov r2, %0" : "=r" (newregs->regs[2]));
__asm__ __volatile__ ("mov r3, %0" : "=r" (newregs->regs[3]));
__asm__ __volatile__ ("mov r4, %0" : "=r" (newregs->regs[4]));
__asm__ __volatile__ ("mov r5, %0" : "=r" (newregs->regs[5]));
__asm__ __volatile__ ("mov r6, %0" : "=r" (newregs->regs[6]));
__asm__ __volatile__ ("mov r7, %0" : "=r" (newregs->regs[7]));
__asm__ __volatile__ ("mov r8, %0" : "=r" (newregs->regs[8]));
__asm__ __volatile__ ("mov r9, %0" : "=r" (newregs->regs[9]));
__asm__ __volatile__ ("mov r10, %0" : "=r" (newregs->regs[10]));
__asm__ __volatile__ ("mov r11, %0" : "=r" (newregs->regs[11]));
__asm__ __volatile__ ("mov r12, %0" : "=r" (newregs->regs[12]));
__asm__ __volatile__ ("mov r13, %0" : "=r" (newregs->regs[13]));
__asm__ __volatile__ ("mov r14, %0" : "=r" (newregs->regs[14]));
__asm__ __volatile__ ("mov r15, %0" : "=r" (newregs->regs[15]));
__asm__ __volatile__ ("sts pr, %0" : "=r" (newregs->pr));
__asm__ __volatile__ ("sts macl, %0" : "=r" (newregs->macl));
__asm__ __volatile__ ("sts mach, %0" : "=r" (newregs->mach));
__asm__ __volatile__ ("stc gbr, %0" : "=r" (newregs->gbr));
__asm__ __volatile__ ("stc sr, %0" : "=r" (newregs->sr));
newregs->pc = (unsigned long)current_text_addr();
}
}
#endif /* __ASM_SH_KEXEC_H */

View File

@@ -0,0 +1,38 @@
#ifndef __ASM_SH_KGDB_H
#define __ASM_SH_KGDB_H
#include <asm/cacheflush.h>
#include <asm/ptrace.h>
/* Same as pt_regs but has vbr in place of syscall_nr */
struct kgdb_regs {
unsigned long regs[16];
unsigned long pc;
unsigned long pr;
unsigned long sr;
unsigned long gbr;
unsigned long mach;
unsigned long macl;
unsigned long vbr;
};
enum regnames {
GDB_R0, GDB_R1, GDB_R2, GDB_R3, GDB_R4, GDB_R5, GDB_R6, GDB_R7,
GDB_R8, GDB_R9, GDB_R10, GDB_R11, GDB_R12, GDB_R13, GDB_R14, GDB_R15,
GDB_PC, GDB_PR, GDB_SR, GDB_GBR, GDB_MACH, GDB_MACL, GDB_VBR,
};
#define NUMREGBYTES ((GDB_VBR + 1) * 4)
static inline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ("trapa #0x3c\n");
}
#define BUFMAX 2048
#define CACHE_FLUSH_IS_SAFE 1
#define BREAK_INSTR_SIZE 2
#endif /* __ASM_SH_KGDB_H */

View File

@@ -0,0 +1,14 @@
#ifndef __SH_KMAP_TYPES_H
#define __SH_KMAP_TYPES_H
/* Dummy header just to define km_type. */
#ifdef CONFIG_DEBUG_HIGHMEM
#define __WITH_KM_FENCE
#endif
#include <asm-generic/kmap_types.h>
#undef __WITH_KM_FENCE
#endif

View File

@@ -0,0 +1,59 @@
#ifndef __ASM_SH_KPROBES_H
#define __ASM_SH_KPROBES_H
#ifdef CONFIG_KPROBES
#include <linux/types.h>
#include <linux/ptrace.h>
typedef insn_size_t kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xc3c3
#define BREAKPOINT_INSTRUCTION_REMOTE 0xc320
#define MAX_INSN_SIZE 16
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
? (MAX_STACK_SIZE) \
: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
#define regs_return_value(_regs) ((_regs)->regs[0])
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
struct kprobe;
void arch_remove_kprobe(struct kprobe *);
void kretprobe_trampoline(void);
void jprobe_return_end(void);
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
/* copy of the original instruction */
kprobe_opcode_t insn[MAX_INSN_SIZE];
};
struct prev_kprobe {
struct kprobe *kp;
unsigned long status;
};
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long jprobe_saved_r15;
struct pt_regs jprobe_saved_regs;
kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
extern int kprobe_handle_illslot(unsigned long pc);
#else
#define kprobe_handle_illslot(pc) (-1)
#endif /* CONFIG_KPROBES */
#endif /* __ASM_SH_KPROBES_H */

View File

@@ -0,0 +1,47 @@
#ifndef _LINUX_KPTRACE_H
#define _LINUX_KPTRACE_H
/*
* Kptrace - KProbes-based tracing
* include/linux/kptrace.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) STMicroelectronics, 2007, 2008
*
* 2007-July Created by Chris Smith <chris.smith@st.com>
* 2008-August kpprintf added by Chris Smith <chris.smith@st.com>
*/
#define KPTRACE_BUF_SIZE 1024
#define KPTRACE_SMALL_BUF 128
#ifdef CONFIG_KPTRACE
/* Mark a particular point in the code as "interesting" in the kptrace log */
void kptrace_mark(void);
/* Write a string to the kptrace log */
void kptrace_write_record(const char *buf);
/* Stop logging trace records until kptrace_restart() is called */
void kptrace_pause(void);
/* Restart logging of trace records after a kptrace_pause() */
void kptrace_restart(void);
/* Allow printf-style records to be added */
void kpprintf(char *fmt, ...);
#endif /* CONFIG_KPTRACE */
#endif /* _LINUX_KPTRACE_H */
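
When CONFIG_KPTRACE is enabled a driver can drop ad-hoc markers and printf-style records into the trace log, and kptrace_pause()/kptrace_restart() bracket stretches that should not be logged. A small, hypothetical usage sketch; chan and len are placeholder parameters.

static void example_dma_queue(int chan, size_t len)
{
#ifdef CONFIG_KPTRACE
	kptrace_mark();					/* flag an "interesting" point */
	kptrace_write_record("dma: transfer queued");
	kpprintf("dma: %zu bytes on channel %d\n", len, chan);

	kptrace_pause();				/* silence a noisy stretch */
	/* ... */
	kptrace_restart();
#endif
	/* ... actual work ... */
}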

View File

@@ -0,0 +1,60 @@
#ifndef __ASM_SH_L2_CACHEFLUSH_H
#define __ASM_SH_L2_CACHEFLUSH_H
#if defined(CONFIG_STM_L2_CACHE)
#include <asm/stm-l2-cache.h>
static inline void __l2_flush_wback_region(void *start, int size)
{
stm_l2_flush_wback((unsigned long)start, size, 0);
}
static inline void __l2_flush_purge_region(void *start, int size)
{
stm_l2_flush_purge((unsigned long)start, size, 0);
}
static inline void __l2_flush_invalidate_region(void *start, int size)
{
stm_l2_flush_invalidate((unsigned long)start, size, 0);
}
#define __l2_flush_wback_phys(start, size) \
stm_l2_flush_wback(start, size, 1)
#define __l2_flush_purge_phys(start, size) \
stm_l2_flush_purge(start, size, 1)
#define __l2_flush_invalidate_phys(start, size) \
stm_l2_flush_invalidate(start, size, 1)
#else
static inline void __l2_flush_wback_region(void *start, int size)
{
}
static inline void __l2_flush_purge_region(void *start, int size)
{
}
static inline void __l2_flush_invalidate_region(void *start, int size)
{
}
static inline void __l2_flush_wback_phys(unsigned long start, int size)
{
}
static inline void __l2_flush_purge_phys(unsigned long start, int size)
{
}
static inline void __l2_flush_invalidate_phys(unsigned long start, int size)
{
}
#endif
#endif

View File

@@ -0,0 +1,7 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#define __ALIGN .balign 4
#define __ALIGN_STR ".balign 4"
#endif

View File

@@ -0,0 +1,6 @@
#ifndef __ASM_SH_LMB_H
#define __ASM_SH_LMB_H
#define LMB_REAL_LIMIT 0
#endif /* __ASM_SH_LMB_H */

View File

@@ -0,0 +1,7 @@
#ifndef __ASM_SH_LOCAL_H
#define __ASM_SH_LOCAL_H
#include <asm-generic/local.h>
#endif /* __ASM_SH_LOCAL_H */

View File

@@ -0,0 +1,61 @@
/*
* include/asm-sh/machvec.h
*
* Copyright 2000 Stuart Menefy (stuart.menefy@st.com)
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*/
#ifndef _ASM_SH_MACHVEC_H
#define _ASM_SH_MACHVEC_H
#include <linux/types.h>
#include <linux/time.h>
#include <asm/machtypes.h>
struct sh_machine_vector {
void (*mv_setup)(char **cmdline_p);
const char *mv_name;
int mv_nr_irqs;
u8 (*mv_inb)(unsigned long);
u16 (*mv_inw)(unsigned long);
u32 (*mv_inl)(unsigned long);
void (*mv_outb)(u8, unsigned long);
void (*mv_outw)(u16, unsigned long);
void (*mv_outl)(u32, unsigned long);
u8 (*mv_inb_p)(unsigned long);
u16 (*mv_inw_p)(unsigned long);
u32 (*mv_inl_p)(unsigned long);
void (*mv_outb_p)(u8, unsigned long);
void (*mv_outw_p)(u16, unsigned long);
void (*mv_outl_p)(u32, unsigned long);
void (*mv_insb)(unsigned long, void *dst, unsigned long count);
void (*mv_insw)(unsigned long, void *dst, unsigned long count);
void (*mv_insl)(unsigned long, void *dst, unsigned long count);
void (*mv_outsb)(unsigned long, const void *src, unsigned long count);
void (*mv_outsw)(unsigned long, const void *src, unsigned long count);
void (*mv_outsl)(unsigned long, const void *src, unsigned long count);
int (*mv_irq_demux)(int irq);
void (*mv_init_irq)(void);
void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size);
void (*mv_ioport_unmap)(void __iomem *);
int (*mv_clk_init)(void);
int (*mv_mode_pins)(void);
};
extern struct sh_machine_vector sh_mv;
#define get_system_type() sh_mv.mv_name
#define __initmv \
__used __section(.machvec.init)
#endif /* _ASM_SH_MACHVEC_H */
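
A board announces itself by instantiating one of these vectors and tagging it __initmv so it lands in the .machvec.init section; hooks left NULL fall back to the generic implementations. A sketch with hypothetical board callbacks.

static struct sh_machine_vector mv_example __initmv = {
	.mv_name	= "ExampleBoard",
	.mv_setup	= example_setup,	/* hypothetical */
	.mv_init_irq	= example_init_irq,	/* hypothetical */
	.mv_mode_pins	= example_mode_pins,	/* hypothetical */
};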

View File

@@ -0,0 +1,7 @@
/*
* Machine dependent access functions for RTC registers.
*/
#ifndef _ASM_MC146818RTC_H
#define _ASM_MC146818RTC_H
#endif /* _ASM_MC146818RTC_H */

View File

@@ -0,0 +1 @@
#include <asm-generic/mman.h>

View File

@@ -0,0 +1,68 @@
#ifndef __MMU_H
#define __MMU_H
/*
* Privileged Space Mapping Buffer (PMB) definitions
*/
#define PMB_PASCR 0xff000070
#define PMB_IRMCR 0xff000078
#define PASCR_SE 0x80000000
#define PMB_ADDR 0xf6100000
#define PMB_DATA 0xf7100000
#define PMB_ENTRY_MAX 16
#define PMB_E_MASK 0x0000000f
#define PMB_E_SHIFT 8
#define PMB_SZ_16M 0x00000000
#define PMB_SZ_64M 0x00000010
#define PMB_SZ_128M 0x00000080
#define PMB_SZ_512M 0x00000090
#define PMB_SZ_MASK PMB_SZ_512M
#define PMB_C 0x00000008
#define PMB_WT 0x00000001
#define PMB_UB 0x00000200
#define PMB_V 0x00000100
#define PMB_NO_ENTRY (-1)
#ifndef __ASSEMBLY__
/* Default "unsigned long" context */
typedef unsigned long mm_context_id_t[NR_CPUS];
typedef struct {
#ifdef CONFIG_MMU
mm_context_id_t id;
void *vdso;
#else
unsigned long end_brk;
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
unsigned long exec_fdpic_loadmap;
unsigned long interp_fdpic_loadmap;
#endif
} mm_context_t;
#ifdef CONFIG_PMB
/* arch/sh/mm/pmb.c */
bool __in_29bit_mode(void);
long pmb_remap(unsigned long phys, unsigned long size, unsigned long flags);
int pmb_unmap(unsigned long addr);
void pmb_init(void);
int pmb_virt_to_phys(void *addr, unsigned long *phys, unsigned long *flags);
#else
#ifdef CONFIG_29BIT
#define __in_29bit_mode() (1)
#else
#define __in_29bit_mode() (0)
#endif
#endif /* CONFIG_PMB */
#endif /* __ASSEMBLY__ */
#endif /* __MMU_H */

View File

@@ -0,0 +1,196 @@
/*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2003 - 2007 Paul Mundt
*
* ASID handling idea taken from MIPS implementation.
*/
#ifndef __ASM_SH_MMU_CONTEXT_H
#define __ASM_SH_MMU_CONTEXT_H
#ifdef __KERNEL__
#include <cpu/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm-generic/mm_hooks.h>
/*
* The MMU "context" consists of two things:
* (a) TLB cache version (or round, cycle whatever expression you like)
* (b) ASID (Address Space IDentifier)
*/
#ifdef CONFIG_CPU_HAS_PTEAEX
#define MMU_CONTEXT_ASID_MASK 0x0000ffff
#else
#define MMU_CONTEXT_ASID_MASK 0x000000ff
#endif
#define MMU_CONTEXT_VERSION_MASK (~0UL & ~MMU_CONTEXT_ASID_MASK)
#define MMU_CONTEXT_FIRST_VERSION (MMU_CONTEXT_ASID_MASK + 1)
/* Impossible ASID value, to differentiate from NO_CONTEXT. */
#define MMU_NO_ASID MMU_CONTEXT_FIRST_VERSION
#define NO_CONTEXT 0UL
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
#ifdef CONFIG_MMU
#define cpu_context(cpu, mm) ((mm)->context.id[cpu])
#define cpu_asid(cpu, mm) \
(cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
/*
* Virtual Page Number mask
*/
#define MMU_VPN_MASK 0xfffff000
#if defined(CONFIG_SUPERH32)
#include "mmu_context_32.h"
#else
#include "mmu_context_64.h"
#endif
/*
* Get MMU context if needed.
*/
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
unsigned long asid = asid_cache(cpu);
/* Check if we have old version of context. */
if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
/* It's up to date, do nothing */
return;
/* It's old, we need to get new context with new version. */
if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
/*
* We have exhausted the ASIDs of this version.
* Flush all TLBs and start a new cycle.
*/
local_flush_tlb_all();
#ifdef CONFIG_SUPERH64
/*
* The SH-5 cache uses the ASIDs, requiring both the I and D
* cache to be flushed when the ASID is exhausted. Weak.
*/
flush_cache_all();
#endif
/*
* Fix version; note that we avoid version #0
* to distinguish it from NO_CONTEXT.
*/
if (!asid)
asid = MMU_CONTEXT_FIRST_VERSION;
}
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
/*
* Initialize the context related info for a new mm_struct
* instance.
*/
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
int i;
for (i = 0; i < num_online_cpus(); i++)
cpu_context(i, mm) = NO_CONTEXT;
return 0;
}
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
get_mmu_context(mm, cpu);
set_asid(cpu_asid(cpu, mm));
}
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
{
unsigned int cpu = smp_processor_id();
if (likely(prev != next)) {
cpumask_set_cpu(cpu, mm_cpumask(next));
set_TTB(next->pgd);
activate_context(next, cpu);
} else
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
activate_context(next, cpu);
}
#define activate_mm(prev, next) switch_mm((prev),(next),NULL)
#define deactivate_mm(tsk,mm) do { } while (0)
#define enter_lazy_tlb(mm,tsk) do { } while (0)
#else
#define set_asid(asid) do { } while (0)
#define get_asid() (0)
#define cpu_asid(cpu, mm) ({ (void)cpu; NO_CONTEXT; })
#define switch_and_save_asid(asid) (0)
#define set_TTB(pgd) do { } while (0)
#define get_TTB() (0)
#include <asm-generic/mmu_context.h>
#endif /* CONFIG_MMU */
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
* If this processor has an MMU, we need methods to turn it off/on ..
* paging_init() will also have to be updated for the processor in
* question.
*/
static inline void enable_mmu(void)
{
unsigned int cpu = smp_processor_id();
unsigned long mmucr_init = MMU_CONTROL_INIT;
/* Enable MMU */
#ifdef CONFIG_32BIT
if (1) /* SH4-202 and SE */
mmucr_init |= MMUCR_SE;
#endif
ctrl_outl(mmucr_init, MMUCR);
ctrl_barrier();
if (asid_cache(cpu) == NO_CONTEXT)
asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;
set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
}
static inline void disable_mmu(void)
{
unsigned long cr;
cr = ctrl_inl(MMUCR);
cr &= ~MMU_CONTROL_INIT;
ctrl_outl(cr, MMUCR);
ctrl_barrier();
}
#else
/*
* MMU control handlers for processors lacking memory
* management hardware.
*/
#define enable_mmu() do { } while (0)
#define disable_mmu() do { } while (0)
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMU_CONTEXT_H */
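
The version/ASID split described above is easiest to see with concrete numbers; taking the 8-bit ASID case (MMU_CONTEXT_ASID_MASK == 0xff, MMU_CONTEXT_FIRST_VERSION == 0x100):

/*
 * cpu_context(cpu, mm) == 0x00000305
 *   version = 0x305 & ~0xff = 0x300
 *   ASID    = 0x305 &  0xff = 0x05
 *
 * When ++asid wraps the low byte to zero, local_flush_tlb_all() runs and
 * the version advances to 0x400, so stale contexts from the 0x300 cycle
 * no longer compare equal in get_mmu_context().
 */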

View File

@@ -0,0 +1,59 @@
#ifndef __ASM_SH_MMU_CONTEXT_32_H
#define __ASM_SH_MMU_CONTEXT_32_H
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{
/* Do nothing */
}
#ifdef CONFIG_CPU_HAS_PTEAEX
static inline void set_asid(unsigned long asid)
{
__raw_writel(asid, MMU_PTEAEX);
}
static inline unsigned long get_asid(void)
{
return __raw_readl(MMU_PTEAEX) & MMU_CONTEXT_ASID_MASK;
}
#else
static inline void set_asid(unsigned long asid)
{
unsigned long __dummy;
__asm__ __volatile__ ("mov.l %2, %0\n\t"
"and %3, %0\n\t"
"or %1, %0\n\t"
"mov.l %0, %2"
: "=&r" (__dummy)
: "r" (asid), "m" (__m(MMU_PTEH)),
"r" (0xffffff00));
}
static inline unsigned long get_asid(void)
{
unsigned long asid;
__asm__ __volatile__ ("mov.l %1, %0"
: "=r" (asid)
: "m" (__m(MMU_PTEH)));
asid &= MMU_CONTEXT_ASID_MASK;
return asid;
}
#endif /* CONFIG_CPU_HAS_PTEAEX */
/* MMU_TTB is used for optimizing the fault handling. */
static inline void set_TTB(pgd_t *pgd)
{
ctrl_outl((unsigned long)pgd, MMU_TTB);
}
static inline pgd_t *get_TTB(void)
{
return (pgd_t *)ctrl_inl(MMU_TTB);
}
#endif /* __ASM_SH_MMU_CONTEXT_32_H */

View File

@@ -0,0 +1,78 @@
#ifndef __ASM_SH_MMU_CONTEXT_64_H
#define __ASM_SH_MMU_CONTEXT_64_H
/*
* sh64-specific mmu_context interface.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <cpu/registers.h>
#include <asm/cacheflush.h>
#define SR_ASID_MASK 0xffffffffff00ffffULL
#define SR_ASID_SHIFT 16
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{
/* Well, at least free TLB entries */
flush_tlb_mm(mm);
}
static inline unsigned long get_asid(void)
{
unsigned long long sr;
asm volatile ("getcon " __SR ", %0\n\t"
: "=r" (sr));
sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
return (unsigned long) sr;
}
/* Set ASID into SR */
static inline void set_asid(unsigned long asid)
{
unsigned long long sr, pc;
asm volatile ("getcon " __SR ", %0" : "=r" (sr));
sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
/*
* It is possible that this function may be inlined and so to avoid
* the assembler reporting duplicate symbols we make use of the
* gas trick of generating symbols using numerics and forward
* reference.
*/
asm volatile ("movi 1, %1\n\t"
"shlli %1, 28, %1\n\t"
"or %0, %1, %1\n\t"
"putcon %1, " __SR "\n\t"
"putcon %0, " __SSR "\n\t"
"movi 1f, %1\n\t"
"ori %1, 1 , %1\n\t"
"putcon %1, " __SPC "\n\t"
"rte\n"
"1:\n\t"
: "=r" (sr), "=r" (pc) : "0" (sr));
}
/* arch/sh/kernel/cpu/sh5/entry.S */
extern unsigned long switch_and_save_asid(unsigned long new_asid);
/* No spare register to twiddle, so use a software cache */
extern pgd_t *mmu_pdtp_cache;
#define set_TTB(pgd) (mmu_pdtp_cache = (pgd))
#define get_TTB() (mmu_pdtp_cache)
#endif /* __ASM_SH_MMU_CONTEXT_64_H */

View File

@@ -0,0 +1,50 @@
#ifndef __ASM_SH_MMZONE_H
#define __ASM_SH_MMZONE_H
#ifdef __KERNEL__
#ifdef CONFIG_NEED_MULTIPLE_NODES
#include <linux/numa.h>
extern struct pglist_data *node_data[];
#define NODE_DATA(nid) (node_data[nid])
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
NODE_DATA(nid)->node_spanned_pages)
static inline int pfn_to_nid(unsigned long pfn)
{
int nid;
for (nid = 0; nid < MAX_NUMNODES; nid++)
if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid))
break;
return nid;
}
static inline struct pglist_data *pfn_to_pgdat(unsigned long pfn)
{
return NODE_DATA(pfn_to_nid(pfn));
}
/* arch/sh/mm/numa.c */
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end);
#else
static inline void
setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_NEED_MULTIPLE_NODES */
/* Platform specific mem init */
void __init plat_mem_setup(void);
/* arch/sh/kernel/setup.c */
void __init setup_bootmem_allocator(unsigned long start_pfn);
void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMZONE_H */

View File

@@ -0,0 +1,34 @@
#ifndef _ASM_SH_MODULE_H
#define _ASM_SH_MODULE_H
#include <asm-generic/module.h>
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# ifdef CONFIG_CPU_SH2
# define MODULE_PROC_FAMILY "SH2LE "
# elif defined CONFIG_CPU_SH3
# define MODULE_PROC_FAMILY "SH3LE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4LE "
# elif defined CONFIG_CPU_SH5
# define MODULE_PROC_FAMILY "SH5LE "
# else
# error unknown processor family
# endif
#else
# ifdef CONFIG_CPU_SH2
# define MODULE_PROC_FAMILY "SH2BE "
# elif defined CONFIG_CPU_SH3
# define MODULE_PROC_FAMILY "SH3BE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4BE "
# elif defined CONFIG_CPU_SH5
# define MODULE_PROC_FAMILY "SH5BE "
# else
# error unknown processor family
# endif
#endif
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
#endif /* _ASM_SH_MODULE_H */

View File

@@ -0,0 +1 @@
#include <asm-generic/msgbuf.h>

View File

@@ -0,0 +1,109 @@
/*
* arch/sh/include/asm/mutex-llsc.h
*
* SH-4A optimized mutex locking primitives
*
* Please look into asm-generic/mutex-xchg.h for a formal definition.
*/
#ifndef __ASM_SH_MUTEX_LLSC_H
#define __ASM_SH_MUTEX_LLSC_H
/*
* Attempting to lock a mutex on SH-4A is done as on ARMv6+ architectures,
* with a bastardized atomic decrement (it is not a reliable atomic decrement,
* but it satisfies the defined semantics for our purpose, while being
* smaller and faster than a real atomic decrement or atomic swap).
* The idea is to attempt decrementing the lock value only once. If once
* decremented it isn't zero, or if its store-back fails due to a dispute
* on the exclusive store, we simply bail out immediately through the slow
* path where the lock will be reattempted until it succeeds.
*/
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
int __done, __res;
__asm__ __volatile__ (
"movli.l @%2, %0 \n"
"add #-1, %0 \n"
"movco.l %0, @%2 \n"
"movt %1 \n"
: "=&z" (__res), "=&r" (__done)
: "r" (&(count)->counter)
: "t");
if (unlikely(!__done || __res != 0))
fail_fn(count);
}
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
int __done, __res;
__asm__ __volatile__ (
"movli.l @%2, %0 \n"
"add #-1, %0 \n"
"movco.l %0, @%2 \n"
"movt %1 \n"
: "=&z" (__res), "=&r" (__done)
: "r" (&(count)->counter)
: "t");
if (unlikely(!__done || __res != 0))
__res = fail_fn(count);
return __res;
}
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
int __done, __res;
__asm__ __volatile__ (
"movli.l @%2, %0 \n\t"
"add #1, %0 \n\t"
"movco.l %0, @%2 \n\t"
"movt %1 \n\t"
: "=&z" (__res), "=&r" (__done)
: "r" (&(count)->counter)
: "t");
if (unlikely(!__done || __res <= 0))
fail_fn(count);
}
/*
* If the unlock was done on a contended lock, or if the unlock simply fails,
* then the mutex remains locked.
*/
#define __mutex_slowpath_needs_to_unlock() 1
/*
* For __mutex_fastpath_trylock we do an atomic decrement and check the
* result and put it in the __res variable.
*/
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
int __res, __orig;
__asm__ __volatile__ (
"1: movli.l @%2, %0 \n\t"
"dt %0 \n\t"
"movco.l %0,@%2 \n\t"
"bf 1b \n\t"
"cmp/eq #0,%0 \n\t"
"bt 2f \n\t"
"mov #0, %1 \n\t"
"bf 3f \n\t"
"2: mov #1, %1 \n\t"
"3: "
: "=&z" (__orig), "=&r" (__res)
: "r" (&count->counter)
: "t");
return __res;
}
#endif /* __ASM_SH_MUTEX_LLSC_H */

View File

@@ -0,0 +1,12 @@
/*
* Pull in the generic implementation for the mutex fastpath.
*
* TODO: implement optimized primitives instead, or leave the generic
* implementation in place, or pick the atomic_xchg() based generic
* implementation. (see asm-generic/mutex-xchg.h for details)
*/
#if defined(CONFIG_CPU_SH4A)
#include <asm/mutex-llsc.h>
#else
#include <asm-generic/mutex-dec.h>
#endif

View File

@@ -0,0 +1,192 @@
#ifndef __ASM_SH_PAGE_H
#define __ASM_SH_PAGE_H
/*
* Copyright (C) 1999 Niibe Yutaka
*/
#include <linux/const.h>
#ifdef __KERNEL__
/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT 12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT 13
#elif defined(CONFIG_PAGE_SIZE_16KB)
# define PAGE_SHIFT 14
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT 16
#else
# error "Bogus kernel page size?"
#endif
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PTE_MASK PAGE_MASK
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT 16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#define HPAGE_SHIFT 18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT 20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT 22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT 26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT 29
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
#endif
#ifndef __ASSEMBLY__
extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end;
static inline unsigned long
pages_do_alias(unsigned long addr1, unsigned long addr2)
{
return (addr1 ^ addr2) & shm_align_mask;
}
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);
struct page;
struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
#define clear_user_highpage clear_user_highpage
/*
* These are used to make use of C type-checking..
*/
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#elif defined(CONFIG_SUPERH32)
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )
#endif
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
typedef struct page *pgtable_t;
#define pte_pgprot(x) __pgprot(pte_val(x) & PTE_FLAGS_MASK)
#endif /* !__ASSEMBLY__ */
/*
* __MEMORY_START and SIZE are the physical addresses and size of RAM.
*/
#define __MEMORY_START CONFIG_MEMORY_START
#define __MEMORY_SIZE CONFIG_MEMORY_SIZE
/*
* PAGE_OFFSET is the virtual address of the start of kernel address
* space.
*/
#define PAGE_OFFSET CONFIG_PAGE_OFFSET
/*
* Virtual to physical RAM address translation.
*
* In 29 bit mode, the physical offset of RAM from address 0 is visible in
* the kernel virtual address space, and thus we don't have to take
* this into account when translating. However in 32 bit mode this offset
* is not visible (it is part of the PMB mapping) and so needs to be
* added or subtracted as required.
*/
#if defined(CONFIG_PMB_FIXED)
/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PMB_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) + PMB_OFFSET))
#elif defined(CONFIG_32BIT)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
#else
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
/*
* PFN = physical frame number (ie PFN 0 == physical address 0)
* PFN_START is the PFN of the first page of RAM. By defining this we
* don't have struct page entries for the portion of address space
* between physical address 0 and the start of RAM.
*/
#define PFN_START (__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET (PFN_START)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
#endif
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
/* vDSO support */
#ifdef CONFIG_VSYSCALL
#define __HAVE_ARCH_GATE_AREA
#endif
/*
* Some drivers need to perform DMA into kmalloc'ed buffers
* and so we have to increase the kmalloc minalign for this.
*/
#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
#ifdef CONFIG_SUPERH64
/*
* While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
* happily generate {ld/st}.q pairs, requiring us to have 8-byte
* alignment to avoid traps. The kmalloc alignment is guaranteed by
* virtue of L1_CACHE_BYTES, requiring this to only be special cased
* for slab caches.
*/
#define ARCH_SLAB_MINALIGN 8
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_SH_PAGE_H */
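
The 29-bit __pa()/__va() translation is a plain offset against PAGE_OFFSET; with the common SH defaults of PAGE_OFFSET == 0x80000000 and __MEMORY_START == 0x0c000000 (assumed values for illustration, not read from this tree's config) the arithmetic looks like:

/*
 * __pa(0x8c001000) == 0x8c001000 - 0x80000000 == 0x0c001000
 * __va(0x0c001000) == (void *)(0x0c001000 + 0x80000000) == (void *)0x8c001000
 *
 * In CONFIG_32BIT mode __MEMORY_START is added back for __pa() and
 * subtracted for __va(), since the RAM offset is hidden behind the PMB
 * mapping rather than being visible in the virtual address.
 */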

View File

@@ -0,0 +1 @@
#include <asm-generic/param.h>

View File

@@ -0,0 +1 @@
#include <asm-generic/parport.h>

View File

@@ -0,0 +1,150 @@
#ifndef __ASM_SH_PCI_H
#define __ASM_SH_PCI_H
#ifdef __KERNEL__
#include <linux/dma-mapping.h>
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
#define pcibios_assign_all_busses() 1
/*
* A board can define one or more PCI channels that represent built-in (or
* external) PCI controllers.
*/
struct pci_channel {
struct pci_channel *next;
struct pci_ops *pci_ops;
struct resource *io_resource;
struct resource *mem_resource;
unsigned long io_offset;
unsigned long mem_offset;
unsigned long reg_base;
unsigned long io_map_base;
};
extern void register_pci_controller(struct pci_channel *hose);
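/*
 * Illustrative board-file sketch (assumption, not part of this header):
 * a platform describes its controller with a struct pci_channel and hands
 * it to register_pci_controller() during setup. All names and addresses
 * below are hypothetical.
 *
 *	static struct resource example_io_res = {
 *		.name	= "PCI IO",
 *		.start	= 0x4000,
 *		.end	= 0x4000 + 0x10000 - 1,
 *		.flags	= IORESOURCE_IO,
 *	};
 *
 *	static struct resource example_mem_res = {
 *		.name	= "PCI MEM",
 *		.start	= 0xfd000000,
 *		.end	= 0xfd000000 + 0x01000000 - 1,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	static struct pci_channel example_channel = {
 *		.pci_ops	= &example_pci_ops,
 *		.io_resource	= &example_io_res,
 *		.mem_resource	= &example_mem_res,
 *	};
 *
 *	register_pci_controller(&example_channel);
 */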
extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM;
struct pci_dev;
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
extern void pcibios_set_master(struct pci_dev *dev);
static inline void pcibios_penalize_isa_irq(int irq, int active)
{
/* We don't do dynamic PCI IRQ allocation */
}
/* Dynamic DMA mapping stuff.
* SuperH has everything mapped statically like x86.
*/
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>
/* pci_unmap_{single,page} being a nop depends upon the
* configuration.
*/
#ifdef CONFIG_SH_PCIDMA_NONCOHERENT
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME) \
((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME) \
((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
#else
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME) (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif
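/*
 * Illustrative driver sketch (assumption, not from this header): a driver
 * embeds the unmap cookies in its own descriptor so the same source works
 * whether or not CONFIG_SH_PCIDMA_NONCOHERENT is set; in the coherent case
 * the macros compile away to nothing. All names below are hypothetical.
 *
 *	struct example_rx_desc {
 *		struct sk_buff *skb;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *		DECLARE_PCI_UNMAP_LEN(len)
 *	};
 *
 *	pci_unmap_addr_set(desc, mapping, dma_handle);
 *	pci_unmap_len_set(desc, len, buf_len);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(desc, mapping),
 *			 pci_unmap_len(desc, len), PCI_DMA_FROMDEVICE);
 */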
#ifdef CONFIG_PCI
/*
 * None of the SH PCI controllers support MWI; it is always treated as a
 * direct memory write.
*/
#define PCI_DISABLE_MWI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
unsigned long *strategy_parameter)
{
unsigned long cacheline_size;
u8 byte;
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
if (byte == 0)
cacheline_size = L1_CACHE_BYTES;
else
cacheline_size = byte << 2;
*strat = PCI_DMA_BURST_MULTIPLE;
*strategy_parameter = cacheline_size;
}
#endif
#if defined(CONFIG_SUPERH32) && !defined(CONFIG_CPU_SUBTYPE_ST40)
/*
* If we're on an SH7751 or SH7780 PCI controller, PCI memory is mapped
* at the end of the address space in a special non-translatable area.
*/
#define PCI_MEM_FIXED_START 0xfd000000
#define PCI_MEM_FIXED_END (PCI_MEM_FIXED_START + 0x01000000)
#define is_pci_memory_fixed_range(s, e) \
((s) >= PCI_MEM_FIXED_START && (e) < PCI_MEM_FIXED_END)
#else
#define is_pci_memory_fixed_range(s, e) (0)
#endif
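/*
 * Illustrative sketch, not part of the upstream header: code that remaps
 * physical ranges can consult is_pci_memory_fixed_range() to skip ranges
 * that already have a fixed, non-translatable mapping. The helper name is
 * hypothetical.
 */
static inline int __example_range_needs_ioremap(unsigned long start,
						unsigned long len)
{
	/* fixed PCI memory on SH7751/SH7780 never needs a fresh mapping */
	return !is_pci_memory_fixed_range(start, start + len - 1);
}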
/* Board-specific fixup routines. */
int pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin);
extern void pcibios_resource_to_bus(struct pci_dev *dev,
struct pci_bus_region *region, struct resource *res);
extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
struct pci_bus_region *region);
/* Chances are this interrupt is wired PC-style ... */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
/* generic DMA-mapping stuff */
#include <asm-generic/pci-dma-compat.h>
#endif /* __KERNEL__ */
#endif /* __ASM_SH_PCI_H */

Some files were not shown because too many files have changed in this diff.