add idl4k kernel firmware version 1.13.0.105

This commit is contained in:
Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions

View File

@@ -0,0 +1,4 @@
# Kbuild: arch headers exported to userspace via `make headers_install`.
header-y += user.h
header-y += svinto.h
header-y += sv_addr_ag.h
header-y += sv_addr.agh

View File

@@ -0,0 +1,7 @@
#ifndef __ASM_CRIS_ARCH_ATOMIC__
#define __ASM_CRIS_ARCH_ATOMIC__
/* CRISv10 atomics are implemented by disabling interrupts around the
 * critical section; the addr argument is unused on this arch.
 * Note the expansions carry their own trailing semicolons. */
#define cris_atomic_save(addr, flags) local_irq_save(flags);
#define cris_atomic_restore(addr, flags) local_irq_restore(flags);
#endif

View File

@@ -0,0 +1,73 @@
/* asm/arch/bitops.h for Linux/CRISv10 */
#ifndef _CRIS_ARCH_BITOPS_H
#define _CRIS_ARCH_BITOPS_H
/*
* Helper functions for the core of the ff[sz] functions, wrapping the
* syntactically awkward asms. The asms compute the number of leading
* zeroes of a bits-in-byte and byte-in-word and word-in-dword-swapped
* number. They differ in that the first function also inverts all bits
* in the input.
*/
/* swapnwbr = invert + bit/byte-reverse, then lz counts leading zeros:
   net effect is the index (0..31) of the lowest ZERO bit of w. */
static inline unsigned long cris_swapnwbrlz(unsigned long w)
{
/* Let's just say we return the result in the same register as the
input. Saying we clobber the input but can return the result
in another register:
! __asm__ ("swapnwbr %2\n\tlz %2,%0"
! : "=r,r" (res), "=r,X" (dummy) : "1,0" (w));
confuses gcc (sched.c, gcc from cris-dist-1.14). */
unsigned long res;
__asm__ ("swapnwbr %0 \n\t"
"lz %0,%0"
: "=r" (res) : "0" (w));
return res;
}
/* Same as above without the invert: index of the lowest SET bit of w.
   Result is undefined for w == 0. */
static inline unsigned long cris_swapwbrlz(unsigned long w)
{
unsigned res;
__asm__ ("swapwbr %0 \n\t"
"lz %0,%0"
: "=r" (res)
: "0" (w));
return res;
}
/*
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
static inline unsigned long ffz(unsigned long w)
{
return cris_swapnwbrlz(w);
}
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
/* First zero bit of ~word == first set bit of word. */
return cris_swapnwbrlz(~word);
}
/**
* kernel_ffs - find first bit set
* @w: the word to search
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs):
* returns 1 + index of the lowest set bit, or 0 when @w is 0.
*/
static inline unsigned long kernel_ffs(unsigned long w)
{
return w ? cris_swapwbrlz (w) + 1 : 0;
}
#endif

View File

@@ -0,0 +1,66 @@
#ifndef __ASM_CRISv10_ARCH_BUG_H
#define __ASM_CRISv10_ARCH_BUG_H
#include <linux/stringify.h>
#ifdef CONFIG_BUG
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* The BUG() macro is used for marking obviously incorrect code paths.
 * It will cause a message with the file name and line number to be printed,
 * and then cause an oops. The message is actually printed by handle_BUG()
 * in arch/cris/kernel/traps.c, and the reason we use this method of storing
 * the file name and line number is that we do not want to affect the registers
 * by calling printk() before causing the oops.
 */
#define BUG_PREFIX 0x0D7F
#define BUG_MAGIC 0x00001234
/* In-memory layout of the instruction sequence emitted by BUG() below;
 * presumably decoded field-by-field by handle_BUG() — confirm in traps.c. */
struct bug_frame {
unsigned short prefix;
unsigned int magic;
unsigned short clear;
unsigned short movu;
unsigned short line;
unsigned short jump;
unsigned char *filename;
};
#if 0
/* Unfortunately this version of the macro does not work due to a problem
 * with the compiler (aka a bug) when compiling with -O2, which sometimes
 * erroneously causes the second input to be stored in a register...
 */
#define BUG() \
__asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\
"movu.w %0,$r0\n\t" \
"jump %1\n\t" \
: : "i" (__LINE__), "i" (__FILE__))
#else
/* This version will have to do for now, until the compiler is fixed.
 * The drawbacks of this version are that the file name will appear multiple
 * times in the .rodata section, and that __LINE__ and __FILE__ can probably
 * not be used like this with newer versions of gcc.
 */
#define BUG() \
__asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\
"movu.w " __stringify(__LINE__) ",$r0\n\t"\
"jump 0f\n\t" \
".section .rodata\n" \
"0:\t.string \"" __FILE__ "\"\n\t" \
".previous")
#endif
#else
/* This just causes an oops (deliberate NULL-pointer write). */
#define BUG() (*(int *)0 = 0)
#endif
#define HAVE_ARCH_BUG
#endif
#include <asm-generic/bug.h>
#endif

View File

@@ -0,0 +1,8 @@
#ifndef _ASM_ARCH_CACHE_H
#define _ASM_ARCH_CACHE_H
/* Etrax 100LX have 32-byte cache-lines. */
#define L1_CACHE_BYTES 32
/* log2(L1_CACHE_BYTES); must stay consistent with the value above. */
#define L1_CACHE_SHIFT 5
#endif /* _ASM_ARCH_CACHE_H */

View File

@@ -0,0 +1,29 @@
#ifndef _CRIS_ARCH_CHECKSUM_H
#define _CRIS_ARCH_CHECKSUM_H
/* Checksum some values used in TCP/UDP headers.
 *
 * The gain by doing this in asm is that C will not generate carry-additions
 * for the 32-bit components of the checksum, so otherwise we would have had
 * to split all of those into 16-bit components, then add.
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
__wsum res;
/* Accumulate saddr, daddr and the (len+proto) pseudo-header word into
   sum; each "ax" makes the following add carry-extended, and the final
   "addq 0" folds in the last carry.  NOTE(review): the <<8 presumably
   positions len/proto for the byte order expected by csum_fold —
   confirm against the generic checksum callers. */
__asm__ ("add.d %2, %0\n\t"
"ax\n\t"
"add.d %3, %0\n\t"
"ax\n\t"
"add.d %4, %0\n\t"
"ax\n\t"
"addq 0, %0\n"
: "=r" (res)
: "0" (sum), "r" (daddr), "r" (saddr), "r" ((len + proto) << 8));
return res;
}
#endif

View File

@@ -0,0 +1,20 @@
#ifndef _CRIS_ARCH_DELAY_H
#define _CRIS_ARCH_DELAY_H
/* Busy-wait for roughly `loops' iterations of a tight r9 countdown loop.
 * Calibrated delays (udelay etc.) are presumably built on top of this. */
static inline void __delay(int loops)
{
/* beq skips the loop entirely when loops == 0; otherwise the
   bne/subq pair spins until r9 reaches zero. */
__asm__ __volatile__ (
"move.d %0,$r9\n\t"
"beq 2f\n\t"
"subq 1,$r9\n\t"
"1:\n\t"
"bne 1b\n\t"
"subq 1,$r9\n"
"2:"
: : "g" (loops) : "r9");
}
#endif /* defined(_CRIS_ARCH_DELAY_H) */

View File

@@ -0,0 +1,74 @@
/* Defines for using and allocating dma channels. */
#ifndef _ASM_ARCH_DMA_H
#define _ASM_ARCH_DMA_H
#define MAX_DMA_CHANNELS 10
/* dma0 and dma1 used for network (ethernet) */
#define NETWORK_TX_DMA_NBR 0
#define NETWORK_RX_DMA_NBR 1
/* dma2 and dma3 shared by par0, scsi0, ser2 and ata */
#define PAR0_TX_DMA_NBR 2
#define PAR0_RX_DMA_NBR 3
#define SCSI0_TX_DMA_NBR 2
#define SCSI0_RX_DMA_NBR 3
#define SER2_TX_DMA_NBR 2
#define SER2_RX_DMA_NBR 3
#define ATA_TX_DMA_NBR 2
#define ATA_RX_DMA_NBR 3
/* dma4 and dma5 shared by par1, scsi1, ser3 and extdma0 */
#define PAR1_TX_DMA_NBR 4
#define PAR1_RX_DMA_NBR 5
#define SCSI1_TX_DMA_NBR 4
#define SCSI1_RX_DMA_NBR 5
#define SER3_TX_DMA_NBR 4
#define SER3_RX_DMA_NBR 5
#define EXTDMA0_TX_DMA_NBR 4
#define EXTDMA0_RX_DMA_NBR 5
/* dma6 and dma7 shared by ser0, extdma1 and mem2mem */
#define SER0_TX_DMA_NBR 6
#define SER0_RX_DMA_NBR 7
#define EXTDMA1_TX_DMA_NBR 6
#define EXTDMA1_RX_DMA_NBR 7
#define MEM2MEM_TX_DMA_NBR 6
#define MEM2MEM_RX_DMA_NBR 7
/* dma8 and dma9 shared by ser1 and usb */
#define SER1_TX_DMA_NBR 8
#define SER1_RX_DMA_NBR 9
#define USB_TX_DMA_NBR 8
#define USB_RX_DMA_NBR 9
/* Possible owners recorded for an allocated DMA channel. */
enum dma_owner
{
dma_eth,
dma_ser0,
dma_ser1, /* Async and sync */
dma_ser2,
dma_ser3, /* Async and sync */
dma_ata,
dma_par0,
dma_par1,
dma_ext0,
dma_ext1,
dma_int6,
dma_int7,
dma_usb,
dma_scsi0,
dma_scsi1
};
/* Masks used by cris_request_dma options: */
#define DMA_VERBOSE_ON_ERROR (1<<0)
#define DMA_PANIC_ON_ERROR ((1<<1)|DMA_VERBOSE_ON_ERROR)
/* Claim channel dmanr for owner; device_id is used for diagnostics and
 * must stay valid until cris_free_dma() is called with the same id. */
int cris_request_dma(unsigned int dmanr, const char * device_id,
unsigned options, enum dma_owner owner);
void cris_free_dma(unsigned int dmanr, const char * device_id);
/* Guard must close at end of file: previously it ended before
 * enum dma_owner, leaving the enum/prototypes unprotected against
 * double inclusion (enum redefinition error). */
#endif /* _ASM_ARCH_DMA_H */

View File

@@ -0,0 +1,81 @@
/* CRISv10-specific ELF loader glue (machine check, register init/dump). */
#ifndef __ASMCRIS_ARCH_ELF_H
#define __ASMCRIS_ARCH_ELF_H
#define ELF_MACH EF_CRIS_VARIANT_ANY_V0_V10
/*
* This is used to ensure we don't load something for the wrong architecture.
* Accepts EM_CRIS binaries flagged for any v0-v10 variant or the common
* v10/v32 subset.
*/
#define elf_check_arch(x) \
((x)->e_machine == EM_CRIS \
&& ((((x)->e_flags & EF_CRIS_VARIANT_MASK) == EF_CRIS_VARIANT_ANY_V0_V10 \
|| (((x)->e_flags & EF_CRIS_VARIANT_MASK) == EF_CRIS_VARIANT_COMMON_V10_V32))))
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
starts (a register; assume first param register for CRIS)
contains a pointer to a function which might be
registered using `atexit'. This provides a mean for the
dynamic linker to call DT_FINI functions for shared libraries
that have been loaded before the code runs.
A value of 0 tells we have no such handler. */
/* Explicitly set registers to 0 to increase determinism. */
#define ELF_PLAT_INIT(_r, load_addr) do { \
(_r)->r13 = 0; (_r)->r12 = 0; (_r)->r11 = 0; (_r)->r10 = 0; \
(_r)->r9 = 0; (_r)->r8 = 0; (_r)->r7 = 0; (_r)->r6 = 0; \
(_r)->r5 = 0; (_r)->r4 = 0; (_r)->r3 = 0; (_r)->r2 = 0; \
(_r)->r1 = 0; (_r)->r0 = 0; (_r)->mof = 0; (_r)->srp = 0; \
} while (0)
/* The additional layer below is because the stack pointer is missing in
the pt_regs struct, but needed in a core dump. pr_reg is a elf_gregset_t,
and should be filled in according to the layout of the user_regs_struct
struct; regs is a pt_regs struct. We dump all registers, though several are
obviously unnecessary. That way there's less need for intelligence at
the receiving end (i.e. gdb). */
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
pr_reg[0] = regs->r0; \
pr_reg[1] = regs->r1; \
pr_reg[2] = regs->r2; \
pr_reg[3] = regs->r3; \
pr_reg[4] = regs->r4; \
pr_reg[5] = regs->r5; \
pr_reg[6] = regs->r6; \
pr_reg[7] = regs->r7; \
pr_reg[8] = regs->r8; \
pr_reg[9] = regs->r9; \
pr_reg[10] = regs->r10; \
pr_reg[11] = regs->r11; \
pr_reg[12] = regs->r12; \
pr_reg[13] = regs->r13; \
pr_reg[14] = rdusp(); /* sp */ \
pr_reg[15] = regs->irp; /* pc */ \
pr_reg[16] = 0; /* p0 */ \
pr_reg[17] = rdvr(); /* vr */ \
pr_reg[18] = 0; /* p2 */ \
pr_reg[19] = 0; /* p3 */ \
pr_reg[20] = 0; /* p4 */ \
pr_reg[21] = (regs->dccr & 0xffff); /* ccr */ \
pr_reg[22] = 0; /* p6 */ \
pr_reg[23] = regs->mof; /* mof */ \
pr_reg[24] = 0; /* p8 */ \
pr_reg[25] = 0; /* ibr */ \
pr_reg[26] = 0; /* irp */ \
pr_reg[27] = regs->srp; /* srp */ \
pr_reg[28] = 0; /* bar */ \
pr_reg[29] = regs->dccr; /* dccr */ \
pr_reg[30] = 0; /* brp */ \
pr_reg[31] = rdusp(); /* usp */ \
pr_reg[32] = 0; /* csrinstr */ \
pr_reg[33] = 0; /* csraddr */ \
pr_reg[34] = 0; /* csrdata */
#endif

View File

@@ -0,0 +1,199 @@
/* CRISv10 I/O helpers: shadow-register access and product LED macros. */
#ifndef _ASM_ARCH_CRIS_IO_H
#define _ASM_ARCH_CRIS_IO_H
#include <arch/svinto.h>
/* Etrax shadow registers - which live in arch/cris/kernel/shadows.c */
extern unsigned long gen_config_ii_shadow;
extern unsigned long port_g_data_shadow;
extern unsigned char port_pa_dir_shadow;
extern unsigned char port_pa_data_shadow;
extern unsigned char port_pb_i2c_shadow;
extern unsigned char port_pb_config_shadow;
extern unsigned char port_pb_dir_shadow;
extern unsigned char port_pb_data_shadow;
extern unsigned long r_timer_ctrl_shadow;
extern unsigned long port_cse1_shadow;
extern unsigned long port_csp0_shadow;
extern unsigned long port_csp4_shadow;
extern volatile unsigned long *port_cse1_addr;
extern volatile unsigned long *port_csp0_addr;
extern volatile unsigned long *port_csp4_addr;
/* macro for setting regs through a shadow -
 * r = register name (like R_PORT_PA_DATA)
 * s = shadow name (like port_pa_data_shadow)
 * b = bit number
 * v = value (0 or 1)
 * Updates the shadow first, then writes it out to the (write-only) register.
 */
#define REG_SHADOW_SET(r,s,b,v) *r = s = (s & ~(1 << (b))) | ((v) << (b))
/* The LED's on various Etrax-based products are set differently. */
#if defined(CONFIG_ETRAX_NO_LEDS) || defined(CONFIG_SVINTO_SIM)
#undef CONFIG_ETRAX_PA_LEDS
#undef CONFIG_ETRAX_PB_LEDS
#undef CONFIG_ETRAX_CSP0_LEDS
#define CRIS_LED_NETWORK_SET_G(x)
#define CRIS_LED_NETWORK_SET_R(x)
#define CRIS_LED_ACTIVE_SET_G(x)
#define CRIS_LED_ACTIVE_SET_R(x)
#define CRIS_LED_DISK_WRITE(x)
#define CRIS_LED_DISK_READ(x)
#endif
#if !defined(CONFIG_ETRAX_CSP0_LEDS)
#define CRIS_LED_BIT_SET(x)
#define CRIS_LED_BIT_CLR(x)
#endif
#define CRIS_LED_OFF 0x00
#define CRIS_LED_GREEN 0x01
#define CRIS_LED_RED 0x02
#define CRIS_LED_ORANGE (CRIS_LED_GREEN | CRIS_LED_RED)
#if defined(CONFIG_ETRAX_NO_LEDS)
#define CRIS_LED_NETWORK_SET(x)
#else
/* When green and red share the same pin only the green setter is driven. */
#if CONFIG_ETRAX_LED1G == CONFIG_ETRAX_LED1R
#define CRIS_LED_NETWORK_SET(x) \
do { \
CRIS_LED_NETWORK_SET_G((x) & CRIS_LED_GREEN); \
} while (0)
#else
#define CRIS_LED_NETWORK_SET(x) \
do { \
CRIS_LED_NETWORK_SET_G((x) & CRIS_LED_GREEN); \
CRIS_LED_NETWORK_SET_R((x) & CRIS_LED_RED); \
} while (0)
#endif
#if CONFIG_ETRAX_LED2G == CONFIG_ETRAX_LED2R
#define CRIS_LED_ACTIVE_SET(x) \
do { \
CRIS_LED_ACTIVE_SET_G((x) & CRIS_LED_GREEN); \
} while (0)
#else
#define CRIS_LED_ACTIVE_SET(x) \
do { \
CRIS_LED_ACTIVE_SET_G((x) & CRIS_LED_GREEN); \
CRIS_LED_ACTIVE_SET_R((x) & CRIS_LED_RED); \
} while (0)
#endif
#endif
/* LEDs wired to port PA; active-low, hence the !(x) inversion. */
#ifdef CONFIG_ETRAX_PA_LEDS
#define CRIS_LED_NETWORK_SET_G(x) \
REG_SHADOW_SET(R_PORT_PA_DATA, port_pa_data_shadow, CONFIG_ETRAX_LED1G, !(x))
#define CRIS_LED_NETWORK_SET_R(x) \
REG_SHADOW_SET(R_PORT_PA_DATA, port_pa_data_shadow, CONFIG_ETRAX_LED1R, !(x))
#define CRIS_LED_ACTIVE_SET_G(x) \
REG_SHADOW_SET(R_PORT_PA_DATA, port_pa_data_shadow, CONFIG_ETRAX_LED2G, !(x))
#define CRIS_LED_ACTIVE_SET_R(x) \
REG_SHADOW_SET(R_PORT_PA_DATA, port_pa_data_shadow, CONFIG_ETRAX_LED2R, !(x))
#define CRIS_LED_DISK_WRITE(x) \
do{\
REG_SHADOW_SET(R_PORT_PA_DATA, port_pa_data_shadow, CONFIG_ETRAX_LED3G, !(x));\
REG_SHADOW_SET(R_PORT_PA_DATA, port_pa_data_shadow, CONFIG_ETRAX_LED3R, !(x));\
}while(0)
#define CRIS_LED_DISK_READ(x) \
REG_SHADOW_SET(R_PORT_PA_DATA, port_pa_data_shadow, \
CONFIG_ETRAX_LED3G, !(x))
#endif
/* LEDs wired to port PB; same active-low convention as PA. */
#ifdef CONFIG_ETRAX_PB_LEDS
#define CRIS_LED_NETWORK_SET_G(x) \
REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_LED1G, !(x))
#define CRIS_LED_NETWORK_SET_R(x) \
REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_LED1R, !(x))
#define CRIS_LED_ACTIVE_SET_G(x) \
REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_LED2G, !(x))
#define CRIS_LED_ACTIVE_SET_R(x) \
REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_LED2R, !(x))
#define CRIS_LED_DISK_WRITE(x) \
do{\
REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_LED3G, !(x));\
REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_LED3R, !(x));\
}while(0)
#define CRIS_LED_DISK_READ(x) \
REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, \
CONFIG_ETRAX_LED3G, !(x))
#endif
/* LEDs wired behind chip-select CSP0 (memory-mapped latch). */
#ifdef CONFIG_ETRAX_CSP0_LEDS
#define CONFIGURABLE_LEDS\
((1 << CONFIG_ETRAX_LED1G ) | (1 << CONFIG_ETRAX_LED1R ) |\
(1 << CONFIG_ETRAX_LED2G ) | (1 << CONFIG_ETRAX_LED2R ) |\
(1 << CONFIG_ETRAX_LED3G ) | (1 << CONFIG_ETRAX_LED3R ) |\
(1 << CONFIG_ETRAX_LED4G ) | (1 << CONFIG_ETRAX_LED4R ) |\
(1 << CONFIG_ETRAX_LED5G ) | (1 << CONFIG_ETRAX_LED5R ) |\
(1 << CONFIG_ETRAX_LED6G ) | (1 << CONFIG_ETRAX_LED6R ) |\
(1 << CONFIG_ETRAX_LED7G ) | (1 << CONFIG_ETRAX_LED7R ) |\
(1 << CONFIG_ETRAX_LED8Y ) | (1 << CONFIG_ETRAX_LED9Y ) |\
(1 << CONFIG_ETRAX_LED10Y ) |(1 << CONFIG_ETRAX_LED11Y )|\
(1 << CONFIG_ETRAX_LED12R ))
#define CRIS_LED_NETWORK_SET_G(x) \
REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, CONFIG_ETRAX_LED1G, !(x))
#define CRIS_LED_NETWORK_SET_R(x) \
REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, CONFIG_ETRAX_LED1R, !(x))
#define CRIS_LED_ACTIVE_SET_G(x) \
REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, CONFIG_ETRAX_LED2G, !(x))
#define CRIS_LED_ACTIVE_SET_R(x) \
REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, CONFIG_ETRAX_LED2R, !(x))
#define CRIS_LED_DISK_WRITE(x) \
do{\
REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, CONFIG_ETRAX_LED3G, !(x));\
REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, CONFIG_ETRAX_LED3R, !(x));\
}while(0)
#define CRIS_LED_DISK_READ(x) \
REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, CONFIG_ETRAX_LED3G, !(x))
#define CRIS_LED_BIT_SET(x)\
do{\
if((( 1 << x) & CONFIGURABLE_LEDS) != 0)\
REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, x, 1);\
}while(0)
#define CRIS_LED_BIT_CLR(x)\
do{\
if((( 1 << x) & CONFIGURABLE_LEDS) != 0)\
REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, x, 0);\
}while(0)
#endif
#ifdef CONFIG_ETRAX_SOFT_SHUTDOWN
#define SOFT_SHUTDOWN() \
REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, CONFIG_ETRAX_SHUTDOWN_BIT, 1)
#else
#define SOFT_SHUTDOWN()
#endif
/* Console I/O for simulated etrax100. Use #ifdef so erroneous
 use will be evident. */
#ifdef CONFIG_SVINTO_SIM
/* Let's use the ucsim interface since it lets us do write(2, ...) */
#define SIMCOUT(s,len) \
asm ("moveq 4,$r9 \n\t" \
"moveq 2,$r10 \n\t" \
"move.d %0,$r11 \n\t" \
"move.d %1,$r12 \n\t" \
"push $irp \n\t" \
"move 0f,$irp \n\t" \
"jump -6809 \n" \
"0: \n\t" \
"pop $irp" \
: : "rm" (s), "rm" (len) : "r9","r10","r11","r12","memory")
#define TRACE_ON() __extension__ \
({ int _Foofoo; __asm__ volatile ("bmod [%0],%0" : "=r" (_Foofoo) : "0" \
(255)); _Foofoo; })
#define TRACE_OFF() do { __asm__ volatile ("bmod [%0],%0" :: "r" (254)); } while (0)
#define SIM_END() do { __asm__ volatile ("bmod [%0],%0" :: "r" (28)); } while (0)
#define CRIS_CYCLES() __extension__ \
({ unsigned long c; asm ("bmod [%1],%0" : "=r" (c) : "r" (27)); c;})
#endif /* CONFIG_SVINTO_SIM */
#endif

View File

@@ -0,0 +1,75 @@
/* IO interface mux allocator for ETRAX100LX.
 * Copyright 2004, Axis Communications AB
 * $Id: io_interface_mux.h,v 1.1 2004/12/13 12:21:53 starvik Exp $
 */
#ifndef _IO_INTERFACE_MUX_H
#define _IO_INTERFACE_MUX_H
/* C.f. ETRAX100LX Designer's Reference 20.9 */
/* The order in enum must match the order of interfaces[] in
 * io_interface_mux.c */
enum cris_io_interface {
/* Begin Non-multiplexed interfaces */
if_eth = 0,
if_serial_0,
/* End Non-multiplexed interfaces */
if_serial_1,
if_serial_2,
if_serial_3,
if_sync_serial_1,
if_sync_serial_3,
if_shared_ram,
if_shared_ram_w,
if_par_0,
if_par_1,
if_par_w,
if_scsi8_0,
if_scsi8_1,
if_scsi_w,
if_ata,
if_csp,
if_i2c,
if_usb_1,
if_usb_2,
/* GPIO pins */
if_gpio_grp_a,
if_gpio_grp_b,
if_gpio_grp_c,
if_gpio_grp_d,
if_gpio_grp_e,
if_gpio_grp_f,
if_max_interfaces,
if_unclaimed
};
/* Claim/release an entire multiplexed interface; device_id is used for
 * diagnostics.  Returns 0 on success (presumably — confirm in
 * io_interface_mux.c). */
int cris_request_io_interface(enum cris_io_interface ioif, const char *device_id);
void cris_free_io_interface(enum cris_io_interface ioif);
/* port can be 'a', 'b' or 'g' */
int cris_io_interface_allocate_pins(const enum cris_io_interface ioif,
const char port,
const unsigned start_bit,
const unsigned stop_bit);
/* port can be 'a', 'b' or 'g' */
int cris_io_interface_free_pins(const enum cris_io_interface ioif,
const char port,
const unsigned start_bit,
const unsigned stop_bit);
/* Register/unregister a callback invoked when pin availability changes. */
int cris_io_interface_register_watcher(void (*notify)(const unsigned int gpio_in_available,
const unsigned int gpio_out_available,
const unsigned char pa_available,
const unsigned char pb_available));
void cris_io_interface_delete_watcher(void (*notify)(const unsigned int gpio_in_available,
const unsigned int gpio_out_available,
const unsigned char pa_available,
const unsigned char pb_available));
#endif /* _IO_INTERFACE_MUX_H */

View File

@@ -0,0 +1,160 @@
/*
* Interrupt handling assembler and defines for Linux/CRISv10
*/
#ifndef _ASM_ARCH_IRQ_H
#define _ASM_ARCH_IRQ_H
#include <arch/sv_addr_ag.h>
#define NR_IRQS 32
/* The first vector number used for IRQs in v10 is really 0x20 */
/* but all the code and constants are offset to make 0 the first */
#define FIRST_IRQ 0
#define SOME_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, some) /* 0 ? */
#define NMI_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, nmi) /* 1 */
#define TIMER0_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, timer0) /* 2 */
#define TIMER1_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, timer1) /* 3 */
/* mio, ata, par0, scsi0 on 4 */
/* par1, scsi1 on 5 */
#define NETWORK_STATUS_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, network) /* 6 */
#define SERIAL_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, serial) /* 8 */
#define PA_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, pa) /* 11 */
/* extdma0 and extdma1 is at irq 12 and 13 and/or same as dma5 and dma6 ? */
#define EXTDMA0_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, ext_dma0)
#define EXTDMA1_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, ext_dma1)
/* dma0-9 is irq 16..25 */
/* 16,17: network */
#define DMA0_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma0)
#define DMA1_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma1)
#define NETWORK_DMA_TX_IRQ_NBR DMA0_TX_IRQ_NBR
#define NETWORK_DMA_RX_IRQ_NBR DMA1_RX_IRQ_NBR
/* 18,19: dma2 and dma3 shared by par0, scsi0, ser2 and ata */
#define DMA2_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma2)
#define DMA3_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma3)
#define SER2_DMA_TX_IRQ_NBR DMA2_TX_IRQ_NBR
#define SER2_DMA_RX_IRQ_NBR DMA3_RX_IRQ_NBR
/* 20,21: dma4 and dma5 shared by par1, scsi1, ser3 and extdma0 */
#define DMA4_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma4)
#define DMA5_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma5)
#define SER3_DMA_TX_IRQ_NBR DMA4_TX_IRQ_NBR
#define SER3_DMA_RX_IRQ_NBR DMA5_RX_IRQ_NBR
/* 22,23: dma6 and dma7 shared by ser0, extdma1 and mem2mem */
#define DMA6_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma6)
#define DMA7_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma7)
#define SER0_DMA_TX_IRQ_NBR DMA6_TX_IRQ_NBR
#define SER0_DMA_RX_IRQ_NBR DMA7_RX_IRQ_NBR
#define MEM2MEM_DMA_TX_IRQ_NBR DMA6_TX_IRQ_NBR
#define MEM2MEM_DMA_RX_IRQ_NBR DMA7_RX_IRQ_NBR
/* 24,25: dma8 and dma9 shared by ser1 and usb */
#define DMA8_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma8)
#define DMA9_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma9)
#define SER1_DMA_TX_IRQ_NBR DMA8_TX_IRQ_NBR
#define SER1_DMA_RX_IRQ_NBR DMA9_RX_IRQ_NBR
#define USB_DMA_TX_IRQ_NBR DMA8_TX_IRQ_NBR
#define USB_DMA_RX_IRQ_NBR DMA9_RX_IRQ_NBR
/* usb: controller at irq 31 + uses DMA8 and DMA9 */
#define USB_HC_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, usb)
/* our fine, global, etrax irq vector! the pointer lives in the head.S file. */
typedef void (*irqvectptr)(void);
struct etrax_interrupt_vector {
irqvectptr v[256];
};
extern struct etrax_interrupt_vector *etrax_irv;
void set_int_vector(int n, irqvectptr addr);
void set_break_vector(int n, irqvectptr addr);
#define __STR(x) #x
#define STR(x) __STR(x)
/* SAVE_ALL saves registers so they match pt_regs */
#define SAVE_ALL \
"move $irp,[$sp=$sp-16]\n\t" /* push instruction pointer and fake SBFS struct */ \
"push $srp\n\t" /* push subroutine return pointer */ \
"push $dccr\n\t" /* push condition codes */ \
"push $mof\n\t" /* push multiply overflow reg */ \
"di\n\t" /* need to disable irq's at this point */\
"subq 14*4,$sp\n\t" /* make room for r0-r13 */ \
"movem $r13,[$sp]\n\t" /* push the r0-r13 registers */ \
"push $r10\n\t" /* push orig_r10 */ \
"clear.d [$sp=$sp-4]\n\t" /* frametype - this is a normal stackframe */
/* BLOCK_IRQ and UNBLOCK_IRQ do the same as mask_irq and unmask_irq */
/* NOTE(review): 0xb00000d8/0xb00000dc are hardcoded I/O addresses,
   presumably the vector mask clear/set registers - confirm against
   sv_addr_ag.h. */
#define BLOCK_IRQ(mask,nr) \
"move.d " #mask ",$r0\n\t" \
"move.d $r0,[0xb00000d8]\n\t"
#define UNBLOCK_IRQ(mask) \
"move.d " #mask ",$r0\n\t" \
"move.d $r0,[0xb00000dc]\n\t"
#define IRQ_NAME2(nr) nr##_interrupt(void)
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
#define sIRQ_NAME(nr) IRQ_NAME2(sIRQ##nr)
#define BAD_IRQ_NAME(nr) IRQ_NAME2(bad_IRQ##nr)
/* the asm IRQ handler makes sure the causing IRQ is blocked, then it calls
* do_IRQ (with irq disabled still). after that it unblocks and jumps to
* ret_from_intr (entry.S)
*
* The reason the IRQ is blocked is to allow an sti() before the handler which
* will acknowledge the interrupt is run.
*/
#define BUILD_IRQ(nr,mask) \
void IRQ_NAME(nr); \
__asm__ ( \
".text\n\t" \
"IRQ" #nr "_interrupt:\n\t" \
SAVE_ALL \
BLOCK_IRQ(mask,nr) /* this must be done to prevent irq loops when we ei later */ \
"moveq "#nr",$r10\n\t" \
"move.d $sp,$r11\n\t" \
"jsr do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \
UNBLOCK_IRQ(mask) \
"moveq 0,$r9\n\t" /* make ret_from_intr realise we came from an irq */ \
"jump ret_from_intr\n\t");
/* This is subtle. The timer interrupt is crucial and it should not be disabled for
* too long. However, if it had been a normal interrupt as per BUILD_IRQ, it would
* have been BLOCK'ed, and then softirq's are run before we return here to UNBLOCK.
* If the softirq's take too much time to run, the timer irq won't run and the
* watchdog will kill us.
*
* Furthermore, if a lot of other irq's occur before we return here, the multiple_irq
* handler is run and it prioritizes the timer interrupt. However if we had BLOCK'ed
* it here, we would not get the multiple_irq at all.
*
* The non-blocking here is based on the knowledge that the timer interrupt is
* registered as a fast interrupt (IRQF_DISABLED) so that we _know_ there will not
* be an sti() before the timer irq handler is run to acknowledge the interrupt.
*/
#define BUILD_TIMER_IRQ(nr,mask) \
void IRQ_NAME(nr); \
__asm__ ( \
".text\n\t" \
"IRQ" #nr "_interrupt:\n\t" \
SAVE_ALL \
"moveq "#nr",$r10\n\t" \
"move.d $sp,$r11\n\t" \
"jsr do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \
"moveq 0,$r9\n\t" /* make ret_from_intr realise we came from an irq */ \
"jump ret_from_intr\n\t");
#endif

View File

@@ -0,0 +1,22 @@
#ifndef _ASM_ARCH_MEMMAP_H
#define _ASM_ARCH_MEMMAP_H
/* Physical memory map: chip-select region base addresses, each region
 * spaced 0x04000000 (64 MB) apart. */
#define MEM_CSE0_START (0x00000000)
#define MEM_CSE0_SIZE (0x04000000)
#define MEM_CSE1_START (0x04000000)
#define MEM_CSE1_SIZE (0x04000000)
#define MEM_CSR0_START (0x08000000)
#define MEM_CSR1_START (0x0c000000)
#define MEM_CSP0_START (0x10000000)
#define MEM_CSP1_START (0x14000000)
#define MEM_CSP2_START (0x18000000)
#define MEM_CSP3_START (0x1c000000)
#define MEM_CSP4_START (0x20000000)
#define MEM_CSP5_START (0x24000000)
#define MEM_CSP6_START (0x28000000)
#define MEM_CSP7_START (0x2c000000)
#define MEM_DRAM_START (0x40000000)
/* OR-ing this bit into an address selects the non-cacheable mapping. */
#define MEM_NON_CACHEABLE (0x80000000)
#endif

View File

@@ -0,0 +1,110 @@
/*
* CRIS MMU constants and PTE layout
*/
#ifndef _CRIS_ARCH_MMU_H
#define _CRIS_ARCH_MMU_H
/* type used in struct mm to couple an MMU context to an active mm */
typedef struct
{
unsigned int page_id;
} mm_context_t;
/* kernel memory segments: 16 fixed 256 MB segments covering the 4 GB space */
#define KSEG_F 0xf0000000UL
#define KSEG_E 0xe0000000UL
#define KSEG_D 0xd0000000UL
#define KSEG_C 0xc0000000UL
#define KSEG_B 0xb0000000UL
#define KSEG_A 0xa0000000UL
#define KSEG_9 0x90000000UL
#define KSEG_8 0x80000000UL
#define KSEG_7 0x70000000UL
#define KSEG_6 0x60000000UL
#define KSEG_5 0x50000000UL
#define KSEG_4 0x40000000UL
#define KSEG_3 0x30000000UL
#define KSEG_2 0x20000000UL
#define KSEG_1 0x10000000UL
#define KSEG_0 0x00000000UL
/* CRIS PTE bits (see R_TLB_LO in the register description)
*
* Bit: 31 30-13 12-------4 3 2 1 0
* _______________________________________________________
* | cache |pfn | reserved | global | valid | kernel | we |
* |_______|____|__________|________|_______|________|_____|
*
* (pfn = physical frame number)
*/
/* Real HW-based PTE bits. We use some synonym names so that
* things become less confusing in combination with the SW-based
* bits further below.
*
*/
#define _PAGE_WE (1<<0) /* page is write-enabled */
#define _PAGE_SILENT_WRITE (1<<0) /* synonym */
#define _PAGE_KERNEL (1<<1) /* page is kernel only */
#define _PAGE_VALID (1<<2) /* page is valid */
#define _PAGE_SILENT_READ (1<<2) /* synonym */
#define _PAGE_GLOBAL (1<<3) /* global page - context is ignored */
/* NOTE(review): 1<<31 shifts into the sign bit of int (implementation-
   defined); kept as-is since the kernel relies on the resulting mask. */
#define _PAGE_NO_CACHE (1<<31) /* part of the uncached memory map */
/* Bits the HW doesn't care about but the kernel uses them in SW */
/* Bit 5 does double duty: _PAGE_FILE is only meaningful when
   _PAGE_PRESENT is clear, so it can share the bit with _PAGE_ACCESSED. */
#define _PAGE_PRESENT (1<<4) /* page present in memory */
#define _PAGE_FILE (1<<5) /* set: pagecache, unset: swap (when !PRESENT) */
#define _PAGE_ACCESSED (1<<5) /* simulated in software using valid bit */
#define _PAGE_MODIFIED (1<<6) /* simulated in software using we bit */
#define _PAGE_READ (1<<7) /* read-enabled */
#define _PAGE_WRITE (1<<8) /* write-enabled */
/* Define some higher level generic page attributes. */
#define __READABLE (_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
#define _PAGE_TABLE (_PAGE_PRESENT | __READABLE | __WRITEABLE)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | __READABLE | _PAGE_WRITE | \
_PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | __READABLE) // | _PAGE_COW
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | __READABLE)
#define PAGE_KERNEL __pgprot(_PAGE_GLOBAL | _PAGE_KERNEL | \
_PAGE_PRESENT | __READABLE | __WRITEABLE)
#define _KERNPG_TABLE (_PAGE_TABLE | _PAGE_KERNEL)
/*
* CRIS can't do page protection for execute, and considers read the same.
* Also, write permissions imply read permissions. This is the closest we can
* get..
*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
#define PTE_FILE_MAX_BITS 26
#endif

View File

@@ -0,0 +1,33 @@
#ifndef __ASM_OFFSETS_H__
#define __ASM_OFFSETS_H__
/*
 * DO NOT MODIFY.
 *
 * This file was generated by arch/cris/Makefile
 *
 * The numeric values are struct offsets consumed by assembly code;
 * regenerate via the build system instead of editing them by hand.
 */
#define PT_orig_r10 4 /* offsetof(struct pt_regs, orig_r10) */
#define PT_r13 8 /* offsetof(struct pt_regs, r13) */
#define PT_r12 12 /* offsetof(struct pt_regs, r12) */
#define PT_r11 16 /* offsetof(struct pt_regs, r11) */
#define PT_r10 20 /* offsetof(struct pt_regs, r10) */
#define PT_r9 24 /* offsetof(struct pt_regs, r9) */
#define PT_mof 64 /* offsetof(struct pt_regs, mof) */
#define PT_dccr 68 /* offsetof(struct pt_regs, dccr) */
#define PT_srp 72 /* offsetof(struct pt_regs, srp) */
#define TI_task 0 /* offsetof(struct thread_info, task) */
#define TI_flags 8 /* offsetof(struct thread_info, flags) */
#define TI_preempt_count 16 /* offsetof(struct thread_info, preempt_count) */
#define THREAD_ksp 0 /* offsetof(struct thread_struct, ksp) */
#define THREAD_usp 4 /* offsetof(struct thread_struct, usp) */
#define THREAD_dccr 8 /* offsetof(struct thread_struct, dccr) */
#define TASK_pid 141 /* offsetof(struct task_struct, pid) */
#define LCLONE_VM 256 /* CLONE_VM */
#define LCLONE_UNTRACED 8388608 /* CLONE_UNTRACED */
#endif

View File

@@ -0,0 +1,30 @@
#ifndef _CRIS_ARCH_PAGE_H
#define _CRIS_ARCH_PAGE_H
#ifdef __KERNEL__
/* This handles the memory map.. */
#ifdef CONFIG_CRIS_LOW_MAP
#define PAGE_OFFSET KSEG_6 /* kseg_6 is mapped to physical ram */
#else
#define PAGE_OFFSET KSEG_C /* kseg_c is mapped to physical ram */
#endif
/* macros to convert between really physical and virtual addresses
 * by stripping a selected bit, we can convert between KSEG_x and
 * 0x40000000 where the DRAM really resides
 */
#ifdef CONFIG_CRIS_LOW_MAP
/* we have DRAM virtually at 0x6 */
/* __pa clears bit 29 (0x20000000); __va sets it again. */
#define __pa(x) ((unsigned long)(x) & 0xdfffffff)
#define __va(x) ((void *)((unsigned long)(x) | 0x20000000))
#else
/* we have DRAM virtually at 0xc */
/* __pa clears bit 31 (0x80000000); __va sets it again. */
#define __pa(x) ((unsigned long)(x) & 0x7fffffff)
#define __va(x) ((void *)((unsigned long)(x) | 0x80000000))
#endif
#endif
#endif

View File

@@ -0,0 +1,17 @@
#ifndef _CRIS_ARCH_PGTABLE_H
#define _CRIS_ARCH_PGTABLE_H
/*
* Kernels own virtual memory area.
* One whole 256 MB KSEG segment is dedicated to vmalloc; which one
* depends on the memory-map layout selected by CONFIG_CRIS_LOW_MAP.
*/
#ifdef CONFIG_CRIS_LOW_MAP
#define VMALLOC_START KSEG_7
#define VMALLOC_END KSEG_8
#else
#define VMALLOC_START KSEG_D
#define VMALLOC_END KSEG_E
#endif
#endif

View File

@@ -0,0 +1,70 @@
#ifndef __ASM_CRIS_ARCH_PROCESSOR_H
#define __ASM_CRIS_ARCH_PROCESSOR_H
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
#define current_text_addr() ({void *pc; __asm__ ("move.d $pc,%0" : "=rm" (pc)); pc; })
/* CRIS has no problems with write protection */
#define wp_works_ok 1
/* CRIS thread_struct. this really has nothing to do with the processor itself, since
* CRIS does not do any hardware task-switching, but it's here for legacy reasons.
* The thread_struct here is used when task-switching using _resume defined in entry.S.
* The offsets here are hardcoded into _resume - if you change this struct, you need to
* change them as well!!!
*/
struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
unsigned long usp; /* user stack pointer */
unsigned long dccr; /* saved flag register */
};
/*
* User space process size. This is hardcoded into a few places,
* so don't change it unless you know what you are doing.
*/
#ifdef CONFIG_CRIS_LOW_MAP
#define TASK_SIZE (0x50000000UL) /* 1.25 GB */
#else
#define TASK_SIZE (0xA0000000UL) /* 2.56 GB */
#endif
/* Initial values for ksp, usp, dccr (in declaration order above). */
#define INIT_THREAD { \
0, 0, 0x20 } /* ccr = int enable, nothing else */
/* Fetch the saved user PC (irp) from a task's kernel stack frame;
 * yields 0 if the frame pointer looks invalid. */
#define KSTK_EIP(tsk) \
({ \
unsigned long eip = 0; \
unsigned long regs = (unsigned long)task_pt_regs(tsk); \
if (regs > PAGE_SIZE && \
virt_addr_valid(regs)) \
eip = ((struct pt_regs *)regs)->irp; \
eip; \
})
/* give the thread a program location
* set user-mode (The 'U' flag (User mode flag) is CCR/DCCR bit 8)
* switch user-stackpointer
*/
#define start_thread(regs, ip, usp) do { \
set_fs(USER_DS); \
regs->irp = ip; \
regs->dccr |= 1 << U_DCCR_BITNR; \
wrusp(usp); \
} while(0)
/* Called when handling a kernel bus fault fixup.
*
* After a fixup we do not want to return by restoring the CPU-state
* anymore, so switch frame-types (see ptrace.h)
*
* NOTE(review): expands to a bare assignment (no do-while wrapper);
* take care when using it in unbraced if/else bodies.
*/
#define arch_fixup(regs) \
regs->frametype = CRIS_FRAME_NORMAL;
#endif

View File

@@ -0,0 +1,119 @@
/* CRIS v10 ptrace definitions: register numbering for the ptrace(2)
 * interface, DCCR flag bit positions, and the kernel stack frame layouts
 * (struct pt_regs for entry/exit, struct switch_stack for _resume). */
#ifndef _CRIS_ARCH_PTRACE_H
#define _CRIS_ARCH_PTRACE_H
/* Frame types */
#define CRIS_FRAME_NORMAL 0 /* normal frame without SBFS stacking */
#define CRIS_FRAME_BUSFAULT 1 /* frame stacked using SBFS, need RBF return
path */
/* Register numbers in the ptrace system call interface */
#define PT_FRAMETYPE 0
#define PT_ORIG_R10 1
#define PT_R13 2
#define PT_R12 3
#define PT_R11 4
#define PT_R10 5
#define PT_R9 6
#define PT_R8 7
#define PT_R7 8
#define PT_R6 9
#define PT_R5 10
#define PT_R4 11
#define PT_R3 12
#define PT_R2 13
#define PT_R1 14
#define PT_R0 15
#define PT_MOF 16
#define PT_DCCR 17
#define PT_SRP 18
#define PT_IRP 19 /* This is actually the debugged process' PC */
#define PT_CSRINSTR 20 /* CPU Status record remnants -
valid if frametype == busfault */
#define PT_CSRADDR 21
#define PT_CSRDATA 22
#define PT_USP 23 /* special case - USP is not in the pt_regs */
#define PT_MAX 23
/* Condition code bit numbers. The same numbers apply to CCR of course,
but we use DCCR everywhere else, so let's try and be consistent. */
#define C_DCCR_BITNR 0
#define V_DCCR_BITNR 1
#define Z_DCCR_BITNR 2
#define N_DCCR_BITNR 3
#define X_DCCR_BITNR 4
#define I_DCCR_BITNR 5
#define B_DCCR_BITNR 6
#define M_DCCR_BITNR 7
#define U_DCCR_BITNR 8
#define P_DCCR_BITNR 9
#define F_DCCR_BITNR 10
/* pt_regs not only specifies the format in the user-struct during
 * ptrace but is also the frame format used in the kernel prologue/epilogues
 * themselves
 */
struct pt_regs {
unsigned long frametype; /* type of stackframe */
unsigned long orig_r10;
/* pushed by movem r13, [sp] in SAVE_ALL, movem pushes backwards */
unsigned long r13;
unsigned long r12;
unsigned long r11;
unsigned long r10;
unsigned long r9;
unsigned long r8;
unsigned long r7;
unsigned long r6;
unsigned long r5;
unsigned long r4;
unsigned long r3;
unsigned long r2;
unsigned long r1;
unsigned long r0;
unsigned long mof;
unsigned long dccr;
unsigned long srp;
unsigned long irp; /* This is actually the debugged process' PC */
unsigned long csrinstr;
unsigned long csraddr;
unsigned long csrdata;
};
/* switch_stack is the extra stuff pushed onto the stack in _resume (entry.S)
 * when doing a context-switch. it is used (apart from in resume) when a new
 * thread is made and we need to make _resume (which is starting it for the
 * first time) realise what is going on.
 *
 * Actually, the use is very close to the thread struct (TSS) in that both the
 * switch_stack and the TSS are used to keep thread stuff when switching in
 * _resume.
 */
struct switch_stack {
unsigned long r9;
unsigned long r8;
unsigned long r7;
unsigned long r6;
unsigned long r5;
unsigned long r4;
unsigned long r3;
unsigned long r2;
unsigned long r1;
unsigned long r0;
unsigned long return_ip; /* ip that _resume will return to */
};
#ifdef __KERNEL__
/* bit 8 is user-mode flag */
#define user_mode(regs) (((regs)->dccr & 0x100) != 0)
#define instruction_pointer(regs) ((regs)->irp)
#define profile_pc(regs) instruction_pointer(regs)
extern void show_regs(struct pt_regs *);
#endif /* __KERNEL__ */
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,139 @@
/*!**************************************************************************
*!
*! MACROS:
*! IO_MASK(reg,field)
*! IO_STATE(reg,field,state)
*! IO_EXTRACT(reg,field,val)
*! IO_STATE_VALUE(reg,field,state)
*! IO_BITNR(reg,field)
*! IO_WIDTH(reg,field)
*! IO_FIELD(reg,field,val)
*! IO_RD(reg)
*! All moderegister addresses and fields of these.
*!
*!**************************************************************************/
#ifndef __sv_addr_ag_h__
#define __sv_addr_ag_h__
#define __test_sv_addr__ 0
/*------------------------------------------------------------
!* General macros to manipulate moderegisters.
!*-----------------------------------------------------------*/
/* Each macro comes in a public (IO_x) and an internal (IO_x_) flavour;
 * the public one appends '_' to its arguments so that the internal one
 * can paste reg_field_BITNR / reg_field_WIDTH / reg_field_state token
 * names generated elsewhere (see sv_addr.agh). */
/* IO_MASK returns a mask for a specified bitfield in a register.
 Note that this macro doesn't work when field width is 32 bits. */
#define IO_MASK(reg, field) IO_MASK_ (reg##_, field##_)
#define IO_MASK_(reg_, field_) \
( ( ( 1 << reg_##_##field_##_WIDTH ) - 1 ) << reg_##_##field_##_BITNR )
/* IO_STATE returns a constant corresponding to a one of the symbolic
 states that the bitfield can have. (Shifted to correct position) */
#define IO_STATE(reg, field, state) IO_STATE_ (reg##_, field##_, _##state)
#define IO_STATE_(reg_, field_, _state) \
( reg_##_##field_##_state << reg_##_##field_##_BITNR )
/* IO_EXTRACT returns the masked and shifted value corresponding to the
 bitfield can have. */
#define IO_EXTRACT(reg, field, val) IO_EXTRACT_ (reg##_, field##_, val)
#define IO_EXTRACT_(reg_, field_, val) ( (( ( ( 1 << reg_##_##field_##_WIDTH ) \
- 1 ) << reg_##_##field_##_BITNR ) & (val)) >> reg_##_##field_##_BITNR )
/* IO_STATE_VALUE returns a constant corresponding to a one of the symbolic
 states that the bitfield can have. (Not shifted) */
#define IO_STATE_VALUE(reg, field, state) \
IO_STATE_VALUE_ (reg##_, field##_, _##state)
#define IO_STATE_VALUE_(reg_, field_, _state) ( reg_##_##field_##_state )
/* IO_FIELD shifts the val parameter to be aligned with the bitfield
 specified. */
#define IO_FIELD(reg, field, val) IO_FIELD_ (reg##_, field##_, val)
#define IO_FIELD_(reg_, field_, val) ((val) << reg_##_##field_##_BITNR)
/* IO_BITNR returns the starting bitnumber of a bitfield. Bit 0 is
 LSB and the returned bitnumber is LSB of the field. */
#define IO_BITNR(reg, field) IO_BITNR_ (reg##_, field##_)
#define IO_BITNR_(reg_, field_) (reg_##_##field_##_BITNR)
/* IO_WIDTH returns the width, in bits, of a bitfield. */
#define IO_WIDTH(reg, field) IO_WIDTH_ (reg##_, field##_)
#define IO_WIDTH_(reg_, field_) (reg_##_##field_##_WIDTH)
/*--- Obsolete. Kept for backw compatibility. ---*/
/* Reads (or writes) a byte/uword/udword from the specified mode
 register. */
#define IO_RD(reg) (*(volatile u32*)(reg))
#define IO_RD_B(reg) (*(volatile u8*)(reg))
#define IO_RD_W(reg) (*(volatile u16*)(reg))
#define IO_RD_D(reg) (*(volatile u32*)(reg))
/*------------------------------------------------------------
!* Start addresses of the different memory areas.
!*-----------------------------------------------------------*/
/* Chip-select windows are 64 MB (0x04000000) apart; bit 31 selects the
 * non-cacheable alias of the whole space. */
#define MEM_CSE0_START (0x00000000)
#define MEM_CSE0_SIZE (0x04000000)
#define MEM_CSE1_START (0x04000000)
#define MEM_CSE1_SIZE (0x04000000)
#define MEM_CSR0_START (0x08000000)
#define MEM_CSR1_START (0x0c000000)
#define MEM_CSP0_START (0x10000000)
#define MEM_CSP1_START (0x14000000)
#define MEM_CSP2_START (0x18000000)
#define MEM_CSP3_START (0x1c000000)
#define MEM_CSP4_START (0x20000000)
#define MEM_CSP5_START (0x24000000)
#define MEM_CSP6_START (0x28000000)
#define MEM_CSP7_START (0x2c000000)
#define MEM_DRAM_START (0x40000000)
#define MEM_NON_CACHEABLE (0x80000000)
/*------------------------------------------------------------
!* Type casts used in mode register macros, making pointer
!* dereferencing possible. Empty in assembler.
!*-----------------------------------------------------------*/
#ifndef __ASSEMBLER__
# define IO_TYPECAST_UDWORD (volatile u32*)
# define IO_TYPECAST_RO_UDWORD (const volatile u32*)
# define IO_TYPECAST_UWORD (volatile u16*)
# define IO_TYPECAST_RO_UWORD (const volatile u16*)
# define IO_TYPECAST_BYTE (volatile u8*)
# define IO_TYPECAST_RO_BYTE (const volatile u8*)
#else
# define IO_TYPECAST_UDWORD
# define IO_TYPECAST_RO_UDWORD
# define IO_TYPECAST_UWORD
# define IO_TYPECAST_RO_UWORD
# define IO_TYPECAST_BYTE
# define IO_TYPECAST_RO_BYTE
#endif
/*------------------------------------------------------------*/
#include "sv_addr.agh"
/* Compile-time smoke test of the macros above; never built normally
 * (__test_sv_addr__ is 0). */
#if __test_sv_addr__
/* IO_MASK( R_BUS_CONFIG , CE ) */
IO_MASK( R_WAITSTATES , SRAM_WS )
IO_MASK( R_TEST , W32 )
IO_STATE( R_BUS_CONFIG, CE, DISABLE )
IO_STATE( R_BUS_CONFIG, CE, ENABLE )
IO_STATE( R_DRAM_TIMING, REF, IVAL2 )
IO_MASK( R_DRAM_TIMING, REF )
IO_MASK( R_EXT_DMA_0_STAT, TFR_COUNT ) >> IO_BITNR( R_EXT_DMA_0_STAT, TFR_COUNT )
IO_RD(R_EXT_DMA_0_STAT) & IO_MASK( R_EXT_DMA_0_STAT, S )
== IO_STATE( R_EXT_DMA_0_STAT, S, STARTED )
#endif
#endif /* ifndef __sv_addr_ag_h__ */

View File

@@ -0,0 +1,64 @@
/* ETRAX (Svinto) DMA support: descriptor layout, flag/status bits and
 * channel reset/wait helpers built on the IO_* macros from sv_addr_ag.h. */
#ifndef _ASM_CRIS_SVINTO_H
#define _ASM_CRIS_SVINTO_H
#include "sv_addr_ag.h"
extern unsigned int genconfig_shadow; /* defined and set in head.S */
/* dma stuff */
/* Bit values for the descriptor 'ctrl'/'flags' and 'status' bytes.
 * Note some bit positions are shared between a flag and a status
 * meaning (bit 4: d_txerr/d_stop/d_ecp), as annotated per entry. */
enum { /* Available in: */
d_eol = (1 << 0), /* flags */
d_eop = (1 << 1), /* flags & status */
d_wait = (1 << 2), /* flags */
d_int = (1 << 3), /* flags */
d_txerr = (1 << 4), /* flags */
d_stop = (1 << 4), /* status */
d_ecp = (1 << 4), /* flags & status */
d_pri = (1 << 5), /* flags & status */
d_alignerr = (1 << 6), /* status */
d_crcerr = (1 << 7) /* status */
};
/* Do remember that DMA does not go through the MMU and needs
 * a real physical address, not an address virtually mapped or
 * paged. Therefore the buf/next ptrs below are unsigned long instead
 * of void * to give a warning if you try to put a pointer directly
 * to them instead of going through virt_to_phys/phys_to_virt.
 */
typedef struct etrax_dma_descr {
unsigned short sw_len; /* 0-1 */
unsigned short ctrl; /* 2-3 */
unsigned long next; /* 4-7 */
unsigned long buf; /* 8-11 */
unsigned short hw_len; /* 12-13 */
unsigned char status; /* 14 */
unsigned char fifo_len; /* 15 */
} etrax_dma_descr;
/* Use this for constant numbers only */
#define RESET_DMA_NUM( n ) \
*R_DMA_CH##n##_CMD = IO_STATE( R_DMA_CH0_CMD, cmd, reset )
/* Use this for constant numbers or symbols,
 * having two macros makes it possible to use constant expressions.
 */
#define RESET_DMA( n ) RESET_DMA_NUM( n )
/* Use this for constant numbers only */
#define WAIT_DMA_NUM( n ) \
while( (*R_DMA_CH##n##_CMD & IO_MASK( R_DMA_CH0_CMD, cmd )) != \
IO_STATE( R_DMA_CH0_CMD, cmd, hold ) )
/* Use this for constant numbers or symbols
 * having two macros makes it possible to use constant expressions.
 */
#define WAIT_DMA( n ) WAIT_DMA_NUM( n )
extern void prepare_rx_descriptor(struct etrax_dma_descr *desc);
extern void flush_etrax_cache(void);
#endif

View File

@@ -0,0 +1,30 @@
/* CRIS byte-swap primitives: both 16- and 32-bit swaps are single CPU
 * instructions (swapb / swapwb), so only these two are provided; the
 * generic byteorder code builds everything else (including 64-bit, via
 * __SWAB_64_THRU_32__) on top of them. */
#ifndef _CRIS_ARCH_SWAB_H
#define _CRIS_ARCH_SWAB_H
#include <asm/types.h>
#include <linux/compiler.h>
#define __SWAB_64_THRU_32__
/* we just define these two (as we can do the swap in a single
 * asm instruction in CRIS) and the arch-independent files will put
 * them together into ntohl etc.
 */
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__asm__ ("swapwb %0" : "=r" (x) : "0" (x));
return(x);
}
#define __arch_swab32 __arch_swab32
static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
__asm__ ("swapb %0" : "=r" (x) : "0" (x));
return(x);
}
#define __arch_swab16 __arch_swab16
#endif

View File

@@ -0,0 +1,63 @@
/* CRIS v10 low-level system helpers: special-register accessors
 * (version register, user/kernel stack pointers) and the local irq
 * enable/disable primitives built on the CCR flag register. */
#ifndef __ASM_CRIS_ARCH_SYSTEM_H
#define __ASM_CRIS_ARCH_SYSTEM_H
/* read the CPU version register */
static inline unsigned long rdvr(void) {
unsigned char vr;
__asm__ volatile ("move $vr,%0" : "=rm" (vr));
return vr;
}
#define cris_machine_name "cris"
/* read/write the user-mode stackpointer */
static inline unsigned long rdusp(void) {
unsigned long usp;
__asm__ __volatile__("move $usp,%0" : "=rm" (usp));
return usp;
}
#define wrusp(usp) \
__asm__ __volatile__("move %0,$usp" : /* no outputs */ : "rm" (usp))
/* read the current stackpointer */
static inline unsigned long rdsp(void) {
unsigned long sp;
__asm__ __volatile__("move.d $sp,%0" : "=rm" (sp));
return sp;
}
/* Stub kept for interface compatibility: always returns 0 on CRIS. */
static inline unsigned long _get_base(char * addr)
{
return 0;
}
#define nop() __asm__ __volatile__ ("nop");
/* Atomic exchange; __xchg itself is defined elsewhere.  The dummy
 * struct/__xg cast lets the asm constraints treat the target as memory. */
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
/* interrupt control.. */
/* Note: these macro bodies already end in ';', so callers historically
 * invoke them without adding their own terminator. */
#define local_save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory");
#define local_irq_restore(x) __asm__ __volatile__ ("move %0,$ccr" : : "rm" (x) : "memory");
#define local_irq_disable() __asm__ __volatile__ ( "di" : : :"memory");
#define local_irq_enable() __asm__ __volatile__ ( "ei" : : :"memory");
/* True when the I flag (CCR bit 5, interrupt enable) is clear. */
#define irqs_disabled() \
({ \
unsigned long flags; \
local_save_flags(flags); \
!(flags & (1<<5)); \
})
/* For spinlocks etc */
/* Save CCR then disable interrupts, as one sequence. */
#define local_irq_save(x) __asm__ __volatile__ ("move $ccr,%0\n\tdi" : "=rm" (x) : : "memory");
#endif

View File

@@ -0,0 +1,12 @@
#ifndef _ASM_ARCH_THREAD_INFO_H
#define _ASM_ARCH_THREAD_INFO_H
/* how to get the thread information struct from C */
/* The thread_info lives at the base of the kernel stack: masking the
 * stack pointer with ~8191 rounds it down to the start of the 8 KiB
 * (2-page) stack area. */
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
__asm__("and.d $sp,%0; ":"=r" (ti) : "0" (~8191UL));
return ti;
}
#endif

View File

@@ -0,0 +1,30 @@
/*
* Use prescale timer at 25000 Hz instead of the baudrate timer at
* 19200 to get rid of the 64ppm to fast timer (and we get better
* resolution within a jiffie as well.
*/
#ifndef _ASM_CRIS_ARCH_TIMEX_H
#define _ASM_CRIS_ARCH_TIMEX_H
/* The prescaler clock runs at 25MHz, we divide it by 1000 in the prescaler */
/* If you change anything here you must check time.c as well... */
#define PRESCALE_FREQ 25000000
#define PRESCALE_VALUE 1000
#define CLOCK_TICK_RATE 25000 /* Underlying frequency of the HZ timer */
/* The timer0 values gives 40us resolution (1/25000) but interrupts at HZ*/
#define TIMER0_FREQ (CLOCK_TICK_RATE)
#define TIMER0_CLKSEL flexible
#define TIMER0_DIV (TIMER0_FREQ/(HZ))
#define GET_JIFFIES_USEC() \
( (TIMER0_DIV - *R_TIMER0_DATA) * (1000000/HZ)/TIMER0_DIV )
unsigned long get_ns_in_jiffie(void);
/* Microsecond-resolution offset within the current jiffy, derived from
 * the nanosecond counter read by get_ns_in_jiffie(). */
static inline unsigned long get_us_in_jiffie_highres(void)
{
	unsigned long ns = get_ns_in_jiffie();

	return ns / 1000;
}
#endif

View File

@@ -0,0 +1,13 @@
/* CRIS v10 TLB constants: sizing of the hardware TLB and the page-id
 * (MMU context) space shared by all address spaces. */
#ifndef _CRIS_ARCH_TLB_H
#define _CRIS_ARCH_TLB_H
/* The TLB can host up to 64 different mm contexts at the same time.
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 */
#define NUM_TLB_ENTRIES 64
#define NUM_PAGEID 64
#define INVALID_PAGEID 63
#define NO_CONTEXT -1
#endif

View File

@@ -0,0 +1,660 @@
/*
* Authors: Bjorn Wesen (bjornw@axis.com)
* Hans-Peter Nilsson (hp@axis.com)
*
*/
#ifndef _CRIS_ARCH_UACCESS_H
#define _CRIS_ARCH_UACCESS_H
/*
* We don't tell gcc that we are accessing memory, but this is OK
* because we do not write to any memory gcc knows about, so there
* are no aliasing issues.
*
* Note that PC at a fault is the address *after* the faulting
* instruction.
*/
#define __put_user_asm(x, addr, err, op) \
__asm__ __volatile__( \
" "op" %1,[%2]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
"3: move.d %3,%0\n" \
" jump 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .dword 2b,3b\n" \
" .previous\n" \
: "=r" (err) \
: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
#define __put_user_asm_64(x, addr, err) \
__asm__ __volatile__( \
" move.d %M1,[%2]\n" \
"2: move.d %H1,[%2+4]\n" \
"4:\n" \
" .section .fixup,\"ax\"\n" \
"3: move.d %3,%0\n" \
" jump 4b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .dword 2b,3b\n" \
" .dword 4b,3b\n" \
" .previous\n" \
: "=r" (err) \
: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
/* See comment before __put_user_asm. */
#define __get_user_asm(x, addr, err, op) \
__asm__ __volatile__( \
" "op" [%2],%1\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
"3: move.d %3,%0\n" \
" moveq 0,%1\n" \
" jump 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .dword 2b,3b\n" \
" .previous\n" \
: "=r" (err), "=r" (x) \
: "r" (addr), "g" (-EFAULT), "0" (err))
#define __get_user_asm_64(x, addr, err) \
__asm__ __volatile__( \
" move.d [%2],%M1\n" \
"2: move.d [%2+4],%H1\n" \
"4:\n" \
" .section .fixup,\"ax\"\n" \
"3: move.d %3,%0\n" \
" moveq 0,%1\n" \
" jump 4b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .dword 2b,3b\n" \
" .dword 4b,3b\n" \
" .previous\n" \
: "=r" (err), "=r" (x) \
: "r" (addr), "g" (-EFAULT), "0" (err))
/*
 * Copy a null terminated string from userspace.
 *
 * Must return:
 * -EFAULT for an exception
 * count if we hit the buffer limit
 * bytes copied if we hit a null byte
 * (without the null byte)
 */
static inline long
__do_strncpy_from_user(char *dst, const char *src, long count)
{
long res;
/* Guard: nothing to copy, and the asm below assumes count > 0. */
if (count == 0)
return 0;
/*
 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
 * So do we.
 *
 * This code is deduced from:
 *
 * char tmp2;
 * long tmp1, tmp3
 * tmp1 = count;
 * while ((*dst++ = (tmp2 = *src++)) != 0
 * && --tmp1)
 * ;
 *
 * res = count - tmp1;
 *
 * with tweaks.
 */
/* Byte-copy loop in asm: r9 carries each byte; on a user-space fault
 * the .fixup code returns -EFAULT via %7.  Otherwise res = number of
 * bytes copied (count minus the remaining counter). */
__asm__ __volatile__ (
" move.d %3,%0\n"
" move.b [%2+],$r9\n"
"1: beq 2f\n"
" move.b $r9,[%1+]\n"
" subq 1,%0\n"
" bne 1b\n"
" move.b [%2+],$r9\n"
"2: sub.d %3,%0\n"
" neg.d %0,%0\n"
"3:\n"
" .section .fixup,\"ax\"\n"
"4: move.d %7,%0\n"
" jump 3b\n"
/* There's one address for a fault at the first move, and
two possible PC values for a fault at the second move,
being a delay-slot filler. However, the branch-target
for the second move is the same as the first address.
Just so you don't get confused... */
" .previous\n"
" .section __ex_table,\"a\"\n"
" .dword 1b,4b\n"
" .dword 2b,4b\n"
" .previous"
: "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
: "3" (count), "1" (dst), "2" (src), "g" (-EFAULT)
: "r9");
return res;
}
/* A few copy asms to build up the more complex ones from.
Note again, a post-increment is performed regardless of whether a bus
fault occurred in that instruction, and PC for a faulted insn is the
address *after* the insn. */
/* Shared asm frame for the fixed-size user-copy ladder below: COPY is
 * the straight-line copy code, FIXUP runs on a fault (it adds the number
 * of uncopied bytes to ret and, for from-user, zero-fills the rest),
 * TENTRY supplies the __ex_table fault->fixup address pairs.  r9 is the
 * scratch register used by every COPY fragment. */
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm__ __volatile__ ( \
COPY \
"1:\n" \
" .section .fixup,\"ax\"\n" \
FIXUP \
" jump 1b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
TENTRY \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret) \
: "0" (to), "1" (from), "2" (ret) \
: "r9", "memory")
/* Fixed-size from-user copies, built by chaining the _Nx_cont variants:
 * each step copies one byte/word/dword, its fixup bumps ret by the step
 * size and clears the destination of that step. */
#define __asm_copy_from_user_1(to, from, ret) \
__asm_copy_user_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"2: move.b $r9,[%0+]\n", \
"3: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" move.w [%1+],$r9\n" \
"2: move.w $r9,[%0+]\n" COPY, \
"3: addq 2,%2\n" \
" clear.w [%0+]\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \
__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_3(to, from, ret) \
__asm_copy_from_user_2x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"4: move.b $r9,[%0+]\n", \
"5: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
"2: move.d $r9,[%0+]\n" COPY, \
"3: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_5(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"4: move.b $r9,[%0+]\n", \
"5: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 4b,5b\n")
#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_4x_cont(to, from, ret, \
" move.w [%1+],$r9\n" \
"4: move.w $r9,[%0+]\n" COPY, \
"5: addq 2,%2\n" \
" clear.w [%0+]\n" FIXUP, \
" .dword 4b,5b\n" TENTRY)
#define __asm_copy_from_user_6(to, from, ret) \
__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_7(to, from, ret) \
__asm_copy_from_user_6x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"6: move.b $r9,[%0+]\n", \
"7: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 6b,7b\n")
#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_4x_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
"4: move.d $r9,[%0+]\n" COPY, \
"5: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 4b,5b\n" TENTRY)
#define __asm_copy_from_user_8(to, from, ret) \
__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_9(to, from, ret) \
__asm_copy_from_user_8x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"6: move.b $r9,[%0+]\n", \
"7: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 6b,7b\n")
#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_8x_cont(to, from, ret, \
" move.w [%1+],$r9\n" \
"6: move.w $r9,[%0+]\n" COPY, \
"7: addq 2,%2\n" \
" clear.w [%0+]\n" FIXUP, \
" .dword 6b,7b\n" TENTRY)
#define __asm_copy_from_user_10(to, from, ret) \
__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_11(to, from, ret) \
__asm_copy_from_user_10x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"8: move.b $r9,[%0+]\n", \
"9: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 8b,9b\n")
#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_8x_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
"6: move.d $r9,[%0+]\n" COPY, \
"7: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 6b,7b\n" TENTRY)
#define __asm_copy_from_user_12(to, from, ret) \
__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_13(to, from, ret) \
__asm_copy_from_user_12x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"8: move.b $r9,[%0+]\n", \
"9: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 8b,9b\n")
#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_12x_cont(to, from, ret, \
" move.w [%1+],$r9\n" \
"8: move.w $r9,[%0+]\n" COPY, \
"9: addq 2,%2\n" \
" clear.w [%0+]\n" FIXUP, \
" .dword 8b,9b\n" TENTRY)
#define __asm_copy_from_user_14(to, from, ret) \
__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_15(to, from, ret) \
__asm_copy_from_user_14x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"10: move.b $r9,[%0+]\n", \
"11: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 10b,11b\n")
#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_12x_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
"8: move.d $r9,[%0+]\n" COPY, \
"9: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 8b,9b\n" TENTRY)
#define __asm_copy_from_user_16(to, from, ret) \
__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_16x_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
"10: move.d $r9,[%0+]\n" COPY, \
"11: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 10b,11b\n" TENTRY)
#define __asm_copy_from_user_20(to, from, ret) \
__asm_copy_from_user_20x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_20x_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
"12: move.d $r9,[%0+]\n" COPY, \
"13: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 12b,13b\n" TENTRY)
#define __asm_copy_from_user_24(to, from, ret) \
__asm_copy_from_user_24x_cont(to, from, ret, "", "", "")
/* And now, the to-user ones. */
/* Fixed-size to-user copies: same ladder structure as the from-user
 * family, but the faulting instruction is the store to user space, so
 * the fixup only bumps ret (no destination zero-fill). */
#define __asm_copy_to_user_1(to, from, ret) \
__asm_copy_user_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
" move.b $r9,[%0+]\n2:\n", \
"3: addq 1,%2\n", \
" .dword 2b,3b\n")
#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" move.w [%1+],$r9\n" \
" move.w $r9,[%0+]\n2:\n" COPY, \
"3: addq 2,%2\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
#define __asm_copy_to_user_2(to, from, ret) \
__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_3(to, from, ret) \
__asm_copy_to_user_2x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
" move.b $r9,[%0+]\n4:\n", \
"5: addq 1,%2\n", \
" .dword 4b,5b\n")
#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
" move.d $r9,[%0+]\n2:\n" COPY, \
"3: addq 4,%2\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
#define __asm_copy_to_user_4(to, from, ret) \
__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_5(to, from, ret) \
__asm_copy_to_user_4x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
" move.b $r9,[%0+]\n4:\n", \
"5: addq 1,%2\n", \
" .dword 4b,5b\n")
#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_4x_cont(to, from, ret, \
" move.w [%1+],$r9\n" \
" move.w $r9,[%0+]\n4:\n" COPY, \
"5: addq 2,%2\n" FIXUP, \
" .dword 4b,5b\n" TENTRY)
#define __asm_copy_to_user_6(to, from, ret) \
__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_7(to, from, ret) \
__asm_copy_to_user_6x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
" move.b $r9,[%0+]\n6:\n", \
"7: addq 1,%2\n", \
" .dword 6b,7b\n")
#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_4x_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
" move.d $r9,[%0+]\n4:\n" COPY, \
"5: addq 4,%2\n" FIXUP, \
" .dword 4b,5b\n" TENTRY)
#define __asm_copy_to_user_8(to, from, ret) \
__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_9(to, from, ret) \
__asm_copy_to_user_8x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
" move.b $r9,[%0+]\n6:\n", \
"7: addq 1,%2\n", \
" .dword 6b,7b\n")
#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_8x_cont(to, from, ret, \
" move.w [%1+],$r9\n" \
" move.w $r9,[%0+]\n6:\n" COPY, \
"7: addq 2,%2\n" FIXUP, \
" .dword 6b,7b\n" TENTRY)
#define __asm_copy_to_user_10(to, from, ret) \
__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_11(to, from, ret) \
__asm_copy_to_user_10x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
" move.b $r9,[%0+]\n8:\n", \
"9: addq 1,%2\n", \
" .dword 8b,9b\n")
#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_8x_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
" move.d $r9,[%0+]\n6:\n" COPY, \
"7: addq 4,%2\n" FIXUP, \
" .dword 6b,7b\n" TENTRY)
#define __asm_copy_to_user_12(to, from, ret) \
__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_13(to, from, ret) \
__asm_copy_to_user_12x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
" move.b $r9,[%0+]\n8:\n", \
"9: addq 1,%2\n", \
" .dword 8b,9b\n")
#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_12x_cont(to, from, ret, \
" move.w [%1+],$r9\n" \
" move.w $r9,[%0+]\n8:\n" COPY, \
"9: addq 2,%2\n" FIXUP, \
" .dword 8b,9b\n" TENTRY)
#define __asm_copy_to_user_14(to, from, ret) \
__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_15(to, from, ret) \
__asm_copy_to_user_14x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
" move.b $r9,[%0+]\n10:\n", \
"11: addq 1,%2\n", \
" .dword 10b,11b\n")
#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_12x_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
" move.d $r9,[%0+]\n8:\n" COPY, \
"9: addq 4,%2\n" FIXUP, \
" .dword 8b,9b\n" TENTRY)
#define __asm_copy_to_user_16(to, from, ret) \
__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_16x_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
" move.d $r9,[%0+]\n10:\n" COPY, \
"11: addq 4,%2\n" FIXUP, \
" .dword 10b,11b\n" TENTRY)
#define __asm_copy_to_user_20(to, from, ret) \
__asm_copy_to_user_20x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_20x_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
" move.d $r9,[%0+]\n12:\n" COPY, \
"13: addq 4,%2\n" FIXUP, \
" .dword 12b,13b\n" TENTRY)
#define __asm_copy_to_user_24(to, from, ret) \
__asm_copy_to_user_24x_cont(to, from, ret, "", "", "")
/* Define a few clearing asms with exception handlers. */
/* This frame-asm is like the __asm_copy_user_cont one, but has one less
input. */
/* Clear user memory: CLEAR is the straight-line clear.b/.w/.d code,
 * FIXUP bumps ret by the bytes left unclear on a fault, TENTRY holds
 * the __ex_table entries. */
#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
__asm__ __volatile__ ( \
CLEAR \
"1:\n" \
" .section .fixup,\"ax\"\n" \
FIXUP \
" jump 1b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
TENTRY \
" .previous" \
: "=r" (to), "=r" (ret) \
: "0" (to), "1" (ret) \
: "memory")
/* Fixed-size clears, chained the same way as the copy ladders above. */
#define __asm_clear_1(to, ret) \
__asm_clear(to, ret, \
" clear.b [%0+]\n2:\n", \
"3: addq 1,%1\n", \
" .dword 2b,3b\n")
#define __asm_clear_2(to, ret) \
__asm_clear(to, ret, \
" clear.w [%0+]\n2:\n", \
"3: addq 2,%1\n", \
" .dword 2b,3b\n")
#define __asm_clear_3(to, ret) \
__asm_clear(to, ret, \
" clear.w [%0+]\n" \
"2: clear.b [%0+]\n3:\n", \
"4: addq 2,%1\n" \
"5: addq 1,%1\n", \
" .dword 2b,4b\n" \
" .dword 3b,5b\n")
#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
__asm_clear(to, ret, \
" clear.d [%0+]\n2:\n" CLEAR, \
"3: addq 4,%1\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
#define __asm_clear_4(to, ret) \
__asm_clear_4x_cont(to, ret, "", "", "")
#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
__asm_clear_4x_cont(to, ret, \
" clear.d [%0+]\n4:\n" CLEAR, \
"5: addq 4,%1\n" FIXUP, \
" .dword 4b,5b\n" TENTRY)
#define __asm_clear_8(to, ret) \
__asm_clear_8x_cont(to, ret, "", "", "")
#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
__asm_clear_8x_cont(to, ret, \
" clear.d [%0+]\n6:\n" CLEAR, \
"7: addq 4,%1\n" FIXUP, \
" .dword 6b,7b\n" TENTRY)
#define __asm_clear_12(to, ret) \
__asm_clear_12x_cont(to, ret, "", "", "")
#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
__asm_clear_12x_cont(to, ret, \
" clear.d [%0+]\n8:\n" CLEAR, \
"9: addq 4,%1\n" FIXUP, \
" .dword 8b,9b\n" TENTRY)
#define __asm_clear_16(to, ret) \
__asm_clear_16x_cont(to, ret, "", "", "")
#define __asm_clear_20x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
__asm_clear_16x_cont(to, ret, \
" clear.d [%0+]\n10:\n" CLEAR, \
"11: addq 4,%1\n" FIXUP, \
" .dword 10b,11b\n" TENTRY)
#define __asm_clear_20(to, ret) \
__asm_clear_20x_cont(to, ret, "", "", "")
#define __asm_clear_24x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
__asm_clear_20x_cont(to, ret, \
" clear.d [%0+]\n12:\n" CLEAR, \
"13: addq 4,%1\n" FIXUP, \
" .dword 12b,13b\n" TENTRY)
#define __asm_clear_24(to, ret) \
__asm_clear_24x_cont(to, ret, "", "", "")
/*
 * Return the size of a string (including the ending 0)
 *
 * Return length of string in userspace including terminating 0
 * or 0 for error. Return a value greater than N if too long.
 */
static inline long
strnlen_user(const char *s, long n)
{
long res, tmp1;
/* Reject obviously bad user pointers up front; 0 is the error value. */
if (!access_ok(VERIFY_READ, s, 0))
return 0;
/*
 * This code is deduced from:
 *
 * tmp1 = n;
 * while (tmp1-- > 0 && *s++)
 * ;
 *
 * res = n - tmp1;
 *
 * (with tweaks).
 */
/* r9 counts down from n while test.b scans user bytes; on a user-space
 * fault the .fixup code clears the result (error return of 0). */
__asm__ __volatile__ (
" move.d %1,$r9\n"
"0:\n"
" ble 1f\n"
" subq 1,$r9\n"
" test.b [%0+]\n"
" bne 0b\n"
" test.d $r9\n"
"1:\n"
" move.d %1,%0\n"
" sub.d $r9,%0\n"
"2:\n"
" .section .fixup,\"ax\"\n"
"3: clear.d %0\n"
" jump 2b\n"
/* There's one address for a fault at the first move, and
two possible PC values for a fault at the second move,
being a delay-slot filler. However, the branch-target
for the second move is the same as the first address.
Just so you don't get confused... */
" .previous\n"
" .section __ex_table,\"a\"\n"
" .dword 0b,3b\n"
" .dword 1b,3b\n"
" .previous\n"
: "=r" (res), "=r" (tmp1)
: "0" (s), "1" (n)
: "r9");
return res;
}
#endif

View File

@@ -0,0 +1,148 @@
#ifndef _ASM_CRIS_ARCH_UNISTD_H_
#define _ASM_CRIS_ARCH_UNISTD_H_
/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
/*
* Don't remove the .ifnc tests; they are an insurance against
* any hard-to-spot gcc register allocation bugs.
*/
#define _syscall0(type,name) \
type name(void) \
{ \
register long __a __asm__ ("r10"); \
register long __n_ __asm__ ("r9") = (__NR_##name); \
__asm__ __volatile__ (".ifnc %0%1,$r10$r9\n\t" \
".err\n\t" \
".endif\n\t" \
"break 13" \
: "=r" (__a) \
: "r" (__n_)); \
if (__a >= 0) \
return (type) __a; \
errno = -__a; \
return (type) -1; \
}
/*
 * Stub for a one-argument syscall: arg1 in $r10 (which doubles as
 * the return-value register, hence the "0" matching constraint),
 * syscall number in $r9.  See _syscall0 for the ABI and the
 * .ifnc register-allocation guard.
 */
#define _syscall1(type,name,type1,arg1) \
type name(type1 arg1) \
{ \
register long __a __asm__ ("r10") = (long) arg1; \
register long __n_ __asm__ ("r9") = (__NR_##name); \
__asm__ __volatile__ (".ifnc %0%1,$r10$r9\n\t" \
".err\n\t" \
".endif\n\t" \
"break 13" \
: "=r" (__a) \
: "r" (__n_), "0" (__a)); \
if (__a >= 0) \
return (type) __a; \
errno = -__a; \
return (type) -1; \
}
/*
 * Stub for a two-argument syscall: args in $r10/$r11, number in
 * $r9, result in $r10.  The .ifnc guard now also checks operand
 * %3 (__b) landed in $r11.  See _syscall0.
 */
#define _syscall2(type,name,type1,arg1,type2,arg2) \
type name(type1 arg1,type2 arg2) \
{ \
register long __a __asm__ ("r10") = (long) arg1; \
register long __b __asm__ ("r11") = (long) arg2; \
register long __n_ __asm__ ("r9") = (__NR_##name); \
__asm__ __volatile__ (".ifnc %0%1%3,$r10$r9$r11\n\t" \
".err\n\t" \
".endif\n\t" \
"break 13" \
: "=r" (__a) \
: "r" (__n_), "0" (__a), "r" (__b)); \
if (__a >= 0) \
return (type) __a; \
errno = -__a; \
return (type) -1; \
}
/*
 * Stub for a three-argument syscall: args in $r10/$r11/$r12,
 * number in $r9, result in $r10.  See _syscall0.
 */
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
type name(type1 arg1,type2 arg2,type3 arg3) \
{ \
register long __a __asm__ ("r10") = (long) arg1; \
register long __b __asm__ ("r11") = (long) arg2; \
register long __c __asm__ ("r12") = (long) arg3; \
register long __n_ __asm__ ("r9") = (__NR_##name); \
__asm__ __volatile__ (".ifnc %0%1%3%4,$r10$r9$r11$r12\n\t" \
".err\n\t" \
".endif\n\t" \
"break 13" \
: "=r" (__a) \
: "r" (__n_), "0" (__a), "r" (__b), "r" (__c)); \
if (__a >= 0) \
return (type) __a; \
errno = -__a; \
return (type) -1; \
}
/*
 * Stub for a four-argument syscall: args in $r10..$r13, number in
 * $r9, result in $r10.  See _syscall0.
 */
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
register long __a __asm__ ("r10") = (long) arg1; \
register long __b __asm__ ("r11") = (long) arg2; \
register long __c __asm__ ("r12") = (long) arg3; \
register long __d __asm__ ("r13") = (long) arg4; \
register long __n_ __asm__ ("r9") = (__NR_##name); \
__asm__ __volatile__ (".ifnc %0%1%3%4%5,$r10$r9$r11$r12$r13\n\t" \
".err\n\t" \
".endif\n\t" \
"break 13" \
: "=r" (__a) \
: "r" (__n_), "0" (__a), "r" (__b), \
"r" (__c), "r" (__d)); \
if (__a >= 0) \
return (type) __a; \
errno = -__a; \
return (type) -1; \
}
/*
 * Stub for a five-argument syscall: the first four args go in
 * $r10..$r13 as in _syscall4; the fifth is moved into the $mof
 * special register just before the trap ("g" lets gcc supply it
 * from any register or memory).  See _syscall0.
 */
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
type5,arg5) \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
register long __a __asm__ ("r10") = (long) arg1; \
register long __b __asm__ ("r11") = (long) arg2; \
register long __c __asm__ ("r12") = (long) arg3; \
register long __d __asm__ ("r13") = (long) arg4; \
register long __n_ __asm__ ("r9") = (__NR_##name); \
__asm__ __volatile__ (".ifnc %0%1%3%4%5,$r10$r9$r11$r12$r13\n\t" \
".err\n\t" \
".endif\n\t" \
"move %6,$mof\n\t" \
"break 13" \
: "=r" (__a) \
: "r" (__n_), "0" (__a), "r" (__b), \
"r" (__c), "r" (__d), "g" (arg5)); \
if (__a >= 0) \
return (type) __a; \
errno = -__a; \
return (type) -1; \
}
/*
 * Stub for a six-argument syscall: first four args in $r10..$r13,
 * fifth in $mof, sixth in the $srp special register (which is why
 * "srp" appears in the clobber list -- the subroutine return
 * pointer is overwritten before the trap).  See _syscall0.
 */
#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
type5,arg5,type6,arg6) \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
{ \
register long __a __asm__ ("r10") = (long) arg1; \
register long __b __asm__ ("r11") = (long) arg2; \
register long __c __asm__ ("r12") = (long) arg3; \
register long __d __asm__ ("r13") = (long) arg4; \
register long __n_ __asm__ ("r9") = (__NR_##name); \
__asm__ __volatile__ (".ifnc %0%1%3%4%5,$r10$r9$r11$r12$r13\n\t" \
".err\n\t" \
".endif\n\t" \
"move %6,$mof\n\tmove %7,$srp\n\t" \
"break 13" \
: "=r" (__a) \
: "r" (__n_), "0" (__a), "r" (__b), \
"r" (__c), "r" (__d), "g" (arg5), "g" (arg6)\
: "srp"); \
if (__a >= 0) \
return (type) __a; \
errno = -__a; \
return (type) -1; \
}
#endif

View File

@@ -0,0 +1,46 @@
#ifndef __ASM_CRIS_ARCH_USER_H
#define __ASM_CRIS_ARCH_USER_H
/* User mode registers, used for core dumps. In order to keep ELF_NGREG
   sensible we let all registers be 32 bits. The csr registers are included
   for future use. */
/* NOTE(review): the field order defines the register-set layout seen by
   core dumps (and presumably ptrace) -- do not reorder or resize fields
   without checking those consumers. */
struct user_regs_struct {
unsigned long r0; /* General registers. */
unsigned long r1;
unsigned long r2;
unsigned long r3;
unsigned long r4;
unsigned long r5;
unsigned long r6;
unsigned long r7;
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r12;
unsigned long r13;
unsigned long sp; /* Stack pointer. */
unsigned long pc; /* Program counter. */
unsigned long p0; /* Constant zero (only 8 bits). */
unsigned long vr; /* Version register (only 8 bits). */
unsigned long p2; /* Reserved. */
unsigned long p3; /* Reserved. */
unsigned long p4; /* Constant zero (only 16 bits). */
unsigned long ccr; /* Condition code register (only 16 bits). */
unsigned long p6; /* Reserved. */
unsigned long mof; /* Multiply overflow register. */
unsigned long p8; /* Constant zero. */
unsigned long ibr; /* Not accessible. */
unsigned long irp; /* Not accessible. */
unsigned long srp; /* Subroutine return pointer. */
unsigned long bar; /* Not accessible. */
unsigned long dccr; /* Dword condition code register. */
unsigned long brp; /* Not accessible. */
unsigned long usp; /* User-mode stack pointer. Same as sp when
   in user mode. */
unsigned long csrinstr; /* Internal status registers. */
unsigned long csraddr;
unsigned long csrdata;
};
#endif