add idl4k kernel firmware version 1.13.0.105

Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions


@@ -0,0 +1,14 @@
#
# Makefile for the MN10300-specific memory management code
#
obj-y := \
init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
misalignment.o dma-alloc.o
ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
obj-y += cache.o cache-mn10300.o
ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
obj-y += cache-flush-mn10300.o
endif
endif
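# (illustration: with CONFIG_MN10300_CACHE_DISABLED unset and
#  CONFIG_MN10300_CACHE_WBACK=y, all three of cache.o, cache-mn10300.o and
#  cache-flush-mn10300.o end up built in)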


@@ -0,0 +1,192 @@
/* MN10300 CPU core caching routines
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
.am33_2
.globl mn10300_dcache_flush
.globl mn10300_dcache_flush_page
.globl mn10300_dcache_flush_range
.globl mn10300_dcache_flush_range2
.globl mn10300_dcache_flush_inv
.globl mn10300_dcache_flush_inv_page
.globl mn10300_dcache_flush_inv_range
.globl mn10300_dcache_flush_inv_range2
###############################################################################
#
# void mn10300_dcache_flush(void)
# Flush the entire data cache back to RAM
#
###############################################################################
ALIGN
mn10300_dcache_flush:
movhu (CHCTR),d0
btst CHCTR_DCEN,d0
beq mn10300_dcache_flush_end
# read the addresses tagged in the cache's tag RAM and attempt to flush
# those addresses specifically
# - we rely on the hardware to filter out invalid tag entry addresses
mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
mov DCACHE_PURGE(0,0),a1 # dcache purge request address
mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
mn10300_dcache_flush_loop:
mov (a0),d0
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
# cache
mov d0,(a1) # conditional purge
mn10300_dcache_flush_skip:
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
add -1,d1
bne mn10300_dcache_flush_loop
mn10300_dcache_flush_end:
ret [],0
###############################################################################
#
# void mn10300_dcache_flush_page(unsigned start)
# void mn10300_dcache_flush_range(unsigned start, unsigned end)
# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
# Flush a range of addresses on a page in the dcache
#
###############################################################################
ALIGN
mn10300_dcache_flush_page:
mov PAGE_SIZE,d1
mn10300_dcache_flush_range2:
add d0,d1
mn10300_dcache_flush_range:
movm [d2,d3],(sp)
movhu (CHCTR),d2
btst CHCTR_DCEN,d2
beq mn10300_dcache_flush_range_end
# round start addr down
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
mov d0,a1
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
# write a request to flush all instances of an address from the cache
mov DCACHE_PURGE(0,0),a0
mov a1,d0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0 # starting dcache purge control
# reg address
sub a1,d1
lsr L1_CACHE_SHIFT,d1 # total number of entries to
# examine
or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
# cache
mn10300_dcache_flush_range_loop:
mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
# in all ways
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
add -1,d1
bne mn10300_dcache_flush_range_loop
mn10300_dcache_flush_range_end:
ret [d2,d3],8
###############################################################################
#
# void mn10300_dcache_flush_inv(void)
# Flush the entire data cache and invalidate all entries
#
###############################################################################
ALIGN
mn10300_dcache_flush_inv:
movhu (CHCTR),d0
btst CHCTR_DCEN,d0
beq mn10300_dcache_flush_inv_end
# hit each line in the dcache with an unconditional purge
mov DCACHE_PURGE(0,0),a1 # dcache purge request address
mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
mn10300_dcache_flush_inv_loop:
mov (a1),d0 # unconditional purge
add L1_CACHE_BYTES,a1
add -1,d1
bne mn10300_dcache_flush_inv_loop
mn10300_dcache_flush_inv_end:
ret [],0
###############################################################################
#
# void mn10300_dcache_flush_inv_page(unsigned start)
# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
# Flush and invalidate a range of addresses on a page in the dcache
#
###############################################################################
ALIGN
mn10300_dcache_flush_inv_page:
mov PAGE_SIZE,d1
mn10300_dcache_flush_inv_range2:
add d0,d1
mn10300_dcache_flush_inv_range:
movm [d2,d3],(sp)
movhu (CHCTR),d2
btst CHCTR_DCEN,d2
beq mn10300_dcache_flush_inv_range_end
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
# addr down
mov d0,a1
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
# write a request to flush and invalidate all instances of an address
# from the cache
mov DCACHE_PURGE(0,0),a0
mov a1,d0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0 # starting dcache purge control
# reg address
sub a1,d1
lsr L1_CACHE_SHIFT,d1 # total number of entries to
# examine
mn10300_dcache_flush_inv_range_loop:
mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
# in all ways
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
add -1,d1
bne mn10300_dcache_flush_inv_range_loop
mn10300_dcache_flush_inv_range_end:
ret [d2,d3],8


@@ -0,0 +1,289 @@
/* MN10300 CPU core caching routines
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#define mn10300_dcache_inv_range_intr_interval \
+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
#if mn10300_dcache_inv_range_intr_interval > 0xff
#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
#endif
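# (illustration: the interval constant above is a low-bit mask - with a
#  LOG2 interval of, say, 4 it evaluates to 0x0f, so the btst in the
#  range-invalidation loop below falls through only once every 16 cache
#  lines, at which point the dcache is re-enabled and pending interrupts
#  get a chance to be delivered)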
.am33_2
.globl mn10300_icache_inv
.globl mn10300_dcache_inv
.globl mn10300_dcache_inv_range
.globl mn10300_dcache_inv_range2
.globl mn10300_dcache_inv_page
###############################################################################
#
# void mn10300_icache_inv(void)
# Invalidate the entire icache
#
###############################################################################
ALIGN
mn10300_icache_inv:
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq mn10300_icache_inv_end
mov epsw,d1
and ~EPSW_IE,epsw
nop
nop
# disable the icache
and ~CHCTR_ICEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
# wait for the cache to finish
mov CHCTR,a0
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# and reenable it
and ~CHCTR_ICINV,d0
or CHCTR_ICEN,d0
movhu d0,(a0)
movhu (a0),d0
mov d1,epsw
mn10300_icache_inv_end:
ret [],0
###############################################################################
#
# void mn10300_dcache_inv(void)
# Invalidate the entire dcache
#
###############################################################################
ALIGN
mn10300_dcache_inv:
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_DCEN,d0
beq mn10300_dcache_inv_end
mov epsw,d1
and ~EPSW_IE,epsw
nop
nop
# disable the dcache
and ~CHCTR_DCEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
# wait for the cache to finish
mov CHCTR,a0
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# and reenable it
and ~CHCTR_DCINV,d0
or CHCTR_DCEN,d0
movhu d0,(a0)
movhu (a0),d0
mov d1,epsw
mn10300_dcache_inv_end:
ret [],0
###############################################################################
#
# void mn10300_dcache_inv_range(unsigned start, unsigned end)
# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
# void mn10300_dcache_inv_page(unsigned start)
# Invalidate a range of addresses on a page in the dcache
#
###############################################################################
ALIGN
mn10300_dcache_inv_page:
mov PAGE_SIZE,d1
mn10300_dcache_inv_range2:
add d0,d1
mn10300_dcache_inv_range:
movm [d2,d3,a2],(sp)
mov CHCTR,a2
movhu (a2),d2
btst CHCTR_DCEN,d2
beq mn10300_dcache_inv_range_end
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
# addr down
mov d0,a1
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
clr d2 # we're going to clear tag ram
# entries
# read the tags from the tag RAM, and if they indicate a valid dirty
# cache line then invalidate that line
mov DCACHE_TAG(0,0),a0
mov a1,d0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0 # starting dcache tag RAM
# access address
sub a1,d1
lsr L1_CACHE_SHIFT,d1 # total number of entries to
# examine
and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
mn10300_dcache_inv_range_outer_loop:
# disable interrupts
mov epsw,d3
and ~EPSW_IE,epsw
nop # note that reading CHCTR and
# AND'ing D0 occupy two delay
# slots after disabling
# interrupts
# disable the dcache
movhu (a2),d0
and ~CHCTR_DCEN,d0
movhu d0,(a2)
# and wait for it to calm down
setlb
movhu (a2),d0
btst CHCTR_DCBUSY,d0
lne
mn10300_dcache_inv_range_loop:
# process the way 0 slot
mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
btst L1_CACHE_TAG_VALID,d0
beq mn10300_dcache_inv_range_skip_0 # jump if this cacheline is not
# valid
xor a1,d0
lsr 12,d0
bne mn10300_dcache_inv_range_skip_0 # jump if not this cacheline
mov d2,(a0) # kill the tag
mn10300_dcache_inv_range_skip_0:
# process the way 1 slot
mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
btst L1_CACHE_TAG_VALID,d0
beq mn10300_dcache_inv_range_skip_1 # jump if this cacheline is not
# valid
xor a1,d0
lsr 12,d0
bne mn10300_dcache_inv_range_skip_1 # jump if not this cacheline
mov d2,(a0) # kill the tag
mn10300_dcache_inv_range_skip_1:
# process the way 2 slot
mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
btst L1_CACHE_TAG_VALID,d0
beq mn10300_dcache_inv_range_skip_2 # jump if this cacheline is not
# valid
xor a1,d0
lsr 12,d0
bne mn10300_dcache_inv_range_skip_2 # jump if not this cacheline
mov d2,(a0) # kill the tag
mn10300_dcache_inv_range_skip_2:
# process the way 3 slot
mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
btst L1_CACHE_TAG_VALID,d0
beq mn10300_dcache_inv_range_skip_3 # jump if this cacheline is not
# valid
xor a1,d0
lsr 12,d0
bne mn10300_dcache_inv_range_skip_3 # jump if not this cacheline
mov d2,(a0) # kill the tag
mn10300_dcache_inv_range_skip_3:
# approx every N steps we re-enable the cache and see if there are any
# interrupts to be processed
# we also break out if we've reached the end of the loop
# (the bottom nibble of the count is zero in both cases)
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
add -1,d1
btst mn10300_dcache_inv_range_intr_interval,d1
bne mn10300_dcache_inv_range_loop
# wait for the cache to finish what it's doing
setlb
movhu (a2),d0
btst CHCTR_DCBUSY,d0
lne
# and reenable it
or CHCTR_DCEN,d0
movhu d0,(a2)
movhu (a2),d0
# re-enable interrupts
# - we don't bother with delay NOPs as we'll have enough instructions
# before we disable interrupts again to give the interrupts a chance
# to happen
mov d3,epsw
# go around again if the counter hasn't yet reached zero
add 0,d1
bne mn10300_dcache_inv_range_outer_loop
mn10300_dcache_inv_range_end:
ret [d2,d3,a2],12


@@ -0,0 +1,121 @@
/* MN10300 Cache flushing routines
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
EXPORT_SYMBOL(mn10300_icache_inv);
EXPORT_SYMBOL(mn10300_dcache_inv);
EXPORT_SYMBOL(mn10300_dcache_inv_range);
EXPORT_SYMBOL(mn10300_dcache_inv_range2);
EXPORT_SYMBOL(mn10300_dcache_inv_page);
#ifdef CONFIG_MN10300_CACHE_WBACK
EXPORT_SYMBOL(mn10300_dcache_flush);
EXPORT_SYMBOL(mn10300_dcache_flush_inv);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_range);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_range2);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_page);
EXPORT_SYMBOL(mn10300_dcache_flush_range);
EXPORT_SYMBOL(mn10300_dcache_flush_range2);
EXPORT_SYMBOL(mn10300_dcache_flush_page);
#endif
/*
* write a page back from the dcache and invalidate the icache so that we can
* run code from it that we've just written into it
*/
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
mn10300_dcache_flush_page(page_to_phys(page));
mn10300_icache_inv();
}
EXPORT_SYMBOL(flush_icache_page);
/*
* write some code we've just written back from the dcache and invalidate the
* icache so that we can run that code
*/
void flush_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_MN10300_CACHE_WBACK
unsigned long addr, size, off;
struct page *page;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ppte, pte;
for (; start < end; start += size) {
/* work out how much of the page to flush */
off = start & (PAGE_SIZE - 1);
size = end - start;
if (size > PAGE_SIZE - off)
size = PAGE_SIZE - off;
/* get the physical address the page is mapped to from the page
* tables */
pgd = pgd_offset(current->mm, start);
if (!pgd || !pgd_val(*pgd))
continue;
pud = pud_offset(pgd, start);
if (!pud || !pud_val(*pud))
continue;
pmd = pmd_offset(pud, start);
if (!pmd || !pmd_val(*pmd))
continue;
ppte = pte_offset_map(pmd, start);
if (!ppte)
continue;
pte = *ppte;
pte_unmap(ppte);
if (pte_none(pte))
continue;
page = pte_page(pte);
if (!page)
continue;
addr = page_to_phys(page);
/* flush the dcache and invalidate the icache coverage on that
* region */
mn10300_dcache_flush_range2(addr + off, size);
}
#endif
mn10300_icache_inv();
}
EXPORT_SYMBOL(flush_icache_range);
/*
* allow userspace to flush the instruction cache
*/
asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
{
if (end < start)
return -EINVAL;
flush_icache_range(start, end);
return 0;
}
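(For illustration only: a minimal userspace caller of the syscall above
might look like this, assuming the port exposes it under a __NR_cacheflush
syscall number - that number is an assumption here, not part of this file.)

#include <unistd.h>
#include <sys/syscall.h>

/* after writing code into a buffer, make it safe to execute */
static int flush_icache(unsigned long start, unsigned long end)
{
	return syscall(__NR_cacheflush, start, end);
}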


@@ -0,0 +1,56 @@
/* MN10300 Dynamic DMA mapping support
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* Derived from: arch/i386/kernel/pci-dma.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/io.h>
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int gfp)
{
unsigned long addr;
void *ret;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
gfp |= GFP_DMA;
addr = __get_free_pages(gfp, get_order(size));
if (!addr)
return NULL;
/* map the coherent memory through the uncached memory window */
ret = (void *) (addr | 0x20000000);
/* fill the memory with obvious rubbish */
memset((void *) addr, 0xfb, size);
/* write back and evict all cache lines covering this region */
mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), PAGE_SIZE);
*dma_handle = virt_to_bus((void *) addr);
return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
unsigned long addr = (unsigned long) vaddr & ~0x20000000;
free_pages(addr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
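(A sketch, for illustration only, of how a driver might use the pair above;
the wrapper function and its device argument are hypothetical.)

/* allocate a one-page coherent buffer, hand it to a device, release it */
static int example_dma_usage(struct device *dev)
{
	dma_addr_t handle;
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... program the device with "handle", touch "buf" from the CPU ... */
	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	return 0;
}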


@@ -0,0 +1,26 @@
/* MN10300 In-kernel exception handling
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return 1;
}
return 0;
}
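(For context: the entries searched above conventionally pair the address of
a faulting instruction with the address to resume at, roughly as below; the
lookup key is the faulting regs->pc.)

struct exception_table_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* address to jump to on a fault */
};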


@@ -0,0 +1,404 @@
/* MN10300 MMU Fault handler
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Modified by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/gdb-stub.h>
#include <asm/cpu-regs.h>
/*
* Unlock any spinlocks which will prevent us from getting the
* message out
*/
void bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
#ifdef CONFIG_SMP
/* Many serial drivers do __global_cli() */
global_irq_lock = 0;
#endif
} else {
int loglevel_save = console_loglevel;
#ifdef CONFIG_VT
unblank_screen();
#endif
oops_in_progress = 0;
/*
* OK, the message is on the console. Now we call printk()
* without oops_in_progress set so that printk will give klogd
* a poke. Hold onto your hats...
*/
console_loglevel = 15; /* NMI oopser may have shut the console
* up */
printk(" ");
console_loglevel = loglevel_save;
}
}
void do_BUG(const char *file, int line)
{
bust_spinlocks(1);
printk(KERN_EMERG "------------[ cut here ]------------\n");
printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
}
#if 0
static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
pgd = pgdir + __pgd_offset(address);
printk(KERN_DEBUG "pgd entry %p: %016Lx\n",
pgd, (long long) pgd_val(*pgd));
if (!pgd_present(*pgd)) {
printk(KERN_DEBUG "... pgd not present!\n");
return;
}
pmd = pmd_offset(pgd, address);
printk(KERN_DEBUG "pmd entry %p: %016Lx\n",
pmd, (long long)pmd_val(*pmd));
if (!pmd_present(*pmd)) {
printk(KERN_DEBUG "... pmd not present!\n");
return;
}
pte = pte_offset(pmd, address);
printk(KERN_DEBUG "pte entry %p: %016Lx\n",
pte, (long long) pte_val(*pte));
if (!pte_present(*pte))
printk(KERN_DEBUG "... pte not present!\n");
}
#endif
asmlinkage void monitor_signal(struct pt_regs *);
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* fault_code:
* - LSW: either MMUFCR_IFC or MMUFCR_DFC as appropriate
* - MSW: 0 if data access, 1 if instruction access
* - bit 0: TLB miss flag
* - bit 1: initial write
* - bit 2: page invalid
* - bit 3: protection violation
* - bit 4: accessor (0=user 1=kernel)
* - bit 5: 0=read 1=write
* - bit 6-8: page protection spec
* - bit 9: illegal address
* - bit 16: 0=data 1=ins
*
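* e.g. by this layout, a fault code of 0x0024 would indicate a user-mode
* write (bit 5 set) to an invalid page (bit 2 set)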
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
unsigned long address)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
struct mm_struct *mm;
unsigned long page;
siginfo_t info;
int write, fault;
#ifdef CONFIG_GDBSTUB
/* handle GDB stub causing a fault */
if (gdbstub_busy) {
gdbstub_exception(regs, TBR & TBR_INT_CODE);
return;
}
#endif
#if 0
printk(KERN_DEBUG "--- do_page_fault(%p,%s:%04lx,%08lx)\n",
regs,
fault_code & 0x10000 ? "ins" : "data",
fault_code & 0xffff, address);
#endif
tsk = current;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*
* This verifies that the fault happens in kernel space
* and that the fault was a page not present (invalid) error
*/
if (address >= VMALLOC_START && address < VMALLOC_END &&
(fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR &&
(fault_code & MMUFCR_xFC_PGINVAL) == MMUFCR_xFC_PGINVAL
)
goto vmalloc_fault;
mm = tsk->mm;
info.si_code = SEGV_MAPERR;
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto no_context;
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
/* accessing the stack below the stack pointer is always a
* bug */
if ((address & PAGE_MASK) + 2 * PAGE_SIZE < regs->sp) {
#if 0
printk(KERN_WARNING
"[%d] ### Access below stack @%lx (sp=%lx)\n",
current->pid, address, regs->sp);
printk(KERN_WARNING
"vma [%08x - %08x]\n",
vma->vm_start, vma->vm_end);
show_registers(regs);
printk(KERN_WARNING
"[%d] ### Code: [%08lx]"
" %02x %02x %02x %02x %02x %02x %02x %02x\n",
current->pid,
regs->pc,
((u8 *) regs->pc)[0],
((u8 *) regs->pc)[1],
((u8 *) regs->pc)[2],
((u8 *) regs->pc)[3],
((u8 *) regs->pc)[4],
((u8 *) regs->pc)[5],
((u8 *) regs->pc)[6],
((u8 *) regs->pc)[7]
);
#endif
goto bad_area;
}
}
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
info.si_code = SEGV_ACCERR;
write = 0;
switch (fault_code & (MMUFCR_xFC_PGINVAL|MMUFCR_xFC_TYPE)) {
default: /* 3: write, present */
case MMUFCR_xFC_TYPE_WRITE:
#ifdef TEST_VERIFY_AREA
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
printk(KERN_DEBUG "WP fault at %08lx\n", regs->pc);
#endif
/* write to absent page */
case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_WRITE:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
write++;
break;
/* read from protected page */
case MMUFCR_xFC_TYPE_READ:
goto bad_area;
/* read from an absent page */
case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_READ:
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
break;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR)
current->maj_flt++;
else
current->min_flt++;
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
monitor_signal(regs);
/* User mode accesses just cause a SIGSEGV */
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
info.si_addr = (void *)address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
no_context:
monitor_signal(regs);
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
if (address < PAGE_SIZE)
printk(KERN_ALERT
"Unable to handle kernel NULL pointer dereference");
else
printk(KERN_ALERT
"Unable to handle kernel paging request");
printk(" at virtual address %08lx\n", address);
printk(" printing pc:\n");
printk(KERN_ALERT "%08lx\n", regs->pc);
#ifdef CONFIG_GDBSTUB
gdbstub_intercept(
regs, fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR);
#endif
page = PTBR;
page = ((unsigned long *) __va(page))[address >> 22];
printk(KERN_ALERT "*pde = %08lx\n", page);
if (page & 1) {
page &= PAGE_MASK;
address &= 0x003ff000;
page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
printk(KERN_ALERT "*pte = %08lx\n", page);
}
die("Oops", regs, fault_code);
do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
monitor_signal(regs);
printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
do_exit(SIGKILL);
goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
monitor_signal(regs);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *)address;
force_sig_info(SIGBUS, &info, tsk);
/* Kernel mode? Handle exceptions or die */
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
goto no_context;
return;
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
goto no_context;
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto no_context;
pgd = (pgd_t *) PTBR + index;
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
set_pmd(pmd, *pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
}


@@ -0,0 +1,160 @@
/* MN10300 Memory management initialisation
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Modified by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/tlb.h>
#include <asm/sections.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;
/*
* set up paging
*/
void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = {0,};
pte_t *ppte;
int loop;
/* the main kernel space -> RAM mapping is handled 1:1 and transparently
* by the MMU */
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
memset(kernel_vmalloc_ptes, 0, sizeof(kernel_vmalloc_ptes));
/* load the VMALLOC area PTE table addresses into the kernel PGD */
ppte = kernel_vmalloc_ptes;
for (loop = VMALLOC_START / (PAGE_SIZE * PTRS_PER_PTE);
loop < VMALLOC_END / (PAGE_SIZE * PTRS_PER_PTE);
loop++
) {
set_pgd(swapper_pg_dir + loop, __pgd(__pa(ppte) | _PAGE_TABLE));
ppte += PAGE_SIZE / sizeof(pte_t);
}
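/* (illustration: with 4KB pages and 1024 pte_t entries per table, each
* loop iteration above covers 4MB of the vmalloc area and steps ppte
* forward by one page's worth of PTEs) */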
/* declare the sizes of the RAM zones (only use the normal zone) */
zones_size[ZONE_NORMAL] =
contig_page_data.bdata->node_low_pfn -
contig_page_data.bdata->node_min_pfn;
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(zones_size);
__flush_tlb_all();
}
/*
* transfer all the memory from the bootmem allocator to the runtime allocator
*/
void __init mem_init(void)
{
int codesize, reservedpages, datasize, initsize;
int tmp;
if (!mem_map)
BUG();
#define START_PFN (contig_page_data.bdata->node_min_pfn)
#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)
max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE);
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
reservedpages = 0;
for (tmp = 0; tmp < num_physpages; tmp++)
if (PageReserved(&mem_map[tmp]))
reservedpages++;
codesize = (unsigned long) &_etext - (unsigned long) &_stext;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
printk(KERN_INFO
"Memory: %luk/%luk available"
" (%dk kernel code, %dk reserved, %dk data, %dk init,"
" %ldk highmem)\n",
nr_free_pages() << (PAGE_SHIFT - 10),
max_mapnr << (PAGE_SHIFT - 10),
codesize >> 10,
reservedpages << (PAGE_SHIFT - 10),
datasize >> 10,
initsize >> 10,
(unsigned long) (totalhigh_pages << (PAGE_SHIFT - 10))
);
}
/*
 * free a range of pages used only during initialisation back to the page
 * allocator
 */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr;
for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
memset((void *) addr, 0xcc, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
/*
* recycle memory containing stuff only required for initialisation
*/
void free_initmem(void)
{
free_init_pages("unused kernel memory",
(unsigned long) &__init_begin,
(unsigned long) &__init_end);
}
/*
* dispose of the memory on which the initial ramdisk resided
*/
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_init_pages("initrd memory", start, end);
}
#endif


@@ -0,0 +1,968 @@
/* MN10300 Misalignment fixup handler
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/cpu-regs.h>
#include <asm/busctl-regs.h>
#include <asm/fpu.h>
#include <asm/gdb-stub.h>
#include <asm/asm-offsets.h>
#if 0
#define kdebug(FMT, ...) printk(KERN_DEBUG "MISALIGN: "FMT"\n", ##__VA_ARGS__)
#else
#define kdebug(FMT, ...) do {} while (0)
#endif
static int misalignment_addr(unsigned long *registers, unsigned long sp,
unsigned params, unsigned opcode,
unsigned long disp,
void **_address, unsigned long **_postinc,
unsigned long *_inc);
static int misalignment_reg(unsigned long *registers, unsigned params,
unsigned opcode, unsigned long disp,
unsigned long **_register);
static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode);
static const unsigned Dreg_index[] = {
REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2
};
static const unsigned Areg_index[] = {
REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2
};
static const unsigned Rreg_index[] = {
REG_E0 >> 2, REG_E1 >> 2, REG_E2 >> 2, REG_E3 >> 2,
REG_E4 >> 2, REG_E5 >> 2, REG_E6 >> 2, REG_E7 >> 2,
REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2,
REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2
};
enum format_id {
FMT_S0,
FMT_S1,
FMT_S2,
FMT_S4,
FMT_D0,
FMT_D1,
FMT_D2,
FMT_D4,
FMT_D6,
FMT_D7,
FMT_D8,
FMT_D9,
FMT_D10,
};
static const struct {
u_int8_t opsz, dispsz;
} format_tbl[16] = {
[FMT_S0] = { 8, 0 },
[FMT_S1] = { 8, 8 },
[FMT_S2] = { 8, 16 },
[FMT_S4] = { 8, 32 },
[FMT_D0] = { 16, 0 },
[FMT_D1] = { 16, 8 },
[FMT_D2] = { 16, 16 },
[FMT_D4] = { 16, 32 },
[FMT_D6] = { 24, 0 },
[FMT_D7] = { 24, 8 },
[FMT_D8] = { 24, 24 },
[FMT_D9] = { 24, 32 },
[FMT_D10] = { 32, 0 },
};
enum value_id {
DM0, /* data reg in opcode in bits 0-1 */
DM1, /* data reg in opcode in bits 2-3 */
DM2, /* data reg in opcode in bits 4-5 */
AM0, /* addr reg in opcode in bits 0-1 */
AM1, /* addr reg in opcode in bits 2-3 */
AM2, /* addr reg in opcode in bits 4-5 */
RM0, /* reg in opcode in bits 0-3 */
RM1, /* reg in opcode in bits 2-5 */
RM2, /* reg in opcode in bits 4-7 */
RM4, /* reg in opcode in bits 8-11 */
RM6, /* reg in opcode in bits 12-15 */
RD0, /* reg in displacement in bits 0-3 */
RD2, /* reg in displacement in bits 4-7 */
SP, /* stack pointer */
SD8, /* 8-bit signed displacement */
SD16, /* 16-bit signed displacement */
SD24, /* 24-bit signed displacement */
SIMM4_2, /* 4-bit signed displacement in opcode bits 4-7 */
SIMM8, /* 8-bit signed immediate */
IMM8, /* 8-bit unsigned immediate */
IMM16, /* 16-bit unsigned immediate */
IMM24, /* 24-bit unsigned immediate */
IMM32, /* 32-bit unsigned immediate */
IMM32_HIGH8, /* 32-bit unsigned immediate, LSB in opcode */
IMM32_MEM, /* 32-bit unsigned displacement */
IMM32_HIGH8_MEM, /* 32-bit unsigned displacement, LSB in opcode */
DN0 = DM0,
DN1 = DM1,
DN2 = DM2,
AN0 = AM0,
AN1 = AM1,
AN2 = AM2,
RN0 = RM0,
RN1 = RM1,
RN2 = RM2,
RN4 = RM4,
RN6 = RM6,
DI = DM1,
RI = RM2,
};
struct mn10300_opcode {
const char name[8];
u_int32_t opcode;
u_int32_t opmask;
unsigned exclusion;
enum format_id format;
unsigned cpu_mask;
#define AM33 330
unsigned params[2];
#define MEM(ADDR) (0x80000000 | (ADDR))
#define MEM2(ADDR1, ADDR2) (0x80000000 | (ADDR1) << 8 | (ADDR2))
#define MEMINC(ADDR) (0x81000000 | (ADDR))
#define MEMINC2(ADDR, INC) (0x81000000 | (ADDR) << 8 | (INC))
};
/* LIBOPCODES EXCERPT
Assemble Matsushita MN10300 instructions.
Copyright 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public Licence as published by
the Free Software Foundation; either version 2 of the Licence, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public Licence for more details.
You should have received a copy of the GNU General Public Licence
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
static const struct mn10300_opcode mn10300_opcodes[] = {
{ "mov", 0x4200, 0xf300, 0, FMT_S1, 0, {DM1, MEM2(IMM8, SP)}},
{ "mov", 0x4300, 0xf300, 0, FMT_S1, 0, {AM1, MEM2(IMM8, SP)}},
{ "mov", 0x5800, 0xfc00, 0, FMT_S1, 0, {MEM2(IMM8, SP), DN0}},
{ "mov", 0x5c00, 0xfc00, 0, FMT_S1, 0, {MEM2(IMM8, SP), AN0}},
{ "mov", 0x60, 0xf0, 0, FMT_S0, 0, {DM1, MEM(AN0)}},
{ "mov", 0x70, 0xf0, 0, FMT_S0, 0, {MEM(AM0), DN1}},
{ "mov", 0xf000, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), AN1}},
{ "mov", 0xf010, 0xfff0, 0, FMT_D0, 0, {AM1, MEM(AN0)}},
{ "mov", 0xf300, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), DN2}},
{ "mov", 0xf340, 0xffc0, 0, FMT_D0, 0, {DM2, MEM2(DI, AN0)}},
{ "mov", 0xf380, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), AN2}},
{ "mov", 0xf3c0, 0xffc0, 0, FMT_D0, 0, {AM2, MEM2(DI, AN0)}},
{ "mov", 0xf80000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8, AM0), DN1}},
{ "mov", 0xf81000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}},
{ "mov", 0xf82000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8,AM0), AN1}},
{ "mov", 0xf83000, 0xfff000, 0, FMT_D1, 0, {AM1, MEM2(SD8, AN0)}},
{ "mov", 0xf90a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}},
{ "mov", 0xf91a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}},
{ "mov", 0xf96a00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}},
{ "mov", 0xf97a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEMINC(RN0)}},
{ "mov", 0xfa000000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), DN1}},
{ "mov", 0xfa100000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}},
{ "mov", 0xfa200000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), AN1}},
{ "mov", 0xfa300000, 0xfff00000, 0, FMT_D2, 0, {AM1, MEM2(SD16, AN0)}},
{ "mov", 0xfa900000, 0xfff30000, 0, FMT_D2, 0, {AM1, MEM2(IMM16, SP)}},
{ "mov", 0xfa910000, 0xfff30000, 0, FMT_D2, 0, {DM1, MEM2(IMM16, SP)}},
{ "mov", 0xfab00000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), AN0}},
{ "mov", 0xfab40000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), DN0}},
{ "mov", 0xfb0a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}},
{ "mov", 0xfb1a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}},
{ "mov", 0xfb6a0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}},
{ "mov", 0xfb7a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}},
{ "mov", 0xfb8a0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}},
{ "mov", 0xfb8e0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}},
{ "mov", 0xfb9a0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}},
{ "mov", 0xfb9e0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}},
{ "mov", 0xfc000000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}},
{ "mov", 0xfc100000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}},
{ "mov", 0xfc200000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), AN1}},
{ "mov", 0xfc300000, 0xfff00000, 0, FMT_D4, 0, {AM1, MEM2(IMM32,AN0)}},
{ "mov", 0xfc800000, 0xfff30000, 0, FMT_D4, 0, {AM1, MEM(IMM32_MEM)}},
{ "mov", 0xfc810000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM(IMM32_MEM)}},
{ "mov", 0xfc900000, 0xfff30000, 0, FMT_D4, 0, {AM1, MEM2(IMM32, SP)}},
{ "mov", 0xfc910000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM2(IMM32, SP)}},
{ "mov", 0xfca00000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), AN0}},
{ "mov", 0xfca40000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), DN0}},
{ "mov", 0xfcb00000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), AN0}},
{ "mov", 0xfcb40000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), DN0}},
{ "mov", 0xfd0a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}},
{ "mov", 0xfd1a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}},
{ "mov", 0xfd6a0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}},
{ "mov", 0xfd7a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}},
{ "mov", 0xfd8a0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}},
{ "mov", 0xfd9a0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}},
{ "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}},
{ "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}},
{ "mov", 0xfe0e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}},
{ "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}},
{ "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}},
{ "mov", 0xfe1e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}},
{ "mov", 0xfe6a0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}},
{ "mov", 0xfe7a0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}},
{ "mov", 0xfe8a0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}},
{ "mov", 0xfe9a0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}},
{ "movhu", 0xf060, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), DN1}},
{ "movhu", 0xf070, 0xfff0, 0, FMT_D0, 0, {DM1, MEM(AN0)}},
{ "movhu", 0xf480, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), DN2}},
{ "movhu", 0xf4c0, 0xffc0, 0, FMT_D0, 0, {DM2, MEM2(DI, AN0)}},
{ "movhu", 0xf86000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8, AM0), DN1}},
{ "movhu", 0xf87000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}},
{ "movhu", 0xf89300, 0xfff300, 0, FMT_D1, 0, {DM1, MEM2(IMM8, SP)}},
{ "movhu", 0xf8bc00, 0xfffc00, 0, FMT_D1, 0, {MEM2(IMM8, SP), DN0}},
{ "movhu", 0xf94a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}},
{ "movhu", 0xf95a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}},
{ "movhu", 0xf9ea00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}},
{ "movhu", 0xf9fa00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEMINC(RN0)}},
{ "movhu", 0xfa600000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), DN1}},
{ "movhu", 0xfa700000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}},
{ "movhu", 0xfa930000, 0xfff30000, 0, FMT_D2, 0, {DM1, MEM2(IMM16, SP)}},
{ "movhu", 0xfabc0000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), DN0}},
{ "movhu", 0xfb4a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}},
{ "movhu", 0xfb5a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}},
{ "movhu", 0xfbca0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}},
{ "movhu", 0xfbce0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}},
{ "movhu", 0xfbda0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}},
{ "movhu", 0xfbde0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}},
{ "movhu", 0xfbea0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}},
{ "movhu", 0xfbfa0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}},
{ "movhu", 0xfc600000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}},
{ "movhu", 0xfc700000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}},
{ "movhu", 0xfc830000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM(IMM32_MEM)}},
{ "movhu", 0xfc930000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM2(IMM32, SP)}},
{ "movhu", 0xfcac0000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), DN0}},
{ "movhu", 0xfcbc0000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), DN0}},
{ "movhu", 0xfd4a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}},
{ "movhu", 0xfd5a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}},
{ "movhu", 0xfdca0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}},
{ "movhu", 0xfdda0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}},
{ "movhu", 0xfdea0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}},
{ "movhu", 0xfdfa0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}},
{ "movhu", 0xfe4a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}},
{ "movhu", 0xfe4e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}},
{ "movhu", 0xfe5a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}},
{ "movhu", 0xfe5e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}},
{ "movhu", 0xfeca0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}},
{ "movhu", 0xfeda0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}},
{ "movhu", 0xfeea0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}},
{ "movhu", 0xfefa0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}},
{ "mov_llt", 0xf7e00000, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lgt", 0xf7e00001, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lge", 0xf7e00002, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lle", 0xf7e00003, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lcs", 0xf7e00004, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lhi", 0xf7e00005, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lcc", 0xf7e00006, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lls", 0xf7e00007, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_leq", 0xf7e00008, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lne", 0xf7e00009, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lra", 0xf7e0000a, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "", 0, 0, 0, 0, 0, {0}},
};
/*
* fix up misalignment problems where possible
*/
asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code)
{
const struct exception_table_entry *fixup;
const struct mn10300_opcode *pop;
unsigned long *registers = (unsigned long *) regs;
unsigned long data, *store, *postinc, disp, inc, sp;
mm_segment_t seg;
siginfo_t info;
uint32_t opcode, noc, xo, xm;
uint8_t *pc, byte, datasz;
void *address;
unsigned tmp, npop, dispsz, loop;
/* we don't fix up userspace misalignment faults */
if (user_mode(regs))
goto bus_error;
sp = (unsigned long) regs + sizeof(*regs);
kdebug("==>misalignment({pc=%lx,sp=%lx})", regs->pc, sp);
if (regs->epsw & EPSW_IE)
asm volatile("or %0,epsw" : : "i"(EPSW_IE));
seg = get_fs();
set_fs(KERNEL_DS);
fixup = search_exception_tables(regs->pc);
/* first thing to do is to match the opcode */
pc = (u_int8_t *) regs->pc;
if (__get_user(byte, pc) != 0)
goto fetch_error;
opcode = byte;
noc = 8;
for (pop = mn10300_opcodes; pop->name[0]; pop++) {
npop = ilog2(pop->opcode | pop->opmask);
if (npop <= 0 || npop > 31)
continue;
npop = (npop + 8) & ~7;
got_more_bits:
if (npop == noc) {
if ((opcode & pop->opmask) == pop->opcode)
goto found_opcode;
} else if (npop > noc) {
xo = pop->opcode >> (npop - noc);
xm = pop->opmask >> (npop - noc);
if ((opcode & xm) != xo)
continue;
/* we've got a partial match (an exact match on the
* first N bytes), so we need to get some more data */
pc++;
if (__get_user(byte, pc) != 0)
goto fetch_error;
opcode = opcode << 8 | byte;
noc += 8;
goto got_more_bits;
} else {
/* there's already been a partial match as long as the
* complete match we're now considering, so this one
* shouldn't match */
continue;
}
}
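/* (worked example for the loop above: for the byte sequence f8 10 xx, the
* 24-bit candidate 0xf81000/0xfff000 matches partially at 8 bits (0xf8
* under mask 0xff) and again at 16 bits (0xf810 under mask 0xfff0),
* pulling in one more byte each time until the exact 24-bit comparison
* jumps to found_opcode) */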
/* didn't manage to find a fixup */
printk(KERN_CRIT "MISALIGN: %lx: unsupported instruction %x\n",
regs->pc, opcode);
failed:
set_fs(seg);
if (die_if_no_fixup("misalignment error", regs, code))
return;
bus_error:
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void *) regs->pc;
force_sig_info(SIGBUS, &info, current);
return;
/* error reading opcodes */
fetch_error:
printk(KERN_CRIT
"MISALIGN: %p: fault whilst reading instruction data\n",
pc);
goto failed;
bad_addr_mode:
printk(KERN_CRIT
"MISALIGN: %lx: unsupported addressing mode %x\n",
regs->pc, opcode);
goto failed;
bad_reg_mode:
printk(KERN_CRIT
"MISALIGN: %lx: unsupported register mode %x\n",
regs->pc, opcode);
goto failed;
unsupported_instruction:
printk(KERN_CRIT
"MISALIGN: %lx: unsupported instruction %x (%s)\n",
regs->pc, opcode, pop->name);
goto failed;
transfer_failed:
set_fs(seg);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
if (die_if_no_fixup("misalignment fixup", regs, code))
return;
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = 0;
info.si_addr = (void *) regs->pc;
force_sig_info(SIGSEGV, &info, current);
return;
/* we matched the opcode */
found_opcode:
kdebug("%lx: %x==%x { %x, %x }",
regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
tmp = format_tbl[pop->format].opsz;
if (tmp > noc)
BUG(); /* match was less complete than it ought to have been */
if (tmp < noc) {
tmp = noc - tmp;
opcode >>= tmp;
pc -= tmp >> 3;
}
/* grab the extra displacement (note it's LSB first) */
disp = 0;
dispsz = format_tbl[pop->format].dispsz;
for (loop = 0; loop < dispsz; loop += 8) {
pc++;
if (__get_user(byte, pc) != 0)
goto fetch_error;
disp |= byte << loop;
kdebug("{%p} disp[%02x]=%02x", pc, loop, byte);
}
kdebug("disp=%lx", disp);
set_fs(KERNEL_XDS);
if (fixup)
set_fs(seg);
tmp = (pop->params[0] ^ pop->params[1]) & 0x80000000;
if (!tmp) {
printk(KERN_CRIT
"MISALIGN: %lx: insn not move to/from memory %x\n",
regs->pc, opcode);
goto failed;
}
/* determine the data transfer size of the move */
if (pop->name[3] == 0 || /* "mov" */
pop->name[4] == 'l') /* mov_lcc */
inc = datasz = 4;
else if (pop->name[3] == 'h') /* movhu */
inc = datasz = 2;
else
goto unsupported_instruction;
if (pop->params[0] & 0x80000000) {
/* move memory to register */
if (!misalignment_addr(registers, sp,
pop->params[0], opcode, disp,
&address, &postinc, &inc))
goto bad_addr_mode;
if (!misalignment_reg(registers, pop->params[1], opcode, disp,
&store))
goto bad_reg_mode;
kdebug("mov%u (%p),DARn", datasz, address);
if (copy_from_user(&data, (void *) address, datasz) != 0)
goto transfer_failed;
if (pop->params[0] & 0x1000000) {
kdebug("inc=%lx", inc);
*postinc += inc;
}
*store = data;
kdebug("loaded %lx", data);
} else {
/* move register to memory */
if (!misalignment_reg(registers, pop->params[0], opcode, disp,
&store))
goto bad_reg_mode;
if (!misalignment_addr(registers, sp,
pop->params[1], opcode, disp,
&address, &postinc, &inc))
goto bad_addr_mode;
data = *store;
kdebug("mov%u %lx,(%p)", datasz, data, address);
if (copy_to_user((void *) address, &data, datasz) != 0)
goto transfer_failed;
if (pop->params[1] & 0x1000000)
*postinc += inc;
}
tmp = format_tbl[pop->format].opsz + format_tbl[pop->format].dispsz;
regs->pc += tmp >> 3;
/* handle MOV_Lcc, which are currently the only FMT_D10 insns that
* access memory */
if (pop->format == FMT_D10)
misalignment_MOV_Lcc(regs, opcode);
set_fs(seg);
}
/*
* determine the address that was being accessed
*/
static int misalignment_addr(unsigned long *registers, unsigned long sp,
unsigned params, unsigned opcode,
unsigned long disp,
void **_address, unsigned long **_postinc,
unsigned long *_inc)
{
unsigned long *postinc = NULL, address = 0, tmp;
if (!(params & 0x1000000)) {
kdebug("noinc");
*_inc = 0;
_inc = NULL;
}
params &= 0x00ffffff;
do {
switch (params & 0xff) {
case DM0:
postinc = &registers[Dreg_index[opcode & 0x03]];
address += *postinc;
break;
case DM1:
postinc = &registers[Dreg_index[opcode >> 2 & 0x03]];
address += *postinc;
break;
case DM2:
postinc = &registers[Dreg_index[opcode >> 4 & 0x03]];
address += *postinc;
break;
case AM0:
postinc = &registers[Areg_index[opcode & 0x03]];
address += *postinc;
break;
case AM1:
postinc = &registers[Areg_index[opcode >> 2 & 0x03]];
address += *postinc;
break;
case AM2:
postinc = &registers[Areg_index[opcode >> 4 & 0x03]];
address += *postinc;
break;
case RM0:
postinc = &registers[Rreg_index[opcode & 0x0f]];
address += *postinc;
break;
case RM1:
postinc = &registers[Rreg_index[opcode >> 2 & 0x0f]];
address += *postinc;
break;
case RM2:
postinc = &registers[Rreg_index[opcode >> 4 & 0x0f]];
address += *postinc;
break;
case RM4:
postinc = &registers[Rreg_index[opcode >> 8 & 0x0f]];
address += *postinc;
break;
case RM6:
postinc = &registers[Rreg_index[opcode >> 12 & 0x0f]];
address += *postinc;
break;
case RD0:
postinc = &registers[Rreg_index[disp & 0x0f]];
address += *postinc;
break;
case RD2:
postinc = &registers[Rreg_index[disp >> 4 & 0x0f]];
address += *postinc;
break;
case SP:
address += sp;
break;
/* displacements are either to be added to the address
* before use, or, in the case of post-inc addressing,
* to be added into the base register after use */
case SD8:
case SIMM8:
disp = (long) (int8_t) (disp & 0xff);
goto displace_or_inc;
case SD16:
disp = (long) (int16_t) (disp & 0xffff);
goto displace_or_inc;
case SD24:
tmp = disp << 8;
asm("asr 8,%0" : "=r"(tmp) : "0"(tmp));
disp = (long) tmp;
goto displace_or_inc;
case SIMM4_2:
tmp = opcode >> 4 & 0x0f;
tmp <<= 28;
asm("asr 28,%0" : "=r"(tmp) : "0"(tmp));
disp = (long) tmp;
goto displace_or_inc;
case IMM8:
disp &= 0x000000ff;
goto displace_or_inc;
case IMM16:
disp &= 0x0000ffff;
goto displace_or_inc;
case IMM24:
disp &= 0x00ffffff;
goto displace_or_inc;
case IMM32:
case IMM32_MEM:
case IMM32_HIGH8:
case IMM32_HIGH8_MEM:
displace_or_inc:
kdebug("%s %lx", _inc ? "incr" : "disp", disp);
if (!_inc)
address += disp;
else
*_inc = disp;
break;
default:
BUG();
return 0;
}
} while ((params >>= 8));
*_address = (void *) address;
*_postinc = postinc;
return 1;
}
/*
* determine the register that is acting as source/dest
*/
static int misalignment_reg(unsigned long *registers, unsigned params,
unsigned opcode, unsigned long disp,
unsigned long **_register)
{
params &= 0x7fffffff;
if (params & 0xffffff00)
return 0;
switch (params & 0xff) {
case DM0:
*_register = &registers[Dreg_index[opcode & 0x03]];
break;
case DM1:
*_register = &registers[Dreg_index[opcode >> 2 & 0x03]];
break;
case DM2:
*_register = &registers[Dreg_index[opcode >> 4 & 0x03]];
break;
case AM0:
*_register = &registers[Areg_index[opcode & 0x03]];
break;
case AM1:
*_register = &registers[Areg_index[opcode >> 2 & 0x03]];
break;
case AM2:
*_register = &registers[Areg_index[opcode >> 4 & 0x03]];
break;
case RM0:
*_register = &registers[Rreg_index[opcode & 0x0f]];
break;
case RM1:
*_register = &registers[Rreg_index[opcode >> 2 & 0x0f]];
break;
case RM2:
*_register = &registers[Rreg_index[opcode >> 4 & 0x0f]];
break;
case RM4:
*_register = &registers[Rreg_index[opcode >> 8 & 0x0f]];
break;
case RM6:
*_register = &registers[Rreg_index[opcode >> 12 & 0x0f]];
break;
case RD0:
*_register = &registers[Rreg_index[disp & 0x0f]];
break;
case RD2:
*_register = &registers[Rreg_index[disp >> 4 & 0x0f]];
break;
case SP:
*_register = &registers[REG_SP >> 2];
break;
default:
BUG();
return 0;
}
return 1;
}
/*
* handle the conditional loop part of the move-and-loop instructions
*/
static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode)
{
unsigned long epsw = regs->epsw;
unsigned long NxorV;
kdebug("MOV_Lcc %x [flags=%lx]", opcode, epsw & 0xf);
/* calculate N^V and shift onto the same bit position as Z */
NxorV = ((epsw >> 3) ^ epsw >> 1) & 1;
switch (opcode & 0xf) {
case 0x0: /* MOV_LLT: N^V */
if (NxorV)
goto take_the_loop;
return;
case 0x1: /* MOV_LGT: ~(Z or (N^V))*/
if (!((epsw & EPSW_FLAG_Z) | NxorV))
goto take_the_loop;
return;
case 0x2: /* MOV_LGE: ~(N^V) */
if (!NxorV)
goto take_the_loop;
return;
case 0x3: /* MOV_LLE: Z or (N^V) */
if ((epsw & EPSW_FLAG_Z) | NxorV)
goto take_the_loop;
return;
case 0x4: /* MOV_LCS: C */
if (epsw & EPSW_FLAG_C)
goto take_the_loop;
return;
case 0x5: /* MOV_LHI: ~(C or Z) */
if (!(epsw & (EPSW_FLAG_C | EPSW_FLAG_Z)))
goto take_the_loop;
return;
case 0x6: /* MOV_LCC: ~C */
if (!(epsw & EPSW_FLAG_C))
goto take_the_loop;
return;
case 0x7: /* MOV_LLS: C or Z */
if (epsw & (EPSW_FLAG_C | EPSW_FLAG_Z))
goto take_the_loop;
return;
case 0x8: /* MOV_LEQ: Z */
if (epsw & EPSW_FLAG_Z)
goto take_the_loop;
return;
case 0x9: /* MOV_LNE: ~Z */
if (!(epsw & EPSW_FLAG_Z))
goto take_the_loop;
return;
case 0xa: /* MOV_LRA: always */
goto take_the_loop;
default:
BUG();
}
take_the_loop:
/* wind the PC back to just after the SETLB insn */
kdebug("loop LAR=%lx", regs->lar);
regs->pc = regs->lar - 4;
}
/*
* misalignment handler tests
*/
#ifdef CONFIG_TEST_MISALIGNMENT_HANDLER
static u8 __initdata testbuf[512] __attribute__((aligned(16))) = {
[257] = 0x11,
[258] = 0x22,
[259] = 0x33,
[260] = 0x44,
};
#define ASSERTCMP(X, OP, Y) \
do { \
if (unlikely(!((X) OP (Y)))) { \
printk(KERN_ERR "\n"); \
printk(KERN_ERR "MISALIGN: Assertion failed at line %u\n", \
__LINE__); \
printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \
(unsigned long)(X), (unsigned long)(Y)); \
BUG(); \
} \
} while(0)
static int __init test_misalignment(void)
{
register void *r asm("e0");
register u32 y asm("e1");
void *p = testbuf, *q;
u32 tmp, tmp2, x;
printk(KERN_NOTICE "==>test_misalignment() [testbuf=%p]\n", p);
p++;
printk(KERN_NOTICE "___ MOV (Am),Dn ___\n");
q = p + 256;
asm volatile("mov (%0),%1" : "+a"(q), "=d"(x));
ASSERTCMP(q, ==, p + 256);
ASSERTCMP(x, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (256,Am),Dn ___\n");
q = p;
asm volatile("mov (256,%0),%1" : "+a"(q), "=d"(x));
ASSERTCMP(q, ==, p);
ASSERTCMP(x, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (Di,Am),Dn ___\n");
tmp = 256;
q = p;
asm volatile("mov (%2,%0),%1" : "+a"(q), "=d"(x), "+d"(tmp));
ASSERTCMP(q, ==, p);
ASSERTCMP(x, ==, 0x44332211);
ASSERTCMP(tmp, ==, 256);
printk(KERN_NOTICE "___ MOV (256,Rm),Rn ___\n");
r = p;
asm volatile("mov (256,%0),%1" : "+r"(r), "=r"(y));
ASSERTCMP(r, ==, p);
ASSERTCMP(y, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (Rm+),Rn ___\n");
r = p + 256;
asm volatile("mov (%0+),%1" : "+r"(r), "=r"(y));
ASSERTCMP(r, ==, p + 256 + 4);
ASSERTCMP(y, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (Rm+,8),Rn ___\n");
r = p + 256;
asm volatile("mov (%0+,8),%1" : "+r"(r), "=r"(y));
ASSERTCMP(r, ==, p + 256 + 8);
ASSERTCMP(y, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (7,SP),Rn ___\n");
asm volatile(
"add -16,sp \n"
"mov +0x11,%0 \n"
"movbu %0,(7,sp) \n"
"mov +0x22,%0 \n"
"movbu %0,(8,sp) \n"
"mov +0x33,%0 \n"
"movbu %0,(9,sp) \n"
"mov +0x44,%0 \n"
"movbu %0,(10,sp) \n"
"mov (7,sp),%1 \n"
"add +16,sp \n"
: "+a"(q), "=d"(x));
ASSERTCMP(x, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (259,SP),Rn ___\n");
asm volatile(
"add -264,sp \n"
"mov +0x11,%0 \n"
"movbu %0,(259,sp) \n"
"mov +0x22,%0 \n"
"movbu %0,(260,sp) \n"
"mov +0x33,%0 \n"
"movbu %0,(261,sp) \n"
"mov +0x55,%0 \n"
"movbu %0,(262,sp) \n"
"mov (259,sp),%1 \n"
"add +264,sp \n"
: "+d"(tmp), "=d"(x));
ASSERTCMP(x, ==, 0x55332211);
printk(KERN_NOTICE "___ MOV (260,SP),Rn ___\n");
asm volatile(
"add -264,sp \n"
"mov +0x11,%0 \n"
"movbu %0,(260,sp) \n"
"mov +0x22,%0 \n"
"movbu %0,(261,sp) \n"
"mov +0x33,%0 \n"
"movbu %0,(262,sp) \n"
"mov +0x55,%0 \n"
"movbu %0,(263,sp) \n"
"mov (260,sp),%1 \n"
"add +264,sp \n"
: "+d"(tmp), "=d"(x));
ASSERTCMP(x, ==, 0x55332211);
printk(KERN_NOTICE "___ MOV_LNE ___\n");
tmp = 1;
tmp2 = 2;
q = p + 256;
asm volatile(
"setlb \n"
"mov %2,%3 \n"
"mov %1,%2 \n"
"cmp +0,%1 \n"
"mov_lne (%0+,4),%1"
: "+r"(q), "+d"(tmp), "+d"(tmp2), "=d"(x)
:
: "cc");
ASSERTCMP(q, ==, p + 256 + 12);
ASSERTCMP(x, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV in SETLB ___\n");
tmp = 1;
tmp2 = 2;
q = p + 256;
asm volatile(
"setlb \n"
"mov %1,%3 \n"
"mov (%0+),%1 \n"
"cmp +0,%1 \n"
"lne "
: "+a"(q), "+d"(tmp), "+d"(tmp2), "=d"(x)
:
: "cc");
ASSERTCMP(q, ==, p + 256 + 8);
ASSERTCMP(x, ==, 0x44332211);
printk(KERN_NOTICE "<==test_misalignment()\n");
return 0;
}
arch_initcall(test_misalignment);
#endif /* CONFIG_TEST_MISALIGNMENT_HANDLER */

View File

@@ -0,0 +1,80 @@
/* MN10300 MMU context allocation and management
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
/*
* list of the MMU contexts last allocated on each CPU
*/
unsigned long mmu_context_cache[NR_CPUS] = {
[0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
};
/*
* flush the specified TLB entry
*/
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
unsigned long pteu, cnx, flags;
addr &= PAGE_MASK;
/* make sure the context doesn't migrate and defend against
* interference from vmalloc'd regions */
local_irq_save(flags);
cnx = mm_context(vma->vm_mm);
if (cnx != MMU_NO_CONTEXT) {
pteu = addr | (cnx & 0x000000ffUL);
IPTEU = pteu;
DPTEU = pteu;
if (IPTEL & xPTEL_V)
IPTEL = 0;
if (DPTEL & xPTEL_V)
DPTEL = 0;
}
local_irq_restore(flags);
}
/*
* preemptively set a TLB entry
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
unsigned long pteu, ptel, cnx, flags;
addr &= PAGE_MASK;
ptel = pte_val(pte) & ~(xPTEL_UNUSED1 | xPTEL_UNUSED2);
/* make sure the context doesn't migrate and defend against
* interference from vmalloc'd regions */
local_irq_save(flags);
cnx = mm_context(vma->vm_mm);
if (cnx != MMU_NO_CONTEXT) {
pteu = addr | (cnx & 0x000000ffUL);
if (!(pte_val(pte) & _PAGE_NX)) {
IPTEU = pteu;
if (IPTEL & xPTEL_V)
IPTEL = ptel;
}
DPTEU = pteu;
if (DPTEL & xPTEL_V)
DPTEL = ptel;
}
local_irq_restore(flags);
}
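/* Editorial sketch: both routines above build the xPTEU lookup key the
 * same way - the page-aligned virtual address combined with the low
 * eight bits of the context number. Standalone illustration; the 4KiB
 * page mask and the sample numbers are assumptions for the example.
 */
#if 0	/* illustration only */
#include <stdio.h>
static unsigned long make_pteu(unsigned long addr, unsigned long cnx)
{
	return (addr & 0xfffff000UL) | (cnx & 0x000000ffUL);
}
int main(void)
{
	/* address 0x0804a123 in context 0x102 -> key 0x0804a002 */
	printf("%#lx\n", make_pteu(0x0804a123UL, 0x102UL));
	return 0;
}
#endif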

View File

@@ -0,0 +1,170 @@
/* MN10300 Page table management
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Modified by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/quicklist.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
/*
* Associate a large virtual page frame with a given physical page frame
* and protection flags for that frame. pfn is for the base of the page,
* vaddr is what the page gets mapped to - both must be properly aligned.
* The pmd must already be instantiated. Assumes PAE mode.
*/
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
return; /* BUG(); */
}
if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
return; /* BUG(); */
}
pgd = swapper_pg_dir + pgd_index(vaddr);
if (pgd_none(*pgd)) {
printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
return; /* BUG(); */
}
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
set_pmd(pmd, pfn_pmd(pfn, flags));
/*
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
__flush_tlb_one(vaddr);
}
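/* Editorial sketch: a hypothetical caller of set_pmd_pfn(). The address,
 * pfn and protection are invented example values; they must satisfy the
 * alignment checks above (PMD_SIZE- and PTRS_PER_PTE-aligned
 * respectively), otherwise the function only logs an error and returns.
 */
#if 0	/* illustration only */
static void __init map_big_page_example(void)
{
	set_pmd_pfn(0xd0000000UL, 0x40000UL, PAGE_KERNEL);
}
#endif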
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte)
clear_page(pte);
return pte;
}
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
#ifdef CONFIG_HIGHPTE
pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
if (pte)
clear_highpage(pte);
return pte;
}
/*
* List of all pgd's needed for non-PAE so it can invalidate entries
* in both cached and uncached pgd's; not needed for PAE since the
* kernel pmd is shared. If PAE were not to share the pmd a similar
* tactic would be needed. This is essentially codepath-based locking
* against pageattr.c; it is the unique case in which a valid change
* of kernel pagetables can't be lazily synchronized by vmalloc faults.
* vmalloc faults work because attached pagetables are never freed.
* If the locking proves to be non-performant, a ticketing scheme with
* checks at dup_mmap(), exec(), and other mmlist addition points
* could be used. The locking scheme was chosen on the basis of
* manfred's recommendations and having no core impact whatsoever.
* -- wli
*/
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
static inline void pgd_list_add(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);
/* push at the head of the list: page->index holds the "next" pointer
 * and page_private() points back at the previous element's next field */
page->index = (unsigned long) pgd_list;
if (pgd_list)
set_page_private(pgd_list, (unsigned long) &page->index);
pgd_list = page;
set_page_private(page, (unsigned long) &pgd_list);
}
static inline void pgd_list_del(pgd_t *pgd)
{
struct page *next, **pprev, *page = virt_to_page(pgd);
/* unlink: make the previous element's next field skip this page, then
 * repair the successor's back pointer */
next = (struct page *) page->index;
pprev = (struct page **) page_private(page);
*pprev = next;
if (next)
set_page_private(next, (unsigned long) pprev);
}
void pgd_ctor(void *pgd)
{
unsigned long flags;
if (PTRS_PER_PMD == 1)
spin_lock_irqsave(&pgd_lock, flags);
memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
swapper_pg_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
if (PTRS_PER_PMD > 1)
return;
pgd_list_add(pgd);
spin_unlock_irqrestore(&pgd_lock, flags);
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
}
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
unsigned long flags; /* can be called from interrupt context */
spin_lock_irqsave(&pgd_lock, flags);
pgd_list_del(pgd);
spin_unlock_irqrestore(&pgd_lock, flags);
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
quicklist_free(0, pgd_dtor, pgd);
}
void __init pgtable_cache_init(void)
{
}
void check_pgt_cache(void)
{
quicklist_trim(0, pgd_dtor, 25, 16);
}

View File

@@ -0,0 +1,189 @@
###############################################################################
#
# TLB loading functions
#
# Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
# Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
# Modified by David Howells (dhowells@redhat.com)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public Licence
# as published by the Free Software Foundation; either version
# 2 of the Licence, or (at your option) any later version.
#
###############################################################################
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/intctl-regs.h>
#include <asm/frame.inc>
#include <asm/page.h>
#include <asm/pgtable.h>
###############################################################################
#
# Instruction TLB Miss handler entry point
#
###############################################################################
.type itlb_miss,@function
ENTRY(itlb_miss)
and ~EPSW_NMID,epsw
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
or EPSW_nAR,epsw # switch D0-D3 & A0-A3 to the alternate
# register bank
nop
nop
nop
#endif
mov (IPTEU),d3
mov (PTBR),a2
mov d3,d2
and 0xffc00000,d2
lsr 20,d2
mov (a2,d2),a2 # PTD *ptd = PGD[addr 31..22]
btst _PAGE_VALID,a2
beq itlb_miss_fault # jump if doesn't point anywhere
and ~(PAGE_SIZE-1),a2
mov d3,d2
and 0x003ff000,d2
lsr 10,d2
add d2,a2
mov (a2),d2 # get pte from PTD[addr 21..12]
btst _PAGE_VALID,d2
beq itlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
bset _PAGE_ACCESSED,(0,a2)
and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
itlb_miss_set:
mov d2,(IPTEL) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
rti
itlb_miss_fault:
mov _PAGE_VALID,d2 # force address error handler to be
# invoked
bra itlb_miss_set
.size itlb_miss, . - itlb_miss
###############################################################################
#
# Data TLB Miss handler entry point
#
###############################################################################
.type dtlb_miss,@function
ENTRY(dtlb_miss)
and ~EPSW_NMID,epsw
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
or EPSW_nAR,epsw # switch D0-D3 & A0-A3 to the alternate
# register bank
nop
nop
nop
#endif
mov (DPTEU),d3
mov (PTBR),a2
mov d3,d2
and 0xffc00000,d2
lsr 20,d2
mov (a2,d2),a2 # PTD *ptd = PGD[addr 31..22]
btst _PAGE_VALID,a2
beq dtlb_miss_fault # jump if doesn't point anywhere
and ~(PAGE_SIZE-1),a2
mov d3,d2
and 0x003ff000,d2
lsr 10,d2
add d2,a2
mov (a2),d2 # get pte from PTD[addr 21..12]
btst _PAGE_VALID,d2
beq dtlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
bset _PAGE_ACCESSED,(0,a2)
and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
dtlb_miss_set:
mov d2,(DPTEL) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
rti
dtlb_miss_fault:
mov _PAGE_VALID,d2 # force address error handler to be
# invoked
bra dtlb_miss_set
.size dtlb_miss, . - dtlb_miss
###############################################################################
#
# Instruction TLB Address Error handler entry point
#
###############################################################################
.type itlb_aerror,@function
ENTRY(itlb_aerror)
and ~EPSW_NMID,epsw
add -4,sp
SAVE_ALL
add -4,sp # need to pass three params
# calculate the fault code
movhu (MMUFCR_IFC),d1
or 0x00010000,d1 # it's an instruction fetch
# determine the page address
mov (IPTEU),a2
mov a2,d0
and PAGE_MASK,d0
mov d0,(12,sp)
clr d0
mov d0,(IPTEL)
and ~EPSW_NMID,epsw
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs, code, addr)
jmp ret_from_exception
.size itlb_aerror, . - itlb_aerror
###############################################################################
#
# Data TLB Address Error handler entry point
#
###############################################################################
.type dtlb_aerror,@function
ENTRY(dtlb_aerror)
and ~EPSW_NMID,epsw
add -4,sp
SAVE_ALL
add -4,sp # need to pass three params
# calculate the fault code
movhu (MMUFCR_DFC),d1
# determine the page address
mov (DPTEU),a2
mov a2,d0
and PAGE_MASK,d0
mov d0,(12,sp)
clr d0
mov d0,(DPTEL)
and ~EPSW_NMID,epsw
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs, code, addr)
jmp ret_from_exception
.size dtlb_aerror, . - dtlb_aerror
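For reference, the two-level walk that itlb_miss and dtlb_miss perform in
assembly, rewritten as a minimal C sketch. The bit values chosen for
_PAGE_VALID and _PAGE_ACCESSED and the pgd_base parameter (standing in for
PTBR) are assumptions for illustration, not taken from the headers.

#include <stdint.h>
#define PAGE_VALID	0x001u	/* assumed _PAGE_VALID bit */
#define PAGE_ACCESSED	0x020u	/* assumed _PAGE_ACCESSED bit */
/* PGD[addr 31..22] then PTD[addr 21..12]; returns 0 when either level is
 * invalid, which is where the handlers force the address-error path */
static uint32_t walk_pte(const uint32_t *pgd_base, uint32_t addr)
{
	uint32_t pgd_ent = pgd_base[addr >> 22];
	uint32_t *ptd, *pte;
	if (!(pgd_ent & PAGE_VALID))
		return 0;
	ptd = (uint32_t *)(uintptr_t)(pgd_ent & ~0xfffu);	/* page-aligned PTD */
	pte = &ptd[(addr >> 12) & 0x3ffu];
	if (!(*pte & PAGE_VALID))
		return 0;
	*pte |= PAGE_ACCESSED;	/* mirrors "bset _PAGE_ACCESSED,(0,a2)" */
	return *pte;
}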