add idl4k kernel firmware version 1.13.0.105

Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions


@@ -0,0 +1,14 @@
#
# arch/blackfin/mach-common/Makefile
#
obj-y := \
cache.o cache-c.o entry.o head.o \
interrupt.o arch_checks.o ints-priority.o
obj-$(CONFIG_PM) += pm.o dpmc_modes.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o
obj-$(CONFIG_DEBUG_ICACHE_CHECK) += irqpanic.o
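# For illustration (hypothetical configuration): with CONFIG_PM=y the
# obj-$(CONFIG_PM) line above expands to "obj-y += pm.o dpmc_modes.o",
# building both objects in; with CONFIG_PM unset it expands to "obj- += ...",
# a list kbuild simply ignores.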


@@ -0,0 +1,64 @@
/*
* Do some checking to make sure things are OK
*
* Copyright 2007-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <asm/fixed_code.h>
#include <mach/anomaly.h>
#include <asm/clocks.h>
#ifdef CONFIG_BFIN_KERNEL_CLOCK
# if (CONFIG_VCO_HZ > CONFIG_MAX_VCO_HZ)
# error "VCO selected is more than maximum value. Please change the VCO multipler"
# endif
# if (CONFIG_SCLK_HZ > CONFIG_MAX_SCLK_HZ)
# error "Sclk value selected is more than maximum. Please select a proper value for SCLK multiplier"
# endif
# if (CONFIG_SCLK_HZ < CONFIG_MIN_SCLK_HZ)
# error "Sclk value selected is less than minimum. Please select a proper value for SCLK multiplier"
# endif
# if (ANOMALY_05000273) && (CONFIG_SCLK_HZ * 2 > CONFIG_CCLK_HZ)
# error "ANOMALY 05000273, please make sure CCLK is at least 2x SCLK"
# endif
# if (CONFIG_SCLK_HZ > CONFIG_CCLK_HZ) && (CONFIG_SCLK_HZ != CONFIG_CLKIN_HZ) && (CONFIG_CCLK_HZ != CONFIG_CLKIN_HZ)
# error "Please select sclk less than cclk"
# endif
#endif /* CONFIG_BFIN_KERNEL_CLOCK */
#if CONFIG_BOOT_LOAD < FIXED_CODE_END
# error "The kernel load address must be after the fixed code section"
#endif
#if (CONFIG_BOOT_LOAD & 0x3)
# error "The kernel load address must be 4 byte aligned"
#endif
/* The entire kernel must be able to make a 24bit pcrel call to start of L1 */
#if ((0xffffffff - L1_CODE_START + 1) + CONFIG_BOOT_LOAD) > 0x1000000
# error "The kernel load address is too high; keep it below 10meg for safety"
#endif
#if ANOMALY_05000448
# error You are using a part with anomaly 05000448, this issue causes random memory read/write failures - that means random crashes.
#endif
/* If anomaly 220 exists, we cannot use External Memory write-back with L2 not cached, nor External Memory not cached with L2 write-back */
#if ANOMALY_05000220 && \
((defined(CONFIG_BFIN_EXTMEM_WRITEBACK) && !defined(CONFIG_BFIN_L2_DCACHEABLE)) || \
(!defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) && defined(CONFIG_BFIN_L2_WRITEBACK)))
# error You are exposing Anomaly 220 in this config, either config L2 as Write Through, or make External Memory WB.
#endif
#if ANOMALY_05000475 && \
(defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK))
# error "Anomaly 475 does not allow you to use Write Back cache with L2 or External Memory"
#endif
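/* A minimal, self-contained sketch of the build-time check pattern used
 * above; the EXAMPLE_* constants are hypothetical stand-ins for the
 * Kconfig-provided CONFIG_* values.
 */
#define EXAMPLE_CCLK_HZ 500000000
#define EXAMPLE_SCLK_HZ 100000000
#if (EXAMPLE_SCLK_HZ * 2 > EXAMPLE_CCLK_HZ)
# error "SCLK must be no more than half of CCLK on parts with anomaly 05000273"
#endif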


@@ -0,0 +1,76 @@
/*
* Blackfin cache control code (simpler control-style functions)
*
* Copyright 2004-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/init.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
/* Invalidate the Entire Data cache by
* clearing DMC[1:0] bits
*/
void blackfin_invalidate_entire_dcache(void)
{
u32 dmem = bfin_read_DMEM_CONTROL();
bfin_write_DMEM_CONTROL(dmem & ~0xc);
SSYNC();
bfin_write_DMEM_CONTROL(dmem);
SSYNC();
}
/* Invalidate the Entire Instruction cache by
* clearing IMC bit
*/
void blackfin_invalidate_entire_icache(void)
{
u32 imem = bfin_read_IMEM_CONTROL();
bfin_write_IMEM_CONTROL(imem & ~0x4);
SSYNC();
bfin_write_IMEM_CONTROL(imem);
SSYNC();
}
#if defined(CONFIG_BFIN_ICACHE) || defined(CONFIG_BFIN_DCACHE)
static void
bfin_cache_init(struct cplb_entry *cplb_tbl, unsigned long cplb_addr,
unsigned long cplb_data, unsigned long mem_control,
unsigned long mem_mask)
{
int i;
for (i = 0; i < MAX_CPLBS; i++) {
bfin_write32(cplb_addr + i * 4, cplb_tbl[i].addr);
bfin_write32(cplb_data + i * 4, cplb_tbl[i].data);
}
_enable_cplb(mem_control, mem_mask);
}
#ifdef CONFIG_BFIN_ICACHE
void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
{
bfin_cache_init(icplb_tbl, ICPLB_ADDR0, ICPLB_DATA0, IMEM_CONTROL,
(IMC | ENICPLB));
}
#endif
#ifdef CONFIG_BFIN_DCACHE
void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
{
/*
* Anomaly notes:
* 05000287 - We implement workaround #2 - Change the DMEM_CONTROL
* register, so that the port preferences for DAG0 and DAG1 are set
* to port B
*/
bfin_cache_init(dcplb_tbl, DCPLB_ADDR0, DCPLB_DATA0, DMEM_CONTROL,
(DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0)));
}
#endif
#endif


@@ -0,0 +1,103 @@
/*
* Blackfin cache control code
*
* Copyright 2004-2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/linkage.h>
#include <asm/blackfin.h>
#include <asm/cache.h>
#include <asm/page.h>
.text
/* 05000443 - IFLUSH cannot be last instruction in hardware loop */
#if ANOMALY_05000443
# define BROK_FLUSH_INST "IFLUSH"
#else
# define BROK_FLUSH_INST "no anomaly! yeah!"
#endif
/* Since all L1 caches work the same way, we use the same method for flushing
* them. Only the actual flush instruction differs. We write this in asm as
* GCC can be hard to coax into writing nice hardware loops.
*
* Also, we assume the following register setup:
* R0 = start address
* R1 = end address
*/
.macro do_flush flushins:req label
R2 = -L1_CACHE_BYTES;
/* start = (start & -L1_CACHE_BYTES) */
R0 = R0 & R2;
/* end = ((end - 1) & -L1_CACHE_BYTES) + L1_CACHE_BYTES; */
R1 += -1;
R1 = R1 & R2;
R1 += L1_CACHE_BYTES;
/* count = (end - start) >> L1_CACHE_SHIFT */
R2 = R1 - R0;
R2 >>= L1_CACHE_SHIFT;
P1 = R2;
.ifnb \label
\label :
.endif
P0 = R0;
LSETUP (1f, 2f) LC1 = P1;
1:
.ifeqs "\flushins", BROK_FLUSH_INST
\flushins [P0++];
2: nop;
.else
2: \flushins [P0++];
.endif
RTS;
.endm
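/* In C terms, the alignment math in do_flush is roughly (sketch, assuming
 * 32-byte lines, i.e. L1_CACHE_SHIFT == 5):
 *   start &= -L1_CACHE_BYTES;
 *   end = ((end - 1) & -L1_CACHE_BYTES) + L1_CACHE_BYTES;
 *   count = (end - start) >> L1_CACHE_SHIFT;
 * e.g. start = 0x1003, end = 0x1041 rounds to [0x1000, 0x1060), giving
 * count = 3 cache lines to flush.
 */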
/* Invalidate all instruction cache lines associated with this memory area */
ENTRY(_blackfin_icache_flush_range)
/*
* Workaround to avoid loading a wrong instruction after invalidating the
* icache when the following sequence occurs.
*
* 1) One instruction address is cached in the instruction cache.
* 2) This instruction in SDRAM is changed.
* 3) IFLUSH [P0] is executed only once in blackfin_icache_flush_range().
* 4) This instruction is executed again, but the old one is loaded.
*/
P0 = R0;
IFLUSH[P0];
do_flush IFLUSH
ENDPROC(_blackfin_icache_flush_range)
/* Throw away all D-cached data in specified region without any obligation to
* write them back. Since the Blackfin ISA does not have an "invalidate"
* instruction, we use flush/invalidate. Perhaps as a speed optimization we
* could bang on the DTEST MMRs ...
*/
ENTRY(_blackfin_dcache_invalidate_range)
do_flush FLUSHINV
ENDPROC(_blackfin_dcache_invalidate_range)
/* Flush all data cache lines associated with this memory area */
ENTRY(_blackfin_dcache_flush_range)
do_flush FLUSH, .Ldfr
ENDPROC(_blackfin_dcache_flush_range)
/* Our headers convert the page structure to an address, so just need to flush
* its contents like normal. We know the start address is page aligned (which
* is greater than our cache alignment), as is the end address. So just jump into
* the middle of the dcache flush function.
*/
ENTRY(_blackfin_dflush_page)
P1 = 1 << (PAGE_SHIFT - L1_CACHE_SHIFT);
jump .Ldfr;
ENDPROC(_blackfin_dflush_page)
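/* Worked example (assuming 4 KiB pages and 32-byte cache lines): P1 above
 * is 1 << (12 - 5) = 128, i.e. the page is flushed as 128 cache lines. */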


@@ -0,0 +1,95 @@
/*
* arch/blackfin/mach-common/clocks-init.c - reprogram clocks / memory
*
* Copyright 2004-2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/blackfin.h>
#include <asm/dma.h>
#include <asm/clocks.h>
#include <asm/mem_init.h>
#define SDGCTL_WIDTH (1 << 31) /* SDRAM external data path width */
#define PLL_CTL_VAL \
(((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \
(PLL_BYPASS << 8) | (ANOMALY_05000305 ? 0 : 0x8000))
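/* For illustration: with a hypothetical CONFIG_VCO_MULT of 21, CLKIN_HALF
 * and PLL_BYPASS both zero, and ANOMALY_05000305 absent, PLL_CTL_VAL
 * expands to (21 << 9) | 0x8000 = 0x2a00 | 0x8000 = 0xaa00. */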
__attribute__((l1_text))
static void do_sync(void)
{
__builtin_bfin_ssync();
}
__attribute__((l1_text))
void init_clocks(void)
{
/* Kill any active DMAs as they may trigger external memory accesses
* in the middle of reprogramming things, and that'll screw us up.
* For example, any automatic DMAs left by U-Boot for splash screens.
*/
size_t i;
for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
struct dma_register *dma = dma_io_base_addr[i];
dma->cfg = 0;
}
do_sync();
#ifdef SIC_IWR0
bfin_write_SIC_IWR0(IWR_ENABLE(0));
# ifdef SIC_IWR1
/* BF52x system reset does not properly reset SIC_IWR1 which
* will screw up the bootrom as it relies on MDMA0/1 waking it
* up from IDLE instructions. See this report for more info:
* http://blackfin.uclinux.org/gf/tracker/4323
*/
if (ANOMALY_05000435)
bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
else
bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
bfin_write_SIC_IWR(IWR_ENABLE(0));
#endif
do_sync();
#ifdef EBIU_SDGCTL
bfin_write_EBIU_SDGCTL(bfin_read_EBIU_SDGCTL() | SRFS);
do_sync();
#endif
#ifdef CLKBUFOE
bfin_write16(VR_CTL, bfin_read_VR_CTL() | CLKBUFOE);
do_sync();
__asm__ __volatile__("IDLE;");
#endif
bfin_write_PLL_LOCKCNT(0x300);
do_sync();
/* We always write PLL_CTL thus avoiding Anomaly 05000242 */
bfin_write16(PLL_CTL, PLL_CTL_VAL);
__asm__ __volatile__("IDLE;");
bfin_write_PLL_DIV(CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
#ifdef EBIU_SDGCTL
bfin_write_EBIU_SDRRC(mem_SDRRC);
bfin_write_EBIU_SDGCTL((bfin_read_EBIU_SDGCTL() & SDGCTL_WIDTH) | mem_SDGCTL);
#else
bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() & ~(SRREQ));
do_sync();
bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() | 0x1);
bfin_write_EBIU_DDRCTL0(mem_DDRCTL0);
bfin_write_EBIU_DDRCTL1(mem_DDRCTL1);
bfin_write_EBIU_DDRCTL2(mem_DDRCTL2);
#ifdef CONFIG_MEM_EBIU_DDRQUE
bfin_write_EBIU_DDRQUE(CONFIG_MEM_EBIU_DDRQUE);
#endif
#endif
do_sync();
bfin_read16(0);
}


@@ -0,0 +1,178 @@
/*
* Blackfin core clock scaling
*
* Copyright 2008-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/fs.h>
#include <asm/blackfin.h>
#include <asm/time.h>
/* this is the table of CCLK frequencies, in Hz */
/* .index is the entry in the auxiliary dpm_state_table[] */
static struct cpufreq_frequency_table bfin_freq_table[] = {
{
.frequency = CPUFREQ_TABLE_END,
.index = 0,
},
{
.frequency = CPUFREQ_TABLE_END,
.index = 1,
},
{
.frequency = CPUFREQ_TABLE_END,
.index = 2,
},
{
.frequency = CPUFREQ_TABLE_END,
.index = 0,
},
};
static struct bfin_dpm_state {
unsigned int csel; /* system clock divider */
unsigned int tscale; /* change the divider on the core timer interrupt */
} dpm_state_table[3];
/*
 * Normalized to the maximum frequency offset for CYCLES;
 * used in the time-ts cycles clock source, but could be
 * used elsewhere as well.
 */
unsigned long long __bfin_cycles_off;
unsigned int __bfin_cycles_mod;
/**************************************************************************/
static unsigned int bfin_getfreq_khz(unsigned int cpu)
{
/* The driver only supports a single CPU */
if (cpu != 0)
return -1;
return get_cclk() / 1000;
}
static int bfin_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
unsigned int index, plldiv, tscale;
unsigned long flags, cclk_hz;
struct cpufreq_freqs freqs;
cycles_t cycles;
if (cpufreq_frequency_table_target(policy, bfin_freq_table,
target_freq, relation, &index))
return -EINVAL;
cclk_hz = bfin_freq_table[index].frequency;
freqs.old = bfin_getfreq_khz(0);
freqs.new = cclk_hz;
freqs.cpu = 0;
pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
cclk_hz, target_freq, freqs.old);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
local_irq_save_hw(flags);
plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
tscale = dpm_state_table[index].tscale;
bfin_write_PLL_DIV(plldiv);
/* we have to adjust the core timer, because it is using cclk */
bfin_write_TSCALE(tscale);
cycles = get_cycles();
SSYNC();
cycles += 10; /* ~10 cycles we lose after get_cycles() */
__bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
__bfin_cycles_mod = index;
local_irq_restore_hw(flags);
/* TODO: just test case for cycles clock source, remove later */
pr_debug("cpufreq: done\n");
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return 0;
}
static int bfin_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, bfin_freq_table);
}
static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
{
unsigned long cclk, sclk, csel, min_cclk;
int index;
if (policy->cpu != 0)
return -EINVAL;
cclk = get_cclk() / 1000;
sclk = get_sclk() / 1000;
#if ANOMALY_05000273 || ANOMALY_05000274 || \
(!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
min_cclk = sclk * 2;
#else
min_cclk = sclk;
#endif
csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
bfin_freq_table[index].frequency = cclk >> index;
dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
bfin_freq_table[index].frequency,
dpm_state_table[index].csel,
dpm_state_table[index].tscale);
}
policy->cpuinfo.transition_latency = (bfin_read_PLL_LOCKCNT() / (sclk / 1000000)) * 1000;
/* Now, only support one CPU */
policy->cur = cclk;
cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
}
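/* Worked example of the table built by the loop above (hypothetical values:
 * cclk = 500000 kHz, min_cclk = 200000 kHz, initial csel = 0):
 *   index 0: 500000 kHz, csel 0
 *   index 1: 250000 kHz, csel 1
 * index 2 would be 125000 kHz < min_cclk, so the loop stops after two
 * usable entries; each tscale is TIME_SCALE / (1 << csel) - 1.
 */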
static struct freq_attr *bfin_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver bfin_driver = {
.verify = bfin_verify_speed,
.target = bfin_target,
.get = bfin_getfreq_khz,
.init = __bfin_cpu_init,
.name = "bfin cpufreq",
.owner = THIS_MODULE,
.attr = bfin_freq_attr,
};
static int __init bfin_cpu_init(void)
{
return cpufreq_register_driver(&bfin_driver);
}
static void __exit bfin_cpu_exit(void)
{
cpufreq_unregister_driver(&bfin_driver);
}
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("cpufreq driver for Blackfin");
MODULE_LICENSE("GPL");
module_init(bfin_cpu_init);
module_exit(bfin_cpu_exit);


@@ -0,0 +1,137 @@
/*
* Copyright 2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/cpufreq.h>
#include <asm/delay.h>
#include <asm/dpmc.h>
#define DRIVER_NAME "bfin dpmc"
#define dprintk(msg...) \
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, DRIVER_NAME, msg)
struct bfin_dpmc_platform_data *pdata;
/**
* bfin_set_vlev - Update VLEV field in VR_CTL Reg.
* Avoid BYPASS sequence
*/
static void bfin_set_vlev(unsigned int vlev)
{
unsigned pll_lcnt;
pll_lcnt = bfin_read_PLL_LOCKCNT();
bfin_write_PLL_LOCKCNT(1);
bfin_write_VR_CTL((bfin_read_VR_CTL() & ~VLEV) | vlev);
bfin_write_PLL_LOCKCNT(pll_lcnt);
}
/**
* bfin_get_vlev - Get CPU specific VLEV from platform device data
*/
static unsigned int bfin_get_vlev(unsigned int freq)
{
int i;
if (!pdata)
goto err_out;
freq >>= 16;
for (i = 0; i < pdata->tabsize; i++)
if (freq <= (pdata->tuple_tab[i] & 0xFFFF))
return pdata->tuple_tab[i] >> 16;
err_out:
printk(KERN_WARNING "DPMC: No suitable CCLK VDDINT voltage pair found\n");
return VLEV_120;
}
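/* Sketch of the tuple encoding implied above: each tuple_tab entry packs
 * the voltage level in the upper 16 bits and (max CCLK in Hz) >> 16 in the
 * lower 16 bits, so a hypothetical pair (VLEV_120, 400 MHz) encodes as
 * (VLEV_120 << 16) | (400000000 >> 16) = (VLEV_120 << 16) | 6103.
 */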
#ifdef CONFIG_CPU_FREQ
static int
vreg_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
if (val == CPUFREQ_PRECHANGE && freq->old < freq->new) {
bfin_set_vlev(bfin_get_vlev(freq->new));
udelay(pdata->vr_settling_time); /* Wait until voltage settled */
} else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)
bfin_set_vlev(bfin_get_vlev(freq->new));
return 0;
}
static struct notifier_block vreg_cpufreq_notifier_block = {
.notifier_call = vreg_cpufreq_notifier
};
#endif /* CONFIG_CPU_FREQ */
/**
* bfin_dpmc_probe -
*
*/
static int __devinit bfin_dpmc_probe(struct platform_device *pdev)
{
if (pdev->dev.platform_data)
pdata = pdev->dev.platform_data;
else
return -EINVAL;
return cpufreq_register_notifier(&vreg_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
/**
* bfin_dpmc_remove -
*/
static int __devexit bfin_dpmc_remove(struct platform_device *pdev)
{
pdata = NULL;
return cpufreq_unregister_notifier(&vreg_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
struct platform_driver bfin_dpmc_device_driver = {
.probe = bfin_dpmc_probe,
.remove = __devexit_p(bfin_dpmc_remove),
.driver = {
.name = DRIVER_NAME,
}
};
/**
* bfin_dpmc_init - Init driver
*/
static int __init bfin_dpmc_init(void)
{
return platform_driver_register(&bfin_dpmc_device_driver);
}
module_init(bfin_dpmc_init);
/**
* bfin_dpmc_exit - break down driver
*/
static void __exit bfin_dpmc_exit(void)
{
platform_driver_unregister(&bfin_dpmc_device_driver);
}
module_exit(bfin_dpmc_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("cpu power management driver for Blackfin");
MODULE_LICENSE("GPL");


@@ -0,0 +1,824 @@
/*
* Copyright 2004-2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/linkage.h>
#include <asm/blackfin.h>
#include <mach/irq.h>
#include <asm/dpmc.h>
.section .l1.text
ENTRY(_sleep_mode)
[--SP] = ( R7:0, P5:0 );
[--SP] = RETS;
call _set_sic_iwr;
R0 = 0xFFFF (Z);
call _set_rtc_istat;
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
R1 = W[P0](z);
BITSET (R1, 3);
W[P0] = R1.L;
CLI R2;
SSYNC;
IDLE;
STI R2;
call _test_pll_locked;
R0 = IWR_ENABLE(0);
R1 = IWR_DISABLE_ALL;
R2 = IWR_DISABLE_ALL;
call _set_sic_iwr;
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
R7 = w[p0](z);
BITCLR (R7, 3);
BITCLR (R7, 5);
w[p0] = R7.L;
IDLE;
call _test_pll_locked;
RETS = [SP++];
( R7:0, P5:0 ) = [SP++];
RTS;
ENDPROC(_sleep_mode)
ENTRY(_hibernate_mode)
[--SP] = ( R7:0, P5:0 );
[--SP] = RETS;
R3 = R0;
R0 = IWR_DISABLE_ALL;
R1 = IWR_DISABLE_ALL;
R2 = IWR_DISABLE_ALL;
call _set_sic_iwr;
call _set_dram_srfs;
SSYNC;
R0 = 0xFFFF (Z);
call _set_rtc_istat;
P0.H = hi(VR_CTL);
P0.L = lo(VR_CTL);
W[P0] = R3.L;
CLI R2;
IDLE;
.Lforever:
jump .Lforever;
ENDPROC(_hibernate_mode)
ENTRY(_sleep_deeper)
[--SP] = ( R7:0, P5:0 );
[--SP] = RETS;
CLI R4;
P3 = R0;
P4 = R1;
P5 = R2;
R0 = IWR_ENABLE(0);
R1 = IWR_DISABLE_ALL;
R2 = IWR_DISABLE_ALL;
call _set_sic_iwr;
call _set_dram_srfs; /* Set SDRAM Self Refresh */
/* Clear all the interrupts; the bits are sticky */
R0 = 0xFFFF (Z);
call _set_rtc_istat;
P0.H = hi(PLL_DIV);
P0.L = lo(PLL_DIV);
R6 = W[P0](z);
R0.L = 0xF;
W[P0] = R0.l; /* Set Max VCO to SCLK divider */
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
R5 = W[P0](z);
R0.L = (CONFIG_MIN_VCO_HZ/CONFIG_CLKIN_HZ) << 9;
W[P0] = R0.l; /* Set Min CLKIN to VCO multiplier */
SSYNC;
IDLE;
call _test_pll_locked;
P0.H = hi(VR_CTL);
P0.L = lo(VR_CTL);
R7 = W[P0](z);
R1 = 0x6;
R1 <<= 16;
R2 = 0x0404(Z);
R1 = R1|R2;
R2 = DEPOSIT(R7, R1);
W[P0] = R2; /* Set Min Core Voltage */
SSYNC;
IDLE;
call _test_pll_locked;
R0 = P3;
R1 = P4;
R3 = P5;
call _set_sic_iwr; /* Set Awake from IDLE */
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
R0 = W[P0](z);
BITSET (R0, 3);
W[P0] = R0.L; /* Turn CCLK OFF */
SSYNC;
IDLE;
call _test_pll_locked;
R0 = IWR_ENABLE(0);
R1 = IWR_DISABLE_ALL;
R2 = IWR_DISABLE_ALL;
call _set_sic_iwr; /* Set Awake from IDLE PLL */
P0.H = hi(VR_CTL);
P0.L = lo(VR_CTL);
W[P0]= R7;
SSYNC;
IDLE;
call _test_pll_locked;
P0.H = hi(PLL_DIV);
P0.L = lo(PLL_DIV);
W[P0]= R6; /* Restore CCLK and SCLK divider */
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
w[p0] = R5; /* Restore VCO multiplier */
IDLE;
call _test_pll_locked;
call _unset_dram_srfs; /* SDRAM Self Refresh Off */
STI R4;
RETS = [SP++];
( R7:0, P5:0 ) = [SP++];
RTS;
ENDPROC(_sleep_deeper)
ENTRY(_set_dram_srfs)
/* set the dram to self refresh mode */
SSYNC;
#if defined(EBIU_RSTCTL) /* DDR */
P0.H = hi(EBIU_RSTCTL);
P0.L = lo(EBIU_RSTCTL);
R2 = [P0];
BITSET(R2, 3); /* SRREQ enter self-refresh mode */
[P0] = R2;
SSYNC;
1:
R2 = [P0];
CC = BITTST(R2, 4);
if !CC JUMP 1b;
#else /* SDRAM */
P0.L = lo(EBIU_SDGCTL);
P0.H = hi(EBIU_SDGCTL);
R2 = [P0];
BITSET(R2, 24); /* SRFS enter self-refresh mode */
[P0] = R2;
SSYNC;
P0.L = lo(EBIU_SDSTAT);
P0.H = hi(EBIU_SDSTAT);
1:
R2 = w[P0];
SSYNC;
cc = BITTST(R2, 1); /* SDSRA poll self-refresh status */
if !cc jump 1b;
P0.L = lo(EBIU_SDGCTL);
P0.H = hi(EBIU_SDGCTL);
R2 = [P0];
BITCLR(R2, 0); /* SCTLE disable CLKOUT */
[P0] = R2;
#endif
RTS;
ENDPROC(_set_dram_srfs)
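/* In C terms, the SDRAM branch above is roughly (sketch):
 *   EBIU_SDGCTL |= SRFS;              // request self-refresh (bit 24)
 *   while (!(EBIU_SDSTAT & SDSRA)) ;  // poll until it is entered (bit 1)
 *   EBIU_SDGCTL &= ~SCTLE;            // then gate CLKOUT (bit 0)
 */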
ENTRY(_unset_dram_srfs)
/* set the dram out of self refresh mode */
#if defined(EBIU_RSTCTL) /* DDR */
P0.H = hi(EBIU_RSTCTL);
P0.L = lo(EBIU_RSTCTL);
R2 = [P0];
BITCLR(R2, 3); /* clear SRREQ bit */
[P0] = R2;
#elif defined(EBIU_SDGCTL) /* SDRAM */
P0.L = lo(EBIU_SDGCTL); /* release CLKOUT from self-refresh */
P0.H = hi(EBIU_SDGCTL);
R2 = [P0];
BITSET(R2, 0); /* SCTLE enable CLKOUT */
[P0] = R2;
SSYNC;
P0.L = lo(EBIU_SDGCTL); /* release SDRAM from self-refresh */
P0.H = hi(EBIU_SDGCTL);
R2 = [P0];
BITCLR(R2, 24); /* clear SRFS bit */
[P0] = R2;
#endif
SSYNC;
RTS;
ENDPROC(_unset_dram_srfs)
ENTRY(_set_sic_iwr)
#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) || \
defined(CONFIG_BF538) || defined(CONFIG_BF539) || defined(CONFIG_BF51x)
P0.H = hi(SIC_IWR0);
P0.L = lo(SIC_IWR0);
P1.H = hi(SIC_IWR1);
P1.L = lo(SIC_IWR1);
[P1] = R1;
#if defined(CONFIG_BF54x)
P1.H = hi(SIC_IWR2);
P1.L = lo(SIC_IWR2);
[P1] = R2;
#endif
#else
P0.H = hi(SIC_IWR);
P0.L = lo(SIC_IWR);
#endif
[P0] = R0;
SSYNC;
RTS;
ENDPROC(_set_sic_iwr)
ENTRY(_set_rtc_istat)
#ifndef CONFIG_BF561
P0.H = hi(RTC_ISTAT);
P0.L = lo(RTC_ISTAT);
w[P0] = R0.L;
SSYNC;
#elif (ANOMALY_05000371)
nop;
nop;
nop;
nop;
#endif
RTS;
ENDPROC(_set_rtc_istat)
ENTRY(_test_pll_locked)
P0.H = hi(PLL_STAT);
P0.L = lo(PLL_STAT);
1:
R0 = W[P0] (Z);
CC = BITTST(R0,5);
IF !CC JUMP 1b;
RTS;
ENDPROC(_test_pll_locked)
.section .text
ENTRY(_do_hibernate)
[--SP] = ( R7:0, P5:0 );
[--SP] = RETS;
/* Save System MMRs */
R2 = R0;
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
#ifdef SIC_IMASK0
PM_SYS_PUSH(SIC_IMASK0)
#endif
#ifdef SIC_IMASK1
PM_SYS_PUSH(SIC_IMASK1)
#endif
#ifdef SIC_IMASK2
PM_SYS_PUSH(SIC_IMASK2)
#endif
#ifdef SIC_IMASK
PM_SYS_PUSH(SIC_IMASK)
#endif
#ifdef SICA_IMASK0
PM_SYS_PUSH(SICA_IMASK0)
#endif
#ifdef SICA_IMASK1
PM_SYS_PUSH(SICA_IMASK1)
#endif
#ifdef SIC_IAR2
PM_SYS_PUSH(SIC_IAR0)
PM_SYS_PUSH(SIC_IAR1)
PM_SYS_PUSH(SIC_IAR2)
#endif
#ifdef SIC_IAR3
PM_SYS_PUSH(SIC_IAR3)
#endif
#ifdef SIC_IAR4
PM_SYS_PUSH(SIC_IAR4)
PM_SYS_PUSH(SIC_IAR5)
PM_SYS_PUSH(SIC_IAR6)
#endif
#ifdef SIC_IAR7
PM_SYS_PUSH(SIC_IAR7)
#endif
#ifdef SIC_IAR8
PM_SYS_PUSH(SIC_IAR8)
PM_SYS_PUSH(SIC_IAR9)
PM_SYS_PUSH(SIC_IAR10)
PM_SYS_PUSH(SIC_IAR11)
#endif
#ifdef SICA_IAR0
PM_SYS_PUSH(SICA_IAR0)
PM_SYS_PUSH(SICA_IAR1)
PM_SYS_PUSH(SICA_IAR2)
PM_SYS_PUSH(SICA_IAR3)
PM_SYS_PUSH(SICA_IAR4)
PM_SYS_PUSH(SICA_IAR5)
PM_SYS_PUSH(SICA_IAR6)
PM_SYS_PUSH(SICA_IAR7)
#endif
#ifdef SIC_IWR
PM_SYS_PUSH(SIC_IWR)
#endif
#ifdef SIC_IWR0
PM_SYS_PUSH(SIC_IWR0)
#endif
#ifdef SIC_IWR1
PM_SYS_PUSH(SIC_IWR1)
#endif
#ifdef SIC_IWR2
PM_SYS_PUSH(SIC_IWR2)
#endif
#ifdef SICA_IWR0
PM_SYS_PUSH(SICA_IWR0)
#endif
#ifdef SICA_IWR1
PM_SYS_PUSH(SICA_IWR1)
#endif
#ifdef PINT0_ASSIGN
PM_SYS_PUSH(PINT0_MASK_SET)
PM_SYS_PUSH(PINT1_MASK_SET)
PM_SYS_PUSH(PINT2_MASK_SET)
PM_SYS_PUSH(PINT3_MASK_SET)
PM_SYS_PUSH(PINT0_ASSIGN)
PM_SYS_PUSH(PINT1_ASSIGN)
PM_SYS_PUSH(PINT2_ASSIGN)
PM_SYS_PUSH(PINT3_ASSIGN)
PM_SYS_PUSH(PINT0_INVERT_SET)
PM_SYS_PUSH(PINT1_INVERT_SET)
PM_SYS_PUSH(PINT2_INVERT_SET)
PM_SYS_PUSH(PINT3_INVERT_SET)
PM_SYS_PUSH(PINT0_EDGE_SET)
PM_SYS_PUSH(PINT1_EDGE_SET)
PM_SYS_PUSH(PINT2_EDGE_SET)
PM_SYS_PUSH(PINT3_EDGE_SET)
#endif
PM_SYS_PUSH(EBIU_AMBCTL0)
PM_SYS_PUSH(EBIU_AMBCTL1)
PM_SYS_PUSH16(EBIU_AMGCTL)
#ifdef EBIU_FCTL
PM_SYS_PUSH(EBIU_MBSCTL)
PM_SYS_PUSH(EBIU_MODE)
PM_SYS_PUSH(EBIU_FCTL)
#endif
PM_SYS_PUSH16(SYSCR)
/* Save Core MMRs */
P0.H = hi(SRAM_BASE_ADDRESS);
P0.L = lo(SRAM_BASE_ADDRESS);
PM_PUSH(DMEM_CONTROL)
PM_PUSH(DCPLB_ADDR0)
PM_PUSH(DCPLB_ADDR1)
PM_PUSH(DCPLB_ADDR2)
PM_PUSH(DCPLB_ADDR3)
PM_PUSH(DCPLB_ADDR4)
PM_PUSH(DCPLB_ADDR5)
PM_PUSH(DCPLB_ADDR6)
PM_PUSH(DCPLB_ADDR7)
PM_PUSH(DCPLB_ADDR8)
PM_PUSH(DCPLB_ADDR9)
PM_PUSH(DCPLB_ADDR10)
PM_PUSH(DCPLB_ADDR11)
PM_PUSH(DCPLB_ADDR12)
PM_PUSH(DCPLB_ADDR13)
PM_PUSH(DCPLB_ADDR14)
PM_PUSH(DCPLB_ADDR15)
PM_PUSH(DCPLB_DATA0)
PM_PUSH(DCPLB_DATA1)
PM_PUSH(DCPLB_DATA2)
PM_PUSH(DCPLB_DATA3)
PM_PUSH(DCPLB_DATA4)
PM_PUSH(DCPLB_DATA5)
PM_PUSH(DCPLB_DATA6)
PM_PUSH(DCPLB_DATA7)
PM_PUSH(DCPLB_DATA8)
PM_PUSH(DCPLB_DATA9)
PM_PUSH(DCPLB_DATA10)
PM_PUSH(DCPLB_DATA11)
PM_PUSH(DCPLB_DATA12)
PM_PUSH(DCPLB_DATA13)
PM_PUSH(DCPLB_DATA14)
PM_PUSH(DCPLB_DATA15)
PM_PUSH(IMEM_CONTROL)
PM_PUSH(ICPLB_ADDR0)
PM_PUSH(ICPLB_ADDR1)
PM_PUSH(ICPLB_ADDR2)
PM_PUSH(ICPLB_ADDR3)
PM_PUSH(ICPLB_ADDR4)
PM_PUSH(ICPLB_ADDR5)
PM_PUSH(ICPLB_ADDR6)
PM_PUSH(ICPLB_ADDR7)
PM_PUSH(ICPLB_ADDR8)
PM_PUSH(ICPLB_ADDR9)
PM_PUSH(ICPLB_ADDR10)
PM_PUSH(ICPLB_ADDR11)
PM_PUSH(ICPLB_ADDR12)
PM_PUSH(ICPLB_ADDR13)
PM_PUSH(ICPLB_ADDR14)
PM_PUSH(ICPLB_ADDR15)
PM_PUSH(ICPLB_DATA0)
PM_PUSH(ICPLB_DATA1)
PM_PUSH(ICPLB_DATA2)
PM_PUSH(ICPLB_DATA3)
PM_PUSH(ICPLB_DATA4)
PM_PUSH(ICPLB_DATA5)
PM_PUSH(ICPLB_DATA6)
PM_PUSH(ICPLB_DATA7)
PM_PUSH(ICPLB_DATA8)
PM_PUSH(ICPLB_DATA9)
PM_PUSH(ICPLB_DATA10)
PM_PUSH(ICPLB_DATA11)
PM_PUSH(ICPLB_DATA12)
PM_PUSH(ICPLB_DATA13)
PM_PUSH(ICPLB_DATA14)
PM_PUSH(ICPLB_DATA15)
PM_PUSH(EVT0)
PM_PUSH(EVT1)
PM_PUSH(EVT2)
PM_PUSH(EVT3)
PM_PUSH(EVT4)
PM_PUSH(EVT5)
PM_PUSH(EVT6)
PM_PUSH(EVT7)
PM_PUSH(EVT8)
PM_PUSH(EVT9)
PM_PUSH(EVT10)
PM_PUSH(EVT11)
PM_PUSH(EVT12)
PM_PUSH(EVT13)
PM_PUSH(EVT14)
PM_PUSH(EVT15)
PM_PUSH(IMASK)
PM_PUSH(ILAT)
PM_PUSH(IPRIO)
PM_PUSH(TCNTL)
PM_PUSH(TPERIOD)
PM_PUSH(TSCALE)
PM_PUSH(TCOUNT)
PM_PUSH(TBUFCTL)
/* Save Core Registers */
[--sp] = SYSCFG;
[--sp] = ( R7:0, P5:0 );
[--sp] = fp;
[--sp] = usp;
[--sp] = i0;
[--sp] = i1;
[--sp] = i2;
[--sp] = i3;
[--sp] = m0;
[--sp] = m1;
[--sp] = m2;
[--sp] = m3;
[--sp] = l0;
[--sp] = l1;
[--sp] = l2;
[--sp] = l3;
[--sp] = b0;
[--sp] = b1;
[--sp] = b2;
[--sp] = b3;
[--sp] = a0.x;
[--sp] = a0.w;
[--sp] = a1.x;
[--sp] = a1.w;
[--sp] = LC0;
[--sp] = LC1;
[--sp] = LT0;
[--sp] = LT1;
[--sp] = LB0;
[--sp] = LB1;
[--sp] = ASTAT;
[--sp] = CYCLES;
[--sp] = CYCLES2;
[--sp] = RETS;
r0 = RETI;
[--sp] = r0;
[--sp] = RETX;
[--sp] = RETN;
[--sp] = RETE;
[--sp] = SEQSTAT;
/* Save Magic, return address and Stack Pointer */
P0.H = 0;
P0.L = 0;
R0.H = 0xDEAD; /* Hibernate Magic */
R0.L = 0xBEEF;
[P0++] = R0; /* Store Hibernate Magic */
R0.H = .Lpm_resume_here;
R0.L = .Lpm_resume_here;
[P0++] = R0; /* Save Return Address */
[P0++] = SP; /* Save Stack Pointer */
P0.H = _hibernate_mode;
P0.L = _hibernate_mode;
R0 = R2;
call (P0); /* Goodbye */
.Lpm_resume_here:
/* Restore Core Registers */
SEQSTAT = [sp++];
RETE = [sp++];
RETN = [sp++];
RETX = [sp++];
r0 = [sp++];
RETI = r0;
RETS = [sp++];
CYCLES2 = [sp++];
CYCLES = [sp++];
ASTAT = [sp++];
LB1 = [sp++];
LB0 = [sp++];
LT1 = [sp++];
LT0 = [sp++];
LC1 = [sp++];
LC0 = [sp++];
a1.w = [sp++];
a1.x = [sp++];
a0.w = [sp++];
a0.x = [sp++];
b3 = [sp++];
b2 = [sp++];
b1 = [sp++];
b0 = [sp++];
l3 = [sp++];
l2 = [sp++];
l1 = [sp++];
l0 = [sp++];
m3 = [sp++];
m2 = [sp++];
m1 = [sp++];
m0 = [sp++];
i3 = [sp++];
i2 = [sp++];
i1 = [sp++];
i0 = [sp++];
usp = [sp++];
fp = [sp++];
( R7:0, P5:0 ) = [SP++];
SYSCFG = [sp++];
/* Restore Core MMRs */
PM_POP(TBUFCTL)
PM_POP(TCOUNT)
PM_POP(TSCALE)
PM_POP(TPERIOD)
PM_POP(TCNTL)
PM_POP(IPRIO)
PM_POP(ILAT)
PM_POP(IMASK)
PM_POP(EVT15)
PM_POP(EVT14)
PM_POP(EVT13)
PM_POP(EVT12)
PM_POP(EVT11)
PM_POP(EVT10)
PM_POP(EVT9)
PM_POP(EVT8)
PM_POP(EVT7)
PM_POP(EVT6)
PM_POP(EVT5)
PM_POP(EVT4)
PM_POP(EVT3)
PM_POP(EVT2)
PM_POP(EVT1)
PM_POP(EVT0)
PM_POP(ICPLB_DATA15)
PM_POP(ICPLB_DATA14)
PM_POP(ICPLB_DATA13)
PM_POP(ICPLB_DATA12)
PM_POP(ICPLB_DATA11)
PM_POP(ICPLB_DATA10)
PM_POP(ICPLB_DATA9)
PM_POP(ICPLB_DATA8)
PM_POP(ICPLB_DATA7)
PM_POP(ICPLB_DATA6)
PM_POP(ICPLB_DATA5)
PM_POP(ICPLB_DATA4)
PM_POP(ICPLB_DATA3)
PM_POP(ICPLB_DATA2)
PM_POP(ICPLB_DATA1)
PM_POP(ICPLB_DATA0)
PM_POP(ICPLB_ADDR15)
PM_POP(ICPLB_ADDR14)
PM_POP(ICPLB_ADDR13)
PM_POP(ICPLB_ADDR12)
PM_POP(ICPLB_ADDR11)
PM_POP(ICPLB_ADDR10)
PM_POP(ICPLB_ADDR9)
PM_POP(ICPLB_ADDR8)
PM_POP(ICPLB_ADDR7)
PM_POP(ICPLB_ADDR6)
PM_POP(ICPLB_ADDR5)
PM_POP(ICPLB_ADDR4)
PM_POP(ICPLB_ADDR3)
PM_POP(ICPLB_ADDR2)
PM_POP(ICPLB_ADDR1)
PM_POP(ICPLB_ADDR0)
PM_POP(IMEM_CONTROL)
PM_POP(DCPLB_DATA15)
PM_POP(DCPLB_DATA14)
PM_POP(DCPLB_DATA13)
PM_POP(DCPLB_DATA12)
PM_POP(DCPLB_DATA11)
PM_POP(DCPLB_DATA10)
PM_POP(DCPLB_DATA9)
PM_POP(DCPLB_DATA8)
PM_POP(DCPLB_DATA7)
PM_POP(DCPLB_DATA6)
PM_POP(DCPLB_DATA5)
PM_POP(DCPLB_DATA4)
PM_POP(DCPLB_DATA3)
PM_POP(DCPLB_DATA2)
PM_POP(DCPLB_DATA1)
PM_POP(DCPLB_DATA0)
PM_POP(DCPLB_ADDR15)
PM_POP(DCPLB_ADDR14)
PM_POP(DCPLB_ADDR13)
PM_POP(DCPLB_ADDR12)
PM_POP(DCPLB_ADDR11)
PM_POP(DCPLB_ADDR10)
PM_POP(DCPLB_ADDR9)
PM_POP(DCPLB_ADDR8)
PM_POP(DCPLB_ADDR7)
PM_POP(DCPLB_ADDR6)
PM_POP(DCPLB_ADDR5)
PM_POP(DCPLB_ADDR4)
PM_POP(DCPLB_ADDR3)
PM_POP(DCPLB_ADDR2)
PM_POP(DCPLB_ADDR1)
PM_POP(DCPLB_ADDR0)
PM_POP(DMEM_CONTROL)
/* Restore System MMRs */
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
PM_SYS_POP16(SYSCR)
#ifdef EBIU_FCTL
PM_SYS_POP(EBIU_FCTL)
PM_SYS_POP(EBIU_MODE)
PM_SYS_POP(EBIU_MBSCTL)
#endif
PM_SYS_POP16(EBIU_AMGCTL)
PM_SYS_POP(EBIU_AMBCTL1)
PM_SYS_POP(EBIU_AMBCTL0)
#ifdef PINT0_ASSIGN
PM_SYS_POP(PINT3_EDGE_SET)
PM_SYS_POP(PINT2_EDGE_SET)
PM_SYS_POP(PINT1_EDGE_SET)
PM_SYS_POP(PINT0_EDGE_SET)
PM_SYS_POP(PINT3_INVERT_SET)
PM_SYS_POP(PINT2_INVERT_SET)
PM_SYS_POP(PINT1_INVERT_SET)
PM_SYS_POP(PINT0_INVERT_SET)
PM_SYS_POP(PINT3_ASSIGN)
PM_SYS_POP(PINT2_ASSIGN)
PM_SYS_POP(PINT1_ASSIGN)
PM_SYS_POP(PINT0_ASSIGN)
PM_SYS_POP(PINT3_MASK_SET)
PM_SYS_POP(PINT2_MASK_SET)
PM_SYS_POP(PINT1_MASK_SET)
PM_SYS_POP(PINT0_MASK_SET)
#endif
#ifdef SICA_IWR1
PM_SYS_POP(SICA_IWR1)
#endif
#ifdef SICA_IWR0
PM_SYS_POP(SICA_IWR0)
#endif
#ifdef SIC_IWR2
PM_SYS_POP(SIC_IWR2)
#endif
#ifdef SIC_IWR1
PM_SYS_POP(SIC_IWR1)
#endif
#ifdef SIC_IWR0
PM_SYS_POP(SIC_IWR0)
#endif
#ifdef SIC_IWR
PM_SYS_POP(SIC_IWR)
#endif
#ifdef SICA_IAR0
PM_SYS_POP(SICA_IAR7)
PM_SYS_POP(SICA_IAR6)
PM_SYS_POP(SICA_IAR5)
PM_SYS_POP(SICA_IAR4)
PM_SYS_POP(SICA_IAR3)
PM_SYS_POP(SICA_IAR2)
PM_SYS_POP(SICA_IAR1)
PM_SYS_POP(SICA_IAR0)
#endif
#ifdef SIC_IAR8
PM_SYS_POP(SIC_IAR11)
PM_SYS_POP(SIC_IAR10)
PM_SYS_POP(SIC_IAR9)
PM_SYS_POP(SIC_IAR8)
#endif
#ifdef SIC_IAR7
PM_SYS_POP(SIC_IAR7)
#endif
#ifdef SIC_IAR6
PM_SYS_POP(SIC_IAR6)
PM_SYS_POP(SIC_IAR5)
PM_SYS_POP(SIC_IAR4)
#endif
#ifdef SIC_IAR3
PM_SYS_POP(SIC_IAR3)
#endif
#ifdef SIC_IAR2
PM_SYS_POP(SIC_IAR2)
PM_SYS_POP(SIC_IAR1)
PM_SYS_POP(SIC_IAR0)
#endif
#ifdef SICA_IMASK1
PM_SYS_POP(SICA_IMASK1)
#endif
#ifdef SICA_IMASK0
PM_SYS_POP(SICA_IMASK0)
#endif
#ifdef SIC_IMASK
PM_SYS_POP(SIC_IMASK)
#endif
#ifdef SIC_IMASK2
PM_SYS_POP(SIC_IMASK2)
#endif
#ifdef SIC_IMASK1
PM_SYS_POP(SIC_IMASK1)
#endif
#ifdef SIC_IMASK0
PM_SYS_POP(SIC_IMASK0)
#endif
[--sp] = RETI; /* Clear Global Interrupt Disable */
SP += 4;
RETS = [SP++];
( R7:0, P5:0 ) = [SP++];
RTS;
ENDPROC(_do_hibernate)
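/* Resume handshake (sketch, as implied by the saves above): early boot code
 * is expected to find the 0xDEADBEEF magic at address 0, jump to the stored
 * .Lpm_resume_here address, and restore SP, after which the pops above
 * unwind everything that was pushed before hibernating. */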

File diff suppressed because it is too large.


@@ -0,0 +1,271 @@
/*
* Common Blackfin startup code
*
* Copyright 2004-2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/blackfin.h>
#include <asm/thread_info.h>
#include <asm/trace.h>
#include <asm/asm-offsets.h>
__INIT
ENTRY(__init_clear_bss)
r2 = r2 - r1;
cc = r2 == 0;
if cc jump .L_bss_done;
r2 >>= 2;
p1 = r1;
p2 = r2;
lsetup (1f, 1f) lc0 = p2;
1: [p1++] = r0;
.L_bss_done:
rts;
ENDPROC(__init_clear_bss)
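/* C equivalent (sketch): with r0 = 0, r1 = start, r2 = end, the loop is
 *   for (u32 *p = start; p < end; )
 *           *p++ = 0;
 * where the word count (end - start) / 4 drives the hardware loop.
 */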
ENTRY(__start)
/* R0: argument of command line string, passed from uboot, save it */
R7 = R0;
/* Enable Cycle Counter and Nesting Of Interrupts */
#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
R0 = SYSCFG_SNEN;
#else
R0 = SYSCFG_SNEN | SYSCFG_CCEN;
#endif
SYSCFG = R0;
R0 = 0;
/* Clear Out All the data and pointer Registers */
R1 = R0;
R2 = R0;
R3 = R0;
R4 = R0;
R5 = R0;
R6 = R0;
P0 = R0;
P1 = R0;
P2 = R0;
P3 = R0;
P4 = R0;
P5 = R0;
LC0 = r0;
LC1 = r0;
L0 = r0;
L1 = r0;
L2 = r0;
L3 = r0;
/* Clear Out All the DAG Registers */
B0 = r0;
B1 = r0;
B2 = r0;
B3 = r0;
I0 = r0;
I1 = r0;
I2 = r0;
I3 = r0;
M0 = r0;
M1 = r0;
M2 = r0;
M3 = r0;
/*
* Clear ITEST_COMMAND and DTEST_COMMAND registers,
* Leaving these as non-zero can confuse the emulator
*/
p0.L = LO(DTEST_COMMAND);
p0.H = HI(DTEST_COMMAND);
[p0] = R0;
[p0 + (ITEST_COMMAND - DTEST_COMMAND)] = R0;
CSYNC;
trace_buffer_init(p0,r0);
P0 = R1;
R0 = R1;
/* Turn off the icache */
p0.l = LO(IMEM_CONTROL);
p0.h = HI(IMEM_CONTROL);
R1 = [p0];
R0 = ~ENICPLB;
R0 = R0 & R1;
[p0] = R0;
SSYNC;
/* Turn off the dcache */
p0.l = LO(DMEM_CONTROL);
p0.h = HI(DMEM_CONTROL);
R1 = [p0];
R0 = ~ENDCPLB;
R0 = R0 & R1;
[p0] = R0;
SSYNC;
/* in case of double faults, save a few things */
p0.l = _init_retx;
p0.h = _init_retx;
R0 = RETX;
[P0] = R0;
#ifdef CONFIG_DEBUG_DOUBLEFAULT
/* Only save these if we are storing them,
* This happens here, since L1 gets clobbered
* below
*/
GET_PDA(p0, r0);
r6 = [p0 + PDA_DF_RETX];
p1.l = _init_saved_retx;
p1.h = _init_saved_retx;
[p1] = r6;
r6 = [p0 + PDA_DF_DCPLB];
p1.l = _init_saved_dcplb_fault_addr;
p1.h = _init_saved_dcplb_fault_addr;
[p1] = r6;
r6 = [p0 + PDA_DF_ICPLB];
p1.l = _init_saved_icplb_fault_addr;
p1.h = _init_saved_icplb_fault_addr;
[p1] = r6;
r6 = [p0 + PDA_DF_SEQSTAT];
p1.l = _init_saved_seqstat;
p1.h = _init_saved_seqstat;
[p1] = r6;
#endif
/* Initialize stack pointer */
sp.l = _init_thread_union;
sp.h = _init_thread_union;
fp = sp;
usp = sp;
#ifdef CONFIG_EARLY_PRINTK
call _init_early_exception_vectors;
r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
sti r0;
#endif
r0 = 0 (x);
/* Zero out all of the fun bss regions */
#if L1_DATA_A_LENGTH > 0
r1.l = __sbss_l1;
r1.h = __sbss_l1;
r2.l = __ebss_l1;
r2.h = __ebss_l1;
call __init_clear_bss
#endif
#if L1_DATA_B_LENGTH > 0
r1.l = __sbss_b_l1;
r1.h = __sbss_b_l1;
r2.l = __ebss_b_l1;
r2.h = __ebss_b_l1;
call __init_clear_bss
#endif
#if L2_LENGTH > 0
r1.l = __sbss_l2;
r1.h = __sbss_l2;
r2.l = __ebss_l2;
r2.h = __ebss_l2;
call __init_clear_bss
#endif
r1.l = ___bss_start;
r1.h = ___bss_start;
r2.l = ___bss_stop;
r2.h = ___bss_stop;
call __init_clear_bss
/* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
call _bfin_relocate_l1_mem;
#ifdef CONFIG_BFIN_KERNEL_CLOCK
/* Only use on-chip scratch space for stack when absolutely required
* to avoid Anomaly 05000227 ... we know the init_clocks() func only
* uses L1 text and stack space and no other memory region.
*/
# define KERNEL_CLOCK_STACK (L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
sp.l = lo(KERNEL_CLOCK_STACK);
sp.h = hi(KERNEL_CLOCK_STACK);
call _init_clocks;
sp = usp; /* usp hasn't been touched, so restore from there */
#endif
/* This section keeps the processor in supervisor mode
* during kernel boot. Switches to user mode at end of boot.
* See page 3-9 of Hardware Reference manual for documentation.
*/
/* EVT15 = _real_start */
p0.l = lo(EVT15);
p0.h = hi(EVT15);
p1.l = _real_start;
p1.h = _real_start;
[p0] = p1;
csync;
#ifdef CONFIG_EARLY_PRINTK
r0 = (EVT_IVG15 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU) (z);
#else
r0 = EVT_IVG15 (z);
#endif
sti r0;
raise 15;
#ifdef CONFIG_EARLY_PRINTK
p0.l = _early_trap;
p0.h = _early_trap;
#else
p0.l = .LWAIT_HERE;
p0.h = .LWAIT_HERE;
#endif
reti = p0;
#if ANOMALY_05000281
nop; nop; nop;
#endif
rti;
.LWAIT_HERE:
jump .LWAIT_HERE;
ENDPROC(__start)
/* A little BF561 glue ... */
#ifndef WDOG_CTL
# define WDOG_CTL WDOGA_CTL
#endif
ENTRY(_real_start)
/* Enable nested interrupts */
[--sp] = reti;
/* watchdog off for now */
p0.l = lo(WDOG_CTL);
p0.h = hi(WDOG_CTL);
r0 = 0xAD6(z);
w[p0] = r0;
ssync;
/* Pass the u-boot arguments to the global value command line */
R0 = R7;
call _cmdline_init;
/* Load the current thread pointer and stack */
p1 = THREAD_SIZE + 4 (z); /* +4 is for reti loading */
sp = sp + p1;
usp = sp;
fp = sp;
sp += -12;
call _init_pda
sp += 12;
jump.l _start_kernel;
ENDPROC(_real_start)
__FINIT


@@ -0,0 +1,286 @@
/*
* Interrupt Entries
*
* Copyright 2005-2009 Analog Devices Inc.
* D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
* Kenneth Albanowski <kjahds@kjahds.com>
*
* Licensed under the GPL-2 or later.
*/
#include <asm/blackfin.h>
#include <mach/irq.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/context.S>
.extern _ret_from_exception
#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif
.align 4 /* just in case */
/* Common interrupt entry code. First we do CLI, then push
* RETI, to keep interrupts disabled, but to allow this state to be changed
* by local_bh_enable.
* R0 contains the interrupt number, while R1 may contain the value of IPEND,
* or garbage if IPEND won't be needed by the ISR. */
__common_int_entry:
[--sp] = fp;
[--sp] = usp;
[--sp] = i0;
[--sp] = i1;
[--sp] = i2;
[--sp] = i3;
[--sp] = m0;
[--sp] = m1;
[--sp] = m2;
[--sp] = m3;
[--sp] = l0;
[--sp] = l1;
[--sp] = l2;
[--sp] = l3;
[--sp] = b0;
[--sp] = b1;
[--sp] = b2;
[--sp] = b3;
[--sp] = a0.x;
[--sp] = a0.w;
[--sp] = a1.x;
[--sp] = a1.w;
[--sp] = LC0;
[--sp] = LC1;
[--sp] = LT0;
[--sp] = LT1;
[--sp] = LB0;
[--sp] = LB1;
[--sp] = ASTAT;
[--sp] = r0; /* Skip reserved */
[--sp] = RETS;
r2 = RETI;
[--sp] = r2;
[--sp] = RETX;
[--sp] = RETN;
[--sp] = RETE;
[--sp] = SEQSTAT;
[--sp] = r1; /* IPEND - R1 may or may not be set up before jumping here. */
/* Switch to other method of keeping interrupts disabled. */
#ifdef CONFIG_DEBUG_HWERR
r1 = 0x3f;
sti r1;
#else
cli r1;
#endif
[--sp] = RETI; /* orig_pc */
/* Clear all L registers. */
r1 = 0 (x);
l0 = r1;
l1 = r1;
l2 = r1;
l3 = r1;
#ifdef CONFIG_FRAME_POINTER
fp = 0;
#endif
ANOMALY_283_315_WORKAROUND(p5, r7)
r1 = sp;
SP += -12;
#ifdef CONFIG_IPIPE
call ___ipipe_grab_irq
SP += 12;
cc = r0 == 0;
if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */
call _do_irq;
SP += 12;
#endif /* CONFIG_IPIPE */
call _return_from_int;
.Lcommon_restore_context:
RESTORE_CONTEXT
rti;
/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
/* In case a single action kicks off multiple memory transactions (like
* a cache line fetch), it can cause multiple hardware errors; let's
* catch them all. First, make sure all the actions are complete, and
* the core sees the hardware errors.
*/
SSYNC;
SSYNC;
SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
fp = 0;
#endif
ANOMALY_283_315_WORKAROUND(p5, r7)
/* Handle all stacked hardware errors
* To make sure we don't hang forever, only do it 10 times
*/
R0 = 0;
R2 = 10;
1:
P0.L = LO(ILAT);
P0.H = HI(ILAT);
R1 = [P0];
CC = BITTST(R1, EVT_IVHW_P);
IF ! CC JUMP 2f;
/* OK a hardware error is pending - clear it */
R1 = EVT_IVHW_P;
[P0] = R1;
R0 += 1;
CC = R0 == R2;
if CC JUMP 2f;
JUMP 1b;
2:
# We are going to dump something out, so make sure we print IPEND properly
p2.l = lo(IPEND);
p2.h = hi(IPEND);
r0 = [p2];
[sp + PT_IPEND] = r0;
/* set the EXCAUSE to HWERR for trap_c */
r0 = [sp + PT_SEQSTAT];
R1.L = LO(VEC_HWERR);
R1.H = HI(VEC_HWERR);
R0 = R0 | R1;
[sp + PT_SEQSTAT] = R0;
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
SP += -12;
call _trap_c;
SP += 12;
#ifdef EBIU_ERRMST
/* make sure EBIU_ERRMST is clear */
p0.l = LO(EBIU_ERRMST);
p0.h = HI(EBIU_ERRMST);
r0.l = (CORE_ERROR | CORE_MERROR);
w[p0] = r0.l;
#endif
call _ret_from_exception;
.Lcommon_restore_all_sys:
RESTORE_ALL_SYS
rti;
ENDPROC(_evt_ivhw)
/* Interrupt routine for evt2 (NMI).
* We don't actually use this, so just return.
* For inner circle type details, please see:
* http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
*/
ENTRY(_evt_nmi)
.weak _evt_nmi
rtn;
ENDPROC(_evt_nmi)
/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)
/* interrupt routine for evt7 - 7 */
ENTRY(_evt_evt7)
INTERRUPT_ENTRY(EVT_IVG7_P)
ENTRY(_evt_evt8)
INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
INTERRUPT_ENTRY(EVT_IVG13_P)
/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
fp = 0;
#endif
call _system_call;
jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)
#ifdef CONFIG_IPIPE
/*
* __ipipe_call_irqtail: lowers the current priority level to EVT15
* before running a user-defined routine, then raises the priority
* level to EVT14 to prepare the caller for a normal interrupt
* return through RTI.
*
* We currently use this facility on two occasions:
*
* - to branch to __ipipe_irq_tail_hook as requested by a high
* priority domain after the pipeline delivered an interrupt,
* e.g. such as Xenomai, in order to start its rescheduling
* procedure, since we may not switch tasks when IRQ levels are
* nested on the Blackfin, so we have to fake an interrupt return
* so that we may reschedule immediately.
*
* - to branch to sync_root_irqs, in order to play any interrupt
* pending for the root domain (i.e. the Linux kernel). This lowers
* the core priority level enough so that Linux IRQ handlers may
* never delay interrupts handled by high priority domains; we defer
* those handlers until this point instead. This is a substitute
* to using a threaded interrupt model for the Linux kernel.
*
* r0: address of user-defined routine
* context: caller must have preempted EVT15, hw interrupts must be off.
*/
ENTRY(___ipipe_call_irqtail)
p0 = r0;
r0.l = 1f;
r0.h = 1f;
reti = r0;
rti;
1:
[--sp] = rets;
[--sp] = ( r7:4, p5:3 );
sp += -12;
call (p0);
sp += 12;
( r7:4, p5:3 ) = [sp++];
rets = [sp++];
#ifdef CONFIG_DEBUG_HWERR
/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
r0 = (EVT_IVG14 | EVT_IVHW | \
EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
r0 = (EVT_IVG14 | \
EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
sti r0;
raise 14; /* Branches to _evt_evt14 */
2:
jump 2b; /* Likely paranoid. */
ENDPROC(___ipipe_call_irqtail)
#endif /* CONFIG_IPIPE */

File diff suppressed because it is too large.


@@ -0,0 +1,106 @@
/*
* panic kernel with dump information
*
* Copyright 2005-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <asm/blackfin.h>
#define L1_ICACHE_START 0xffa10000
#define L1_ICACHE_END 0xffa13fff
/*
* irq_panic - calls panic with string setup
*/
__attribute__ ((l1_text))
asmlinkage void irq_panic(int reason, struct pt_regs *regs)
{
unsigned int cmd, tag, ca, cache_hi, cache_lo, *pa;
unsigned short i, j, die;
unsigned int bad[10][6];
/* check entire cache for coherency
* Since printk is in cacheable memory,
* don't call it until you have checked everything
*/
die = 0;
i = 0;
/* check icache */
for (ca = L1_ICACHE_START; ca <= L1_ICACHE_END && i < 10; ca += 32) {
/* Grab various address bits for the itest_cmd fields */
cmd = (((ca & 0x3000) << 4) | /* ca[13:12] for SBNK[1:0] */
((ca & 0x0c00) << 16) | /* ca[11:10] for WAYSEL[1:0] */
((ca & 0x3f8)) | /* ca[09:03] for SET[4:0] and DW[1:0] */
0); /* Access Tag, Read access */
SSYNC();
bfin_write_ITEST_COMMAND(cmd);
SSYNC();
tag = bfin_read_ITEST_DATA0();
SSYNC();
/* if tag is marked as valid, check it */
if (tag & 1) {
/* The icache is arranged in 4 groups of 64-bits */
for (j = 0; j < 32; j += 8) {
cmd = ((((ca + j) & 0x3000) << 4) | /* ca[13:12] for SBNK[1:0] */
(((ca + j) & 0x0c00) << 16) | /* ca[11:10] for WAYSEL[1:0] */
(((ca + j) & 0x3f8)) | /* ca[09:03] for SET[4:0] and DW[1:0] */
4); /* Access Data, Read access */
SSYNC();
bfin_write_ITEST_COMMAND(cmd);
SSYNC();
cache_hi = bfin_read_ITEST_DATA1();
cache_lo = bfin_read_ITEST_DATA0();
pa = ((unsigned int *)((tag & 0xffffcc00) |
((ca + j) & ~(0xffffcc00))));
/*
* Debugging this, enable
*
* printk("addr: %08x %08x%08x | %08x%08x\n",
* ((unsigned int *)((tag & 0xffffcc00) | ((ca+j) & ~(0xffffcc00)))),
* cache_hi, cache_lo, *(pa+1), *pa);
*/
if (cache_hi != *(pa + 1) || cache_lo != *pa) {
/* Since icache is not working, stay out of it, by not printing */
die = 1;
bad[i][0] = (ca + j);
bad[i][1] = cache_hi;
bad[i][2] = cache_lo;
bad[i][3] = ((tag & 0xffffcc00) |
((ca + j) & ~(0xffffcc00)));
bad[i][4] = *(pa + 1);
bad[i][5] = *(pa);
i++;
}
}
}
}
if (die) {
printk(KERN_EMERG "icache coherency error\n");
for (j = 0; j < i; j++) {
printk(KERN_EMERG
"cache address : %08x cache value : %08x%08x\n",
bad[j][0], bad[j][1], bad[j][2]);
printk(KERN_EMERG
"physical address: %08x SDRAM value : %08x%08x\n",
bad[j][3], bad[j][4], bad[j][5]);
}
panic("icache coherency error");
} else
printk(KERN_EMERG "icache checked, and OK\n");
}


@@ -0,0 +1,269 @@
/*
* Blackfin power management
*
* Copyright 2006-2009 Analog Devices Inc.
*
* Licensed under the GPL-2
* based on arm/mach-omap/pm.c
* Copyright 2001, Cliff Brake <cbrake@accelent.com> and others
*/
#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/cplb.h>
#include <asm/gpio.h>
#include <asm/dma.h>
#include <asm/dpmc.h>
#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_H
#define WAKEUP_TYPE PM_WAKE_HIGH
#endif
#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_L
#define WAKEUP_TYPE PM_WAKE_LOW
#endif
#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_F
#define WAKEUP_TYPE PM_WAKE_FALLING
#endif
#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_R
#define WAKEUP_TYPE PM_WAKE_RISING
#endif
#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_B
#define WAKEUP_TYPE PM_WAKE_BOTH_EDGES
#endif
void bfin_pm_suspend_standby_enter(void)
{
unsigned long flags;
#ifdef CONFIG_PM_WAKEUP_BY_GPIO
gpio_pm_wakeup_request(CONFIG_PM_WAKEUP_GPIO_NUMBER, WAKEUP_TYPE);
#endif
local_irq_save_hw(flags);
bfin_pm_standby_setup();
#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
sleep_deeper(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]);
#else
sleep_mode(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]);
#endif
bfin_pm_standby_restore();
#ifdef SIC_IWR0
bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
# ifdef SIC_IWR1
/* BF52x system reset does not properly reset SIC_IWR1 which
* will screw up the bootrom as it relies on MDMA0/1 waking it
* up from IDLE instructions. See this report for more info:
* http://blackfin.uclinux.org/gf/tracker/4323
*/
if (ANOMALY_05000435)
bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
else
bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
local_irq_restore_hw(flags);
}
int bf53x_suspend_l1_mem(unsigned char *memptr)
{
dma_memcpy(memptr, (const void *) L1_CODE_START, L1_CODE_LENGTH);
dma_memcpy(memptr + L1_CODE_LENGTH, (const void *) L1_DATA_A_START,
L1_DATA_A_LENGTH);
dma_memcpy(memptr + L1_CODE_LENGTH + L1_DATA_A_LENGTH,
(const void *) L1_DATA_B_START, L1_DATA_B_LENGTH);
memcpy(memptr + L1_CODE_LENGTH + L1_DATA_A_LENGTH +
L1_DATA_B_LENGTH, (const void *) L1_SCRATCH_START,
L1_SCRATCH_LENGTH);
return 0;
}
int bf53x_resume_l1_mem(unsigned char *memptr)
{
dma_memcpy((void *) L1_CODE_START, memptr, L1_CODE_LENGTH);
dma_memcpy((void *) L1_DATA_A_START, memptr + L1_CODE_LENGTH,
L1_DATA_A_LENGTH);
dma_memcpy((void *) L1_DATA_B_START, memptr + L1_CODE_LENGTH +
L1_DATA_A_LENGTH, L1_DATA_B_LENGTH);
memcpy((void *) L1_SCRATCH_START, memptr + L1_CODE_LENGTH +
L1_DATA_A_LENGTH + L1_DATA_B_LENGTH, L1_SCRATCH_LENGTH);
return 0;
}
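/* Save-buffer layout used by this suspend/resume pair (sketch):
 *   memptr + 0                                 : L1 code
 *   memptr + L1_CODE_LENGTH                    : L1 data bank A
 *   memptr + L1_CODE_LENGTH + L1_DATA_A_LENGTH : L1 data bank B
 *   ... + L1_DATA_B_LENGTH                     : L1 scratchpad
 */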
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
static void flushinv_all_dcache(void)
{
u32 way, bank, subbank, set;
u32 status, addr;
u32 dmem_ctl = bfin_read_DMEM_CONTROL();
for (bank = 0; bank < 2; ++bank) {
if (!(dmem_ctl & (1 << (DMC1_P - bank))))
continue;
for (way = 0; way < 2; ++way)
for (subbank = 0; subbank < 4; ++subbank)
for (set = 0; set < 64; ++set) {
bfin_write_DTEST_COMMAND(
way << 26 |
bank << 23 |
subbank << 16 |
set << 5
);
CSYNC();
status = bfin_read_DTEST_DATA0();
/* only worry about valid/dirty entries */
if ((status & 0x3) != 0x3)
continue;
/* construct the address using the tag */
addr = (status & 0xFFFFC800) | (subbank << 12) | (set << 5);
/* flush it */
__asm__ __volatile__("FLUSHINV[%0];" : : "a"(addr));
}
}
}
#endif
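/* Sketch of the DTEST_COMMAND encoding walked above: one command per
 * (way, bank, subbank, set), e.g. way 1, bank 0, subbank 2, set 10 packs
 * to (1 << 26) | (0 << 23) | (2 << 16) | (10 << 5) = 0x4020140; the tag
 * read back from DTEST_DATA0 then yields the line's address bits.
 */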
int bfin_pm_suspend_mem_enter(void)
{
unsigned long flags;
int wakeup, ret;
unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH
+ L1_DATA_B_LENGTH + L1_SCRATCH_LENGTH,
GFP_KERNEL);
if (memptr == NULL) {
panic("bf53x_suspend_l1_mem malloc failed");
return -ENOMEM;
}
wakeup = bfin_read_VR_CTL() & ~FREQ;
wakeup |= SCKELOW;
#ifdef CONFIG_PM_BFIN_WAKE_PH6
wakeup |= PHYWE;
#endif
#ifdef CONFIG_PM_BFIN_WAKE_GP
wakeup |= GPWE;
#endif
local_irq_save_hw(flags);
ret = blackfin_dma_suspend();
if (ret) {
local_irq_restore_hw(flags);
kfree(memptr);
return ret;
}
bfin_gpio_pm_hibernate_suspend();
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
flushinv_all_dcache();
#endif
_disable_dcplb();
_disable_icplb();
bf53x_suspend_l1_mem(memptr);
do_hibernate(wakeup | vr_wakeup); /* Goodbye */
bf53x_resume_l1_mem(memptr);
_enable_icplb();
_enable_dcplb();
bfin_gpio_pm_hibernate_restore();
blackfin_dma_resume();
local_irq_restore_hw(flags);
kfree(memptr);
return 0;
}
/*
* bfin_pm_valid - Tell the PM core which suspend states we support
* @state: suspend state we're checking.
*
*/
static int bfin_pm_valid(suspend_state_t state)
{
return (state == PM_SUSPEND_STANDBY
#if !(defined(BF533_FAMILY) || defined(CONFIG_BF561))
/*
* On BF533/2/1:
* If we enter Hibernate the SCKE Pin is driven Low,
* so that the SDRAM enters Self Refresh Mode.
* However when the reset sequence that follows hibernate
* state is executed, SCKE is driven High, taking the
* SDRAM out of Self Refresh.
*
* If you reconfigure and access the SDRAM "very quickly",
* you are likely to avoid errors; otherwise the SDRAM
* starts losing its contents.
* An external HW workaround is possible using logic gates.
*/
|| state == PM_SUSPEND_MEM
#endif
);
}
/*
* bfin_pm_enter - Actually enter a sleep state.
* @state: State we're entering.
*
*/
static int bfin_pm_enter(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_STANDBY:
bfin_pm_suspend_standby_enter();
break;
case PM_SUSPEND_MEM:
bfin_pm_suspend_mem_enter();
break;
default:
return -EINVAL;
}
return 0;
}
struct platform_suspend_ops bfin_pm_ops = {
.enter = bfin_pm_enter,
.valid = bfin_pm_valid,
};
static int __init bfin_pm_init(void)
{
suspend_set_ops(&bfin_pm_ops);
return 0;
}
__initcall(bfin_pm_init);


@@ -0,0 +1,486 @@
/*
* IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
*
* Copyright 2007-2009 Analog Devices Inc.
* Philippe Gerum <rpm@xenomai.org>
*
* Licensed under the GPL-2.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>
/*
* Anomaly notes:
* 05000120 - we always define corelock as 32-bit integer in L2
*/
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
*init_saved_dcplb_fault_addr_coreb;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
#define BFIN_IPI_RESCHEDULE 0
#define BFIN_IPI_CALL_FUNC 1
#define BFIN_IPI_CPU_STOP 2
struct blackfin_flush_data {
unsigned long start;
unsigned long end;
};
void *secondary_stack;
struct smp_call_struct {
void (*func)(void *info);
void *info;
int wait;
cpumask_t pending;
cpumask_t waitmask;
};
static struct blackfin_flush_data smp_flush_data;
static DEFINE_SPINLOCK(stop_lock);
struct ipi_message {
struct list_head list;
unsigned long type;
struct smp_call_struct call_struct;
};
struct ipi_message_queue {
struct list_head head;
spinlock_t lock;
unsigned long count;
};
static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
static void ipi_cpu_stop(unsigned int cpu)
{
spin_lock(&stop_lock);
printk(KERN_CRIT "CPU%u: stopping\n", cpu);
dump_stack();
spin_unlock(&stop_lock);
cpu_clear(cpu, cpu_online_map);
local_irq_disable();
while (1)
SSYNC();
}
static void ipi_flush_icache(void *info)
{
struct blackfin_flush_data *fdata = info;
/* Invalidate the memory holding the bounds of the flushed region. */
blackfin_dcache_invalidate_range((unsigned long)fdata,
(unsigned long)fdata + sizeof(*fdata));
blackfin_icache_flush_range(fdata->start, fdata->end);
}
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
int wait;
void (*func)(void *info);
void *info;
func = msg->call_struct.func;
info = msg->call_struct.info;
wait = msg->call_struct.wait;
cpu_clear(cpu, msg->call_struct.pending);
func(info);
if (wait)
cpu_clear(cpu, msg->call_struct.waitmask);
else
kfree(msg);
}
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
struct ipi_message *msg;
struct ipi_message_queue *msg_queue;
unsigned int cpu = smp_processor_id();
platform_clear_ipi(cpu);
msg_queue = &__get_cpu_var(ipi_msg_queue);
msg_queue->count++;
spin_lock(&msg_queue->lock);
while (!list_empty(&msg_queue->head)) {
msg = list_entry(msg_queue->head.next, typeof(*msg), list);
list_del(&msg->list);
switch (msg->type) {
case BFIN_IPI_RESCHEDULE:
/* That's the easiest one; leave it to
* return_from_int. */
kfree(msg);
break;
case BFIN_IPI_CALL_FUNC:
spin_unlock(&msg_queue->lock);
ipi_call_function(cpu, msg);
spin_lock(&msg_queue->lock);
break;
case BFIN_IPI_CPU_STOP:
spin_unlock(&msg_queue->lock);
ipi_cpu_stop(cpu);
spin_lock(&msg_queue->lock);
kfree(msg);
break;
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message \
0x%lx\n", cpu, msg->type);
kfree(msg);
break;
}
}
spin_unlock(&msg_queue->lock);
return IRQ_HANDLED;
}
static void ipi_queue_init(void)
{
unsigned int cpu;
struct ipi_message_queue *msg_queue;
for_each_possible_cpu(cpu) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
INIT_LIST_HEAD(&msg_queue->head);
spin_lock_init(&msg_queue->lock);
msg_queue->count = 0;
}
}
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
unsigned int cpu;
cpumask_t callmap;
unsigned long flags;
struct ipi_message_queue *msg_queue;
struct ipi_message *msg;
callmap = cpu_online_map;
cpu_clear(smp_processor_id(), callmap);
if (cpus_empty(callmap))
return 0;
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
if (!msg)
return -ENOMEM;
INIT_LIST_HEAD(&msg->list);
msg->call_struct.func = func;
msg->call_struct.info = info;
msg->call_struct.wait = wait;
msg->call_struct.pending = callmap;
msg->call_struct.waitmask = callmap;
msg->type = BFIN_IPI_CALL_FUNC;
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
if (wait) {
while (!cpus_empty(msg->call_struct.waitmask))
blackfin_dcache_invalidate_range(
(unsigned long)(&msg->call_struct.waitmask),
(unsigned long)(&msg->call_struct.waitmask));
kfree(msg);
}
return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);
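/* Typical usage (sketch; do_remote_work is a hypothetical callback):
 *
 *   static void do_remote_work(void *info)
 *   {
 *           // runs once on every other online CPU
 *   }
 *
 *   smp_call_function(do_remote_work, NULL, 1);  // 1 == wait for completion
 */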
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
int wait)
{
unsigned int cpu = cpuid;
cpumask_t callmap;
unsigned long flags;
struct ipi_message_queue *msg_queue;
struct ipi_message *msg;
if (cpu_is_offline(cpu))
return 0;
cpus_clear(callmap);
cpu_set(cpu, callmap);
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
if (!msg)
return -ENOMEM;
INIT_LIST_HEAD(&msg->list);
msg->call_struct.func = func;
msg->call_struct.info = info;
msg->call_struct.wait = wait;
msg->call_struct.pending = callmap;
msg->call_struct.waitmask = callmap;
msg->type = BFIN_IPI_CALL_FUNC;
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
if (wait) {
while (!cpus_empty(msg->call_struct.waitmask))
blackfin_dcache_invalidate_range(
(unsigned long)(&msg->call_struct.waitmask),
(unsigned long)(&msg->call_struct.waitmask));
kfree(msg);
}
return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);
void smp_send_reschedule(int cpu)
{
unsigned long flags;
struct ipi_message_queue *msg_queue;
struct ipi_message *msg;
if (cpu_is_offline(cpu))
return;
msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
if (!msg)
return;
INIT_LIST_HEAD(&msg->list);
msg->type = BFIN_IPI_RESCHEDULE;
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
return;
}
void smp_send_stop(void)
{
unsigned int cpu;
cpumask_t callmap;
unsigned long flags;
struct ipi_message_queue *msg_queue;
struct ipi_message *msg;
callmap = cpu_online_map;
cpu_clear(smp_processor_id(), callmap);
if (cpus_empty(callmap))
return;
msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
if (!msg)
return;
INIT_LIST_HEAD(&msg->list);
msg->type = BFIN_IPI_CPU_STOP;
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
return;
}
int __cpuinit __cpu_up(unsigned int cpu)
{
struct task_struct *idle;
int ret;
idle = fork_idle(cpu);
if (IS_ERR(idle)) {
printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
return PTR_ERR(idle);
}
secondary_stack = task_stack_page(idle) + THREAD_SIZE;
smp_wmb();
ret = platform_boot_secondary(cpu, idle);
if (ret) {
cpu_clear(cpu, cpu_present_map);
printk(KERN_CRIT "CPU%u: processor failed to boot (%d)\n", cpu, ret);
free_task(idle);
} else
cpu_set(cpu, cpu_online_map);
secondary_stack = NULL;
return ret;
}
static void __cpuinit setup_secondary(unsigned int cpu)
{
#if !defined(CONFIG_TICKSOURCE_GPTMR0)
struct irq_desc *timer_desc;
#endif
unsigned long ilat;
bfin_write_IMASK(0);
CSYNC();
ilat = bfin_read_ILAT();
CSYNC();
bfin_write_ILAT(ilat);
CSYNC();
/* Enable interrupt levels IVG7-15. IARs have already been
* programmed by the boot CPU. */
bfin_irq_flags |= IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
#if defined(CONFIG_TICKSOURCE_GPTMR0)
/* Power down the core timer, just to play safe. */
bfin_write_TCNTL(0);
/* system timer0 has been setup by CoreA. */
#else
timer_desc = irq_desc + IRQ_CORETMR;
setup_core_timer();
timer_desc->chip->enable(IRQ_CORETMR);
#endif
}
void __cpuinit secondary_start_kernel(void)
{
unsigned int cpu = smp_processor_id();
struct mm_struct *mm = &init_mm;
if (_bfin_swrst & SWRST_DBL_FAULT_B) {
printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
#endif
printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
init_retx_coreb);
}
/*
* We want the D-cache to be enabled early, in case the atomic
* support code emulates cache coherence (see
* __ARCH_SYNC_CORE_DCACHE).
*/
init_exception_vectors();
bfin_setup_caches(cpu);
local_irq_disable();
/* Attach the new idle task to the global mm. */
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
BUG_ON(current->mm); /* Can't be, but better be safe than sorry. */
preempt_disable();
setup_secondary(cpu);
local_irq_enable();
platform_secondary_init(cpu);
cpu_idle();
}
void __init smp_prepare_boot_cpu(void)
{
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
platform_prepare_cpus(max_cpus);
ipi_queue_init();
platform_request_ipi(&ipi_handler);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
unsigned long bogosum = 0;
unsigned int cpu;
for_each_online_cpu(cpu)
bogosum += loops_per_jiffy;
printk(KERN_INFO "SMP: Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n",
num_online_cpus(),
bogosum / (500000/HZ),
(bogosum / (5000/HZ)) % 100);
}
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
smp_flush_data.start = start;
smp_flush_data.end = end;
if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
#ifdef __ARCH_SYNC_CORE_ICACHE
void resync_core_icache(void)
{
unsigned int cpu = get_cpu();
blackfin_invalidate_entire_icache();
++per_cpu(cpu_data, cpu).icache_invld_count;
put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif
#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
void resync_core_dcache(void)
{
unsigned int cpu = get_cpu();
blackfin_invalidate_entire_dcache();
++per_cpu(cpu_data, cpu).dcache_invld_count;
put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif