add idl4k kernel firmware version 1.13.0.105

Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions

@@ -0,0 +1,37 @@
#ifndef _4LEVEL_FIXUP_H
#define _4LEVEL_FIXUP_H
#define __ARCH_HAS_4LEVEL_HACK
#define __PAGETABLE_PUD_FOLDED
#define PUD_SIZE PGDIR_SIZE
#define PUD_MASK PGDIR_MASK
#define PTRS_PER_PUD 1
#define pud_t pgd_t
#define pmd_alloc(mm, pud, address) \
((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
NULL: pmd_offset(pud, address))
#define pud_alloc(mm, pgd, address) (pgd)
#define pud_offset(pgd, start) (pgd)
#define pud_none(pud) 0
#define pud_bad(pud) 0
#define pud_present(pud) 1
#define pud_ERROR(pud) do { } while (0)
#define pud_clear(pud) pgd_clear(pud)
#define pud_val(pud) pgd_val(pud)
#define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd)
#define pud_page(pud) pgd_page(pud)
#define pud_page_vaddr(pud) pgd_page_vaddr(pud)
#undef pud_free_tlb
#define pud_free_tlb(tlb, x, addr) do { } while (0)
#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x, addr) do { } while (0)
#undef pud_addr_end
#define pud_addr_end(addr, end) (end)
#endif
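What the fixup buys is easiest to see in a walk. A minimal sketch (not part of the commit; walk_to_pte() is hypothetical and assumes the usual pgd_offset()/pte_offset_map() helpers): with the PUD folded, pud_alloc() and pud_offset() hand the pgd entry straight through, so a nominally four-level walk costs three real lookups.

/* Hypothetical sketch: a page-table walk under the 4-level fixup. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd = pgd_offset(mm, addr);
    pud_t *pud = pud_alloc(mm, pgd, addr);  /* folded: just returns pgd */
    pmd_t *pmd = pmd_alloc(mm, pud, addr);  /* may allocate a pmd page */

    if (!pmd)
        return NULL;
    return pte_offset_map(pmd, addr);       /* pte_unmap() omitted */
}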

@@ -0,0 +1,35 @@
header-y += auxvec.h
header-y += bitsperlong.h
header-y += errno-base.h
header-y += errno.h
header-y += fcntl.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
header-y += mman-common.h
header-y += mman.h
header-y += msgbuf.h
header-y += param.h
header-y += poll.h
header-y += posix_types.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
header-y += shmparam.h
header-y += signal-defs.h
header-y += signal.h
header-y += socket.h
header-y += sockios.h
header-y += stat.h
header-y += statfs.h
header-y += swab.h
header-y += termbits.h
header-y += termios.h
header-y += types.h
header-y += ucontext.h
header-y += unistd.h
unifdef-y += int-l64.h
unifdef-y += int-ll64.h
unifdef-y += resource.h
unifdef-y += siginfo.h

@@ -0,0 +1,44 @@
ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \
$(srctree)/include/asm-$(SRCARCH)/kvm.h),)
header-y += kvm.h
endif
ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \
$(srctree)/include/asm-$(SRCARCH)/kvm_para.h),)
header-y += kvm_para.h
endif
ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \
$(srctree)/include/asm-$(SRCARCH)/a.out.h),)
unifdef-y += a.out.h
endif
unifdef-y += auxvec.h
unifdef-y += byteorder.h
unifdef-y += bitsperlong.h
unifdef-y += errno.h
unifdef-y += fcntl.h
unifdef-y += ioctl.h
unifdef-y += ioctls.h
unifdef-y += ipcbuf.h
unifdef-y += mman.h
unifdef-y += msgbuf.h
unifdef-y += param.h
unifdef-y += poll.h
unifdef-y += posix_types.h
unifdef-y += ptrace.h
unifdef-y += resource.h
unifdef-y += sembuf.h
unifdef-y += setup.h
unifdef-y += shmbuf.h
unifdef-y += sigcontext.h
unifdef-y += siginfo.h
unifdef-y += signal.h
unifdef-y += socket.h
unifdef-y += sockios.h
unifdef-y += stat.h
unifdef-y += statfs.h
unifdef-y += swab.h
unifdef-y += termbits.h
unifdef-y += termios.h
unifdef-y += types.h
unifdef-y += unistd.h

@@ -0,0 +1,258 @@
#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H
/*
* Copyright (C) 2005 Silicon Graphics, Inc.
* Christoph Lameter
*
* Allows providing arch-independent atomic definitions without the need to
* edit all arch-specific atomic.h files.
*/
#include <asm/types.h>
/*
* Support for atomic_long_t
*
* Casts for parameters are avoided for existing atomic functions in order to
* avoid issues with cast-as-lval under gcc 4.x and other limitations that the
* macros of a platform may have.
*/
#if BITS_PER_LONG == 64
typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
static inline long atomic_long_read(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_read(v);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_set(v, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_inc(v);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_dec(v);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_add(i, v);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_sub(i, v);
}
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_sub_and_test(i, v);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_dec_and_test(v);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_inc_and_test(v);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_add_negative(i, v);
}
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_add_return(i, v);
}
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_sub_return(i, v);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_inc_return(v);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_dec_return(v);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_add_unless(v, a, u);
}
#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
#define atomic_long_cmpxchg(l, old, new) \
(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
(atomic64_xchg((atomic64_t *)(v), (new)))
#else /* BITS_PER_LONG == 64 */
typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
static inline long atomic_long_read(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_read(v);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic_t *v = (atomic_t *)l;
atomic_set(v, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_inc(v);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_dec(v);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_add(i, v);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_sub(i, v);
}
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_sub_and_test(i, v);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_dec_and_test(v);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_inc_and_test(v);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_add_negative(i, v);
}
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_add_return(i, v);
}
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_sub_return(i, v);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_inc_return(v);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_dec_return(v);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_add_unless(v, a, u);
}
#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
#define atomic_long_cmpxchg(l, old, new) \
(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
(atomic_xchg((atomic_t *)(v), (new)))
#endif /* BITS_PER_LONG == 64 */
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
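A brief usage sketch (not from the commit): the point of atomic_long_t is that one definition compiles to atomic64_t on 64-bit kernels and atomic_t on 32-bit ones, so word-sized counters need no #ifdef in the caller.

/* Sketch: a word-sized counter with no BITS_PER_LONG conditionals. */
static atomic_long_t nr_objects = ATOMIC_LONG_INIT(0);

static void obj_created(void)
{
    atomic_long_inc(&nr_objects);
}

static long objs_outstanding(void)
{
    return atomic_long_read(&nr_objects);
}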

@@ -0,0 +1,165 @@
/*
* Generic C implementation of atomic counter operations
* Originally implemented for MN10300.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H
#ifdef CONFIG_SMP
#error not SMP safe
#endif
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
#define ATOMIC_INIT(i) { (i) }
#ifdef __KERNEL__
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
#define atomic_set(v, i) (((v)->counter) = (i))
#include <asm/system.h>
/**
* atomic_add_return - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns the result
* Note that the guaranteed useful range of an atomic_t is only 24 bits.
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long flags;
int temp;
local_irq_save(flags);
temp = v->counter;
temp += i;
v->counter = temp;
local_irq_restore(flags);
return temp;
}
/**
* atomic_sub_return - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns the result
* Note that the guaranteed useful range of an atomic_t is only 24 bits.
*/
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long flags;
int temp;
local_irq_save(flags);
temp = v->counter;
temp -= i;
v->counter = temp;
local_irq_restore(flags);
return temp;
}
static inline int atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
}
static inline void atomic_add(int i, atomic_t *v)
{
atomic_add_return(i, v);
}
static inline void atomic_sub(int i, atomic_t *v)
{
atomic_sub_return(i, v);
}
static inline void atomic_inc(atomic_t *v)
{
atomic_add_return(1, v);
}
static inline void atomic_dec(atomic_t *v)
{
atomic_sub_return(1, v);
}
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_add_unless(v, a, u) \
({ \
int c, old; \
c = atomic_read(v); \
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
c = old; \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned long flags;
mask = ~mask;
local_irq_save(flags);
*addr &= mask;
local_irq_restore(flags);
}
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
/* Assume that atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */
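For context, a hedged sketch of the reference-counting pattern these primitives typically serve; struct my_obj and free_obj() are hypothetical, not part of the header.

/* Sketch: classic refcounting on top of atomic_t. */
struct my_obj {
    atomic_t refcnt;
    /* ... payload ... */
};

static void obj_put(struct my_obj *obj)
{
    /* atomic_dec_and_test() is true only for the final reference */
    if (atomic_dec_and_test(&obj->refcnt))
        free_obj(obj);    /* hypothetical destructor */
}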

@@ -0,0 +1,42 @@
/*
* Generic implementation of 64-bit atomics using spinlocks,
* useful on processors that don't have 64-bit atomic instructions.
*
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _ASM_GENERIC_ATOMIC64_H
#define _ASM_GENERIC_ATOMIC64_H
typedef struct {
long long counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
extern long long atomic64_read(const atomic64_t *v);
extern void atomic64_set(atomic64_t *v, long long i);
extern void atomic64_add(long long a, atomic64_t *v);
extern long long atomic64_add_return(long long a, atomic64_t *v);
extern void atomic64_sub(long long a, atomic64_t *v);
extern long long atomic64_sub_return(long long a, atomic64_t *v);
extern long long atomic64_dec_if_positive(atomic64_t *v);
extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
extern long long atomic64_xchg(atomic64_t *v, long long new);
extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
#endif /* _ASM_GENERIC_ATOMIC64_H */
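A short sketch of the intended use (not part of the header): keeping a 64-bit counter consistent on 32-bit CPUs, where a plain u64 increment is two instructions and can be observed torn.

/* Sketch: a statistics counter that never tears, even without
 * native 64-bit atomic instructions. */
static atomic64_t bytes_rx = ATOMIC64_INIT(0);

static void account_rx(unsigned int len)
{
    atomic64_add(len, &bytes_rx);
}

static long long bytes_rx_total(void)
{
    return atomic64_read(&bytes_rx);
}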

@@ -0,0 +1,22 @@
__NR_chmod,
__NR_fchmod,
#ifdef __NR_chown
__NR_chown,
__NR_fchown,
__NR_lchown,
#endif
__NR_setxattr,
__NR_lsetxattr,
__NR_fsetxattr,
__NR_removexattr,
__NR_lremovexattr,
__NR_fremovexattr,
#ifdef __NR_fchownat
__NR_fchownat,
__NR_fchmodat,
#endif
#ifdef __NR_chown32
__NR_chown32,
__NR_fchown32,
__NR_lchown32,
#endif

@@ -0,0 +1,18 @@
__NR_rename,
__NR_mkdir,
__NR_rmdir,
#ifdef __NR_creat
__NR_creat,
#endif
__NR_link,
__NR_unlink,
__NR_symlink,
__NR_mknod,
#ifdef __NR_mkdirat
__NR_mkdirat,
__NR_mknodat,
__NR_unlinkat,
__NR_renameat,
__NR_linkat,
__NR_symlinkat,
#endif

@@ -0,0 +1,8 @@
__NR_readlink,
__NR_quotactl,
__NR_listxattr,
__NR_llistxattr,
__NR_flistxattr,
__NR_getxattr,
__NR_lgetxattr,
__NR_fgetxattr,

@@ -0,0 +1,3 @@
__NR_kill,
__NR_tgkill,
__NR_tkill,

@@ -0,0 +1,13 @@
#include <asm-generic/audit_dir_write.h>
__NR_acct,
#ifdef __NR_swapon
__NR_swapon,
#endif
__NR_quotactl,
__NR_truncate,
#ifdef __NR_truncate64
__NR_truncate64,
#endif
#ifdef __NR_bind
__NR_bind, /* bind can affect fs object only in one way... */
#endif

@@ -0,0 +1,8 @@
#ifndef __ASM_GENERIC_AUXVEC_H
#define __ASM_GENERIC_AUXVEC_H
/*
* Not all architectures need their own auxvec.h; the most
* common definitions are already in linux/auxvec.h.
*/
#endif /* __ASM_GENERIC_AUXVEC_H */

@@ -0,0 +1,45 @@
#ifndef __ASM_GENERIC_BITOPS_H
#define __ASM_GENERIC_BITOPS_H
/*
* For the benefit of those who are trying to port Linux to another
* architecture, here are some C-language equivalents. You should
* recode these in the native assembly language, if at all possible.
*
* C language equivalents written by Theodore Ts'o, 9/26/92
*/
#include <linux/irqflags.h>
#include <linux/compiler.h>
/*
* clear_bit may not imply a memory barrier
*/
#ifndef smp_mb__before_clear_bit
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
#endif
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#endif /* __ASM_GENERIC_BITOPS_H */

@@ -0,0 +1,43 @@
#ifndef _ASM_GENERIC_BITOPS___FFS_H_
#define _ASM_GENERIC_BITOPS___FFS_H_
#include <asm/types.h>
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __always_inline unsigned long __ffs(unsigned long word)
{
int num = 0;
#if BITS_PER_LONG == 64
if ((word & 0xffffffff) == 0) {
num += 32;
word >>= 32;
}
#endif
if ((word & 0xffff) == 0) {
num += 16;
word >>= 16;
}
if ((word & 0xff) == 0) {
num += 8;
word >>= 8;
}
if ((word & 0xf) == 0) {
num += 4;
word >>= 4;
}
if ((word & 0x3) == 0) {
num += 2;
word >>= 2;
}
if ((word & 0x1) == 0)
num += 1;
return num;
}
#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
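A worked trace of the binary search above, added here for illustration:

/* Worked example: __ffs(0x18), i.e. 0b11000 (lowest set bit is bit 3).
 * The 0xffff, 0xff and 0xf tests all see set bits and fall through;
 * (word & 0x3) == 0 -> num = 2, word >>= 2 (word is now 0b110);
 * (word & 0x1) == 0 -> num = 3.  Result: 3. */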

@@ -0,0 +1,43 @@
#ifndef _ASM_GENERIC_BITOPS___FLS_H_
#define _ASM_GENERIC_BITOPS___FLS_H_
#include <asm/types.h>
/**
* __fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static __always_inline unsigned long __fls(unsigned long word)
{
int num = BITS_PER_LONG - 1;
#if BITS_PER_LONG == 64
if (!(word & (~0ul << 32))) {
num -= 32;
word <<= 32;
}
#endif
if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
num -= 16;
word <<= 16;
}
if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
num -= 8;
word <<= 8;
}
if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
num -= 4;
word <<= 4;
}
if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
num -= 2;
word <<= 2;
}
if (!(word & (~0ul << (BITS_PER_LONG-1))))
num -= 1;
return num;
}
#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
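A worked trace for one input, assuming a 32-bit long:

/* Worked example: __fls(0x00f00000); the top set bit is bit 23.
 * num starts at 31.  The <<16 test sees set bits and falls through;
 * the <<24 test sees none -> num = 23, word <<= 8 (now 0xf0000000);
 * every remaining test sees set bits in the top nibble.  Result: 23. */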

@@ -0,0 +1,189 @@
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_
#include <asm/types.h>
#include <asm/system.h>
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h> /* we use L1_CACHE_BYTES */
/* Use an array of spinlocks for our atomic_ts.
* Hash function to index into a different SPINLOCK.
* Since "a" is usually an address, use one spinlock per cacheline.
*/
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
raw_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
__raw_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
raw_spinlock_t *s = ATOMIC_HASH(l); \
__raw_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
/*
* NMI events can occur at any time, including when interrupts have been
* disabled by *_irqsave(). So you can get NMI events occurring while a
* *_bit function is holding a spin lock. If the NMI handler also wants
* to do bit manipulation (and they do) then you can get a deadlock
* between the original caller of *_bit() and the NMI handler.
*
* by Keith Owens
*/
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
*
* Note: there are no guarantees that this function will not be reordered
* on non-x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
*p |= mask;
_atomic_spin_unlock_irqrestore(p, flags);
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
*p &= ~mask;
_atomic_spin_unlock_irqrestore(p, flags);
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered. It may be
* reordered on architectures other than x86.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
*p ^= mask;
_atomic_spin_unlock_irqrestore(p, flags);
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It may be reordered on architectures other than x86.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old;
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
old = *p;
*p = old | mask;
_atomic_spin_unlock_irqrestore(p, flags);
return (old & mask) != 0;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It can be reordered on architectures other than x86.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old;
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
old = *p;
*p = old & ~mask;
_atomic_spin_unlock_irqrestore(p, flags);
return (old & mask) != 0;
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old;
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
old = *p;
*p = old ^ mask;
_atomic_spin_unlock_irqrestore(p, flags);
return (old & mask) != 0;
}
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */
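A hedged usage sketch (not part of the file): a driver-style busy flag built on these primitives; BUSY_BIT and dev_flags are hypothetical names.

/* Sketch: claiming and releasing a device with the atomic bitops. */
#define BUSY_BIT    0
static unsigned long dev_flags;

static int dev_try_claim(void)
{
    /* test_and_set_bit() returns the old value: 0 means we won */
    return !test_and_set_bit(BUSY_BIT, &dev_flags);
}

static void dev_release(void)
{
    smp_mb__before_clear_bit();    /* order prior stores before release */
    clear_bit(BUSY_BIT, &dev_flags);
}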

@@ -0,0 +1,22 @@
#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
#define ext2_set_bit_atomic(lock, nr, addr) \
({ \
int ret; \
spin_lock(lock); \
ret = ext2_set_bit((nr), (unsigned long *)(addr)); \
spin_unlock(lock); \
ret; \
})
#define ext2_clear_bit_atomic(lock, nr, addr) \
({ \
int ret; \
spin_lock(lock); \
ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \
spin_unlock(lock); \
ret; \
})
#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */

@@ -0,0 +1,20 @@
#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
#include <asm-generic/bitops/le.h>
#define ext2_set_bit(nr,addr) \
generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
#define ext2_clear_bit(nr,addr) \
generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
#define ext2_test_bit(nr,addr) \
generic_test_le_bit((nr),(unsigned long *)(addr))
#define ext2_find_first_zero_bit(addr, size) \
generic_find_first_zero_le_bit((unsigned long *)(addr), (size))
#define ext2_find_next_zero_bit(addr, size, off) \
generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
#define ext2_find_next_bit(addr, size, off) \
generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */

@@ -0,0 +1,41 @@
#ifndef _ASM_GENERIC_BITOPS_FFS_H_
#define _ASM_GENERIC_BITOPS_FFS_H_
/**
* ffs - find first bit set
* @x: the word to search
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
static inline int ffs(int x)
{
int r = 1;
if (!x)
return 0;
if (!(x & 0xffff)) {
x >>= 16;
r += 16;
}
if (!(x & 0xff)) {
x >>= 8;
r += 8;
}
if (!(x & 0xf)) {
x >>= 4;
r += 4;
}
if (!(x & 3)) {
x >>= 2;
r += 2;
}
if (!(x & 1)) {
x >>= 1;
r += 1;
}
return r;
}
#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
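A worked trace, for illustration:

/* Worked example: ffs(0x40); bit 6 is the only set bit, so the
 * 1-based answer is 7.  The 0xffff and 0xff tests fall through;
 * (x & 0xf) == 0 -> x >>= 4, r = 5; (x & 3) == 0 -> x >>= 2, r = 7;
 * (x & 1) is now set.  Result: 7. */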

@@ -0,0 +1,12 @@
#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
#define _ASM_GENERIC_BITOPS_FFZ_H_
/*
* ffz - find first zero in word.
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
#define ffz(x) __ffs(~(x))
#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */

@@ -0,0 +1,15 @@
#ifndef _ASM_GENERIC_BITOPS_FIND_H_
#define _ASM_GENERIC_BITOPS_FIND_H_
#ifndef CONFIG_GENERIC_FIND_NEXT_BIT
extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
size, unsigned long offset);
extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
long size, unsigned long offset);
#endif
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */

@@ -0,0 +1,41 @@
#ifndef _ASM_GENERIC_BITOPS_FLS_H_
#define _ASM_GENERIC_BITOPS_FLS_H_
/**
* fls - find last (most-significant) bit set
* @x: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static __always_inline int fls(int x)
{
int r = 32;
if (!x)
return 0;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
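A worked trace, for illustration:

/* Worked example: fls(0x00f00000); the highest set bit is bit 23,
 * so the 1-based answer is 24.  r starts at 32; the 0xffff0000u test
 * falls through, the 0xff000000u test fires -> x <<= 8, r = 24;
 * the remaining tests all see set bits in the top nibble.  Result: 24. */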

@@ -0,0 +1,36 @@
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
#define _ASM_GENERIC_BITOPS_FLS64_H_
#include <asm/types.h>
/**
* fls64 - find last set bit in a 64-bit word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffsll, but returns the position of the most significant set bit.
*
* fls64(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 64.
*/
#if BITS_PER_LONG == 32
static __always_inline int fls64(__u64 x)
{
__u32 h = x >> 32;
if (h)
return fls(h) + 32;
return fls(x);
}
#elif BITS_PER_LONG == 64
static __always_inline int fls64(__u64 x)
{
if (x == 0)
return 0;
return __fls(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif
#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */

@@ -0,0 +1,11 @@
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
#include <asm/types.h>
extern unsigned int hweight32(unsigned int w);
extern unsigned int hweight16(unsigned int w);
extern unsigned int hweight8(unsigned int w);
extern unsigned long hweight64(__u64 w);
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */

@@ -0,0 +1,57 @@
#ifndef _ASM_GENERIC_BITOPS_LE_H_
#define _ASM_GENERIC_BITOPS_LE_H_
#include <asm/types.h>
#include <asm/byteorder.h>
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
#if defined(__LITTLE_ENDIAN)
#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
#define generic_find_next_le_bit(addr, size, offset) \
find_next_bit(addr, size, offset)
#elif defined(__BIG_ENDIAN)
#define generic_test_le_bit(nr, addr) \
test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___set_le_bit(nr, addr) \
__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___clear_le_bit(nr, addr) \
__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic_test_and_set_le_bit(nr, addr) \
test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic_test_and_clear_le_bit(nr, addr) \
test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___test_and_set_le_bit(nr, addr) \
__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___test_and_clear_le_bit(nr, addr) \
__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
extern unsigned long generic_find_next_le_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
#else
#error "Please fix <asm/byteorder.h>"
#endif
#define generic_find_first_zero_le_bit(addr, size) \
generic_find_next_zero_le_bit((addr), (size), 0)
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */

@@ -0,0 +1,45 @@
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_
/**
* test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and provides acquire barrier semantics.
* It can be used to implement bit locks.
*/
#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to clear
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
#define clear_bit_unlock(nr, addr) \
do { \
smp_mb__before_clear_bit(); \
clear_bit(nr, addr); \
} while (0)
/**
* __clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to clear
* @addr: the address to start counting from
*
* This operation is like clear_bit_unlock, however it is not atomic.
* It does provide release barrier semantics so it can be used to unlock
* a bit lock, however it would only be used if no other CPU can modify
* any bits in the memory until the lock is released (a good example is
* if the bit lock itself protects access to the other bits in the word).
*/
#define __clear_bit_unlock(nr, addr) \
do { \
smp_mb(); \
__clear_bit(nr, addr); \
} while (0)
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */

@@ -0,0 +1,17 @@
#ifndef _ASM_GENERIC_BITOPS_MINIX_LE_H_
#define _ASM_GENERIC_BITOPS_MINIX_LE_H_
#include <asm-generic/bitops/le.h>
#define minix_test_and_set_bit(nr,addr) \
generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
#define minix_set_bit(nr,addr) \
generic___set_le_bit((nr),(unsigned long *)(addr))
#define minix_test_and_clear_bit(nr,addr) \
generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
#define minix_test_bit(nr,addr) \
generic_test_le_bit((nr),(unsigned long *)(addr))
#define minix_find_first_zero_bit(addr,size) \
generic_find_first_zero_le_bit((unsigned long *)(addr),(size))
#endif /* _ASM_GENERIC_BITOPS_MINIX_LE_H_ */

@@ -0,0 +1,15 @@
#ifndef _ASM_GENERIC_BITOPS_MINIX_H_
#define _ASM_GENERIC_BITOPS_MINIX_H_
#define minix_test_and_set_bit(nr,addr) \
__test_and_set_bit((nr),(unsigned long *)(addr))
#define minix_set_bit(nr,addr) \
__set_bit((nr),(unsigned long *)(addr))
#define minix_test_and_clear_bit(nr,addr) \
__test_and_clear_bit((nr),(unsigned long *)(addr))
#define minix_test_bit(nr,addr) \
test_bit((nr),(unsigned long *)(addr))
#define minix_find_first_zero_bit(addr,size) \
find_first_zero_bit((unsigned long *)(addr),(size))
#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */

@@ -0,0 +1,108 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#include <asm/types.h>
/**
* __set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p |= mask;
}
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p &= ~mask;
}
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p ^= mask;
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old | mask;
return (old & mask) != 0;
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old & ~mask;
return (old & mask) != 0;
}
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr,
volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old ^ mask;
return (old & mask) != 0;
}
/**
* test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
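A short hedged sketch of when the non-atomic forms are the right choice (bitmap_init_even() is hypothetical):

/* Sketch: __set_bit() suffices while the bitmap is still private to
 * one thread, or is already serialized by an outer lock. */
static void bitmap_init_even(unsigned long *map, int nbits)
{
    int i;

    for (i = 0; i < nbits; i += 2)
        __set_bit(i, map);    /* no atomicity cost needed here */
}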

@@ -0,0 +1,31 @@
#ifndef _ASM_GENERIC_BITOPS_SCHED_H_
#define _ASM_GENERIC_BITOPS_SCHED_H_
#include <linux/compiler.h> /* unlikely() */
#include <asm/types.h>
/*
* Every architecture must define this function. It's the fastest
* way of searching a 100-bit bitmap. It's guaranteed that at least
* one of the 100 bits is set.
*/
static inline int sched_find_first_bit(const unsigned long *b)
{
#if BITS_PER_LONG == 64
if (b[0])
return __ffs(b[0]);
return __ffs(b[1]) + 64;
#elif BITS_PER_LONG == 32
if (b[0])
return __ffs(b[0]);
if (b[1])
return __ffs(b[1]) + 32;
if (b[2])
return __ffs(b[2]) + 64;
return __ffs(b[3]) + 96;
#else
#error BITS_PER_LONG not defined
#endif
}
#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */

@@ -0,0 +1,32 @@
#ifndef __ASM_GENERIC_BITS_PER_LONG
#define __ASM_GENERIC_BITS_PER_LONG
/*
* There seems to be no way of detecting this automatically from user
* space, so 64 bit architectures should override this in their
* bitsperlong.h. In particular, an architecture that supports
* both 32 and 64 bit user space must not rely on CONFIG_64BIT
* to decide it, but rather check a compiler provided macro.
*/
#ifndef __BITS_PER_LONG
#define __BITS_PER_LONG 32
#endif
#ifdef __KERNEL__
#ifdef CONFIG_64BIT
#define BITS_PER_LONG 64
#else
#define BITS_PER_LONG 32
#endif /* CONFIG_64BIT */
/*
* FIXME: The check currently breaks x86-64 build, so it's
* temporarily disabled. Please fix x86-64 and reenable
*/
#if 0 && BITS_PER_LONG != __BITS_PER_LONG
#error Inconsistent word size. Check asm/bitsperlong.h
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_BITS_PER_LONG */

@@ -0,0 +1,144 @@
#ifndef _ASM_GENERIC_BUG_H
#define _ASM_GENERIC_BUG_H
#include <linux/compiler.h>
#ifdef CONFIG_BUG
#ifdef CONFIG_GENERIC_BUG
#ifndef __ASSEMBLY__
struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
unsigned long bug_addr;
#else
signed int bug_addr_disp;
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
const char *file;
#else
signed int file_disp;
#endif
unsigned short line;
#endif
unsigned short flags;
};
#endif /* __ASSEMBLY__ */
#define BUGFLAG_WARNING (1<<0)
#endif /* CONFIG_GENERIC_BUG */
/*
* Don't use BUG() or BUG_ON() unless there's really no way out; one
* example might be detecting data structure corruption in the middle
* of an operation that can't be backed out of. If the (sub)system
* can somehow continue operating, perhaps with reduced functionality,
* it's probably not BUG-worthy.
*
* If you're tempted to BUG(), think again: is completely giving up
* really the *only* solution? There are usually better options, where
* users don't need to reboot ASAP and can mostly shut down cleanly.
*/
#ifndef HAVE_ARCH_BUG
#define BUG() do { \
printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
panic("BUG!"); \
} while (0)
#endif
#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
#endif
/*
* WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
* significant issues that need prompt attention if they should ever
* appear at runtime. Use the versions with printk format strings
* to provide better diagnostics.
*/
#ifndef __WARN
#ifndef __ASSEMBLY__
extern void warn_slowpath_fmt(const char *file, const int line,
const char *fmt, ...) __attribute__((format(printf, 3, 4)));
extern void warn_slowpath_null(const char *file, const int line);
#define WANT_WARN_ON_SLOWPATH
#endif
#define __WARN() warn_slowpath_null(__FILE__, __LINE__)
#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
#else
#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
#endif
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN(); \
unlikely(__ret_warn_on); \
})
#endif
#ifndef WARN
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN_printf(format); \
unlikely(__ret_warn_on); \
})
#endif
#else /* !CONFIG_BUG */
#ifndef HAVE_ARCH_BUG
#define BUG() do {} while(0)
#endif
#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (condition) ; } while(0)
#endif
#ifndef HAVE_ARCH_WARN_ON
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
unlikely(__ret_warn_on); \
})
#endif
#ifndef WARN
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
unlikely(__ret_warn_on); \
})
#endif
#endif
#define WARN_ON_ONCE(condition) ({ \
static int __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN_ON(!__warned)) \
__warned = 1; \
unlikely(__ret_warn_once); \
})
#define WARN_ONCE(condition, format...) ({ \
static int __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN(!__warned, format)) \
__warned = 1; \
unlikely(__ret_warn_once); \
})
#define WARN_ON_RATELIMIT(condition, state) \
WARN_ON((condition) && __ratelimit(state))
#ifdef CONFIG_SMP
# define WARN_ON_SMP(x) WARN_ON(x)
#else
# define WARN_ON_SMP(x) do { } while (0)
#endif
#endif
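A hedged sketch of the intended division of labour (not part of the header): WARN* for recoverable misuse, BUG* only for states that cannot be continued from. The queue type and the -EINVAL return here are illustrative.

/* Sketch: warn once for a recoverable caller bug, BUG for corruption. */
struct queue { void **items; int count, size; };

static int queue_push(struct queue *q, void *item)
{
    if (WARN_ON_ONCE(!q))
        return -EINVAL;             /* recoverable: warn once, carry on */

    BUG_ON(q->count >= q->size);    /* must never happen: no way out */
    q->items[q->count++] = item;
    return 0;
}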

@@ -0,0 +1,10 @@
#ifndef __ASM_GENERIC_BUGS_H
#define __ASM_GENERIC_BUGS_H
/*
* This file is included by 'init/main.c' to check for
* architecture-dependent bugs.
*/
static inline void check_bugs(void) { }
#endif /* __ASM_GENERIC_BUGS_H */

@@ -0,0 +1,12 @@
#ifndef __ASM_GENERIC_CACHE_H
#define __ASM_GENERIC_CACHE_H
/*
* 32 bytes appears to be the most common cache line size,
* so make that the default here. Architectures with larger
* cache lines need to provide their own cache.h.
*/
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#endif /* __ASM_GENERIC_CACHE_H */

@@ -0,0 +1,30 @@
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
/*
* The cache doesn't need to be flushed when TLB entries change, as long
* as the cache is mapped to physical memory rather than virtual memory
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#endif /* __ASM_CACHEFLUSH_H */

@@ -0,0 +1,79 @@
#ifndef __ASM_GENERIC_CHECKSUM_H
#define __ASM_GENERIC_CHECKSUM_H
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* the same as csum_partial, but copies from src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
/*
* the same as csum_partial_copy, but copies from user space.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *csum_err);
#define csum_partial_copy_nocheck(src, dst, len, sum) \
csum_partial_copy((src), (dst), (len), (sum))
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/*
* Fold a partial checksum
*/
static inline __sum16 csum_fold(__wsum csum)
{
u32 sum = (__force u32)csum;
sum = (sum & 0xffff) + (sum >> 16);
sum = (sum & 0xffff) + (sum >> 16);
return (__force __sum16)~sum;
}
#ifndef csum_tcpudp_nofold
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
extern __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum);
#endif
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
extern __sum16 ip_compute_csum(const void *buff, int len);
#endif /* __ASM_GENERIC_CHECKSUM_H */
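A worked example of the fold, for illustration:

/* Worked example: csum_fold(0x12345678).
 * Pass 1: 0x5678 + 0x1234 = 0x68ac (no carry out of 16 bits);
 * pass 2 adds a zero high half; the complement ~0x68ac = 0x9753 is
 * the final 16-bit ones'-complement checksum. */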

@@ -0,0 +1,65 @@
#ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H
#define __ASM_GENERIC_CMPXCHG_LOCAL_H
#include <linux/types.h>
extern unsigned long wrong_size_cmpxchg(volatile void *ptr);
/*
* Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
* long parameter, supporting various types of architectures.
*/
static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
unsigned long old, unsigned long new, int size)
{
unsigned long flags, prev;
/*
* Sanity checking, compile-time.
*/
if (size == 8 && sizeof(unsigned long) != 8)
wrong_size_cmpxchg(ptr);
local_irq_save(flags);
switch (size) {
case 1: prev = *(u8 *)ptr;
if (prev == old)
*(u8 *)ptr = (u8)new;
break;
case 2: prev = *(u16 *)ptr;
if (prev == old)
*(u16 *)ptr = (u16)new;
break;
case 4: prev = *(u32 *)ptr;
if (prev == old)
*(u32 *)ptr = (u32)new;
break;
case 8: prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = (u64)new;
break;
default:
wrong_size_cmpxchg(ptr);
}
local_irq_restore(flags);
return prev;
}
/*
* Generic version of __cmpxchg64_local. Takes a u64 parameter.
*/
static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
u64 old, u64 new)
{
u64 prev;
unsigned long flags;
local_irq_save(flags);
prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = new;
local_irq_restore(flags);
return prev;
}
#endif
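A hedged sketch of the kind of caller this serves, assuming the cmpxchg_local() wrapper from asm-generic/atomic.h above:

/* Sketch: an optimistic update loop; on UP the "compare and swap"
 * is implemented by the irq-disabling fallback above. */
static unsigned long counter;

static void counter_add(unsigned long n)
{
    unsigned long old;

    do {
        old = counter;
    } while (cmpxchg_local(&counter, old, old + n) != old);
}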

@@ -0,0 +1,22 @@
#ifndef __ASM_GENERIC_CMPXCHG_H
#define __ASM_GENERIC_CMPXCHG_H
/*
* Generic cmpxchg
*
* Uses the local cmpxchg. Does not support SMP.
*/
#ifdef CONFIG_SMP
#error "Cannot use generic cmpxchg on SMP"
#endif
/*
* Atomic compare and exchange.
*
* Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
* a cmpxchg primitive faster than repeated local irq save/restore exists.
*/
#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif

@@ -0,0 +1,70 @@
#ifndef _ASM_GENERIC_CPUTIME_H
#define _ASM_GENERIC_CPUTIME_H
#include <linux/time.h>
#include <linux/jiffies.h>
typedef unsigned long cputime_t;
#define cputime_zero (0UL)
#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_max ((~0UL >> 1) - 1)
#define cputime_add(__a, __b) ((__a) + (__b))
#define cputime_sub(__a, __b) ((__a) - (__b))
#define cputime_div(__a, __n) ((__a) / (__n))
#define cputime_halve(__a) ((__a) >> 1)
#define cputime_eq(__a, __b) ((__a) == (__b))
#define cputime_gt(__a, __b) ((__a) > (__b))
#define cputime_ge(__a, __b) ((__a) >= (__b))
#define cputime_lt(__a, __b) ((__a) < (__b))
#define cputime_le(__a, __b) ((__a) <= (__b))
#define cputime_to_jiffies(__ct) (__ct)
#define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__hz) (__hz)
typedef u64 cputime64_t;
#define cputime64_zero (0ULL)
#define cputime64_add(__a, __b) ((__a) + (__b))
#define cputime64_sub(__a, __b) ((__a) - (__b))
#define cputime64_to_jiffies64(__ct) (__ct)
#define jiffies64_to_cputime64(__jif) (__jif)
#define cputime_to_cputime64(__ct) ((u64) __ct)
/*
* Convert cputime to milliseconds and back.
*/
#define cputime_to_msecs(__ct) jiffies_to_msecs(__ct)
#define msecs_to_cputime(__msecs) msecs_to_jiffies(__msecs)
/*
* Convert cputime to seconds and back.
*/
#define cputime_to_secs(jif) ((jif) / HZ)
#define secs_to_cputime(sec) ((sec) * HZ)
/*
* Convert cputime to timespec and back.
*/
#define timespec_to_cputime(__val) timespec_to_jiffies(__val)
#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val)
/*
* Convert cputime to timeval and back.
*/
#define timeval_to_cputime(__val) timeval_to_jiffies(__val)
#define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val)
/*
* Convert cputime to clock and back.
*/
#define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct)
#define clock_t_to_cputime(__x) clock_t_to_jiffies(__x)
/*
* Convert cputime64 to clock.
*/
#define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct)
#endif

@@ -0,0 +1,9 @@
#ifndef __ASM_GENERIC_CURRENT_H
#define __ASM_GENERIC_CURRENT_H
#include <linux/thread_info.h>
#define get_current() (current_thread_info()->task)
#define current get_current()
#endif /* __ASM_GENERIC_CURRENT_H */

@@ -0,0 +1,9 @@
#ifndef __ASM_GENERIC_DELAY_H
#define __ASM_GENERIC_DELAY_H
extern void __udelay(unsigned long usecs);
extern void __delay(unsigned long loops);
#define udelay(n) __udelay(n)
#endif /* __ASM_GENERIC_DELAY_H */

@@ -0,0 +1,15 @@
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#ifndef _ASM_GENERIC_DEVICE_H
#define _ASM_GENERIC_DEVICE_H
struct dev_archdata {
};
struct pdev_archdata {
};
#endif /* _ASM_GENERIC_DEVICE_H */

@@ -0,0 +1,58 @@
#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
* Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
* Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
*
* The semantics of do_div() are:
*
* uint32_t do_div(uint64_t *n, uint32_t base)
* {
* uint32_t remainder = *n % base;
* *n = *n / base;
* return remainder;
* }
*
* NOTE: macro parameter n is evaluated multiple times,
* beware of side effects!
*/
#include <linux/types.h>
#include <linux/compiler.h>
#if BITS_PER_LONG == 64
# define do_div(n,base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
__rem = ((uint64_t)(n)) % __base; \
(n) = ((uint64_t)(n)) / __base; \
__rem; \
})
#elif BITS_PER_LONG == 32
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
/* The unnecessary pointer compare is there
* to check for type safety (n must be 64-bit)
*/
# define do_div(n,base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
(void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
if (likely(((n) >> 32) == 0)) { \
__rem = (uint32_t)(n) % __base; \
(n) = (uint32_t)(n) / __base; \
} else \
__rem = __div64_32(&(n), __base); \
__rem; \
})
#else /* BITS_PER_LONG == ?? */
# error do_div() does not yet support the C64
#endif /* BITS_PER_LONG */
#endif /* _ASM_GENERIC_DIV64_H */
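A short usage sketch (split_ns() is hypothetical), the point being that do_div() divides its first argument in place and returns the remainder:

/* Sketch: splitting nanoseconds into seconds and leftover ns. */
static uint32_t split_ns(uint64_t ns, uint64_t *secs)
{
    uint32_t rem = do_div(ns, 1000000000u);    /* ns is now whole seconds */

    *secs = ns;
    return rem;    /* 0..999999999 leftover nanoseconds */
}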

@@ -0,0 +1,32 @@
#ifndef DMA_COHERENT_H
#define DMA_COHERENT_H
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
* These two functions are only for the DMA allocator.
* Don't use them in device drivers.
*/
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
/*
* Standard interface
*/
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dma_addr_t device_addr, size_t size, int flags);
extern void
dma_release_declared_memory(struct device *dev);
extern void *
dma_mark_declared_memory_occupied(struct device *dev,
dma_addr_t device_addr, size_t size);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#endif
#endif

@@ -0,0 +1,82 @@
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H
/* define the dma api to allow compilation but not linking of
* dma dependent code. Code that depends on the dma-mapping
* API needs to set 'depends on HAS_DMA' in its Kconfig
*/
struct scatterlist;
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag);
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
extern dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction);
extern void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction);
extern int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction);
extern void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction);
extern dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction);
extern void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction);
extern void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction);
extern void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction);
extern void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction);
#define dma_sync_single_for_device dma_sync_single_for_cpu
#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu
#define dma_sync_sg_for_device dma_sync_sg_for_cpu
extern int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
extern int
dma_supported(struct device *dev, u64 mask);
extern int
dma_set_mask(struct device *dev, u64 mask);
extern int
dma_get_cache_alignment(void);
extern int
dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
extern void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#endif /* _ASM_GENERIC_DMA_MAPPING_H */

@@ -0,0 +1,184 @@
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H
#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
kmemcheck_mark_initialized(ptr, size);
BUG_ON(!valid_dma_direction(dir));
addr = ops->map_page(dev, virt_to_page(ptr),
(unsigned long)ptr & ~PAGE_MASK, size,
dir, attrs);
debug_dma_map_page(dev, virt_to_page(ptr),
(unsigned long)ptr & ~PAGE_MASK, size,
dir, addr, true);
return addr;
}
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
ops->unmap_page(dev, addr, size, dir, attrs);
debug_dma_unmap_page(dev, addr, size, dir, true);
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
int i, ents;
struct scatterlist *s;
for_each_sg(sg, s, nents, i)
kmemcheck_mark_initialized(sg_virt(s), s->length);
BUG_ON(!valid_dma_direction(dir));
ents = ops->map_sg(dev, sg, nents, dir, attrs);
debug_dma_map_sg(dev, sg, nents, ents, dir);
return ents;
}
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
debug_dma_unmap_sg(dev, sg, nents, dir);
if (ops->unmap_sg)
ops->unmap_sg(dev, sg, nents, dir, attrs);
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
kmemcheck_mark_initialized(page_address(page) + offset, size);
BUG_ON(!valid_dma_direction(dir));
addr = ops->map_page(dev, page, offset, size, dir, NULL);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
return addr;
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
ops->unmap_page(dev, addr, size, dir, NULL);
debug_dma_unmap_page(dev, addr, size, dir, false);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
size_t size,
enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_cpu)
ops->sync_single_for_cpu(dev, addr, size, dir);
debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_device)
ops->sync_single_for_device(dev, addr, size, dir);
debug_dma_sync_single_for_device(dev, addr, size, dir);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t addr,
unsigned long offset,
size_t size,
enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_range_for_cpu) {
ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
} else
dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t addr,
unsigned long offset,
size_t size,
enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_range_for_device) {
ops->sync_single_range_for_device(dev, addr, offset, size, dir);
debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
} else
dma_sync_single_for_device(dev, addr + offset, size, dir);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_cpu)
ops->sync_sg_for_cpu(dev, sg, nelems, dir);
debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_device)
ops->sync_sg_for_device(dev, sg, nelems, dir);
debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
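/*
 * Illustrative streaming-DMA sequence built from the wrappers above
 * (a sketch only; "dev", "buf" and "len" are hypothetical driver
 * values):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... let the device read from "handle", then ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */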
#endif

View File

@@ -0,0 +1,15 @@
#ifndef __ASM_GENERIC_DMA_H
#define __ASM_GENERIC_DMA_H
/*
* This file traditionally describes the i8237 PC style DMA controller.
* Most architectures don't have these any more and can get the minimal
* implementation from kernel/dma.c by not defining MAX_DMA_CHANNELS.
*
* Some code relies on seeing MAX_DMA_ADDRESS though.
*/
#define MAX_DMA_ADDRESS PAGE_OFFSET
extern int request_dma(unsigned int dmanr, const char *device_id);
extern void free_dma(unsigned int dmanr);
#endif /* __ASM_GENERIC_DMA_H */

View File

@@ -0,0 +1,9 @@
#ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
#define _ASM_GENERIC_EMERGENCY_RESTART_H
static inline void machine_emergency_restart(void)
{
machine_restart(NULL);
}
#endif /* _ASM_GENERIC_EMERGENCY_RESTART_H */

View File

@@ -0,0 +1,39 @@
#ifndef _ASM_GENERIC_ERRNO_BASE_H
#define _ASM_GENERIC_ERRNO_BASE_H
#define EPERM 1 /* Operation not permitted */
#define ENOENT 2 /* No such file or directory */
#define ESRCH 3 /* No such process */
#define EINTR 4 /* Interrupted system call */
#define EIO 5 /* I/O error */
#define ENXIO 6 /* No such device or address */
#define E2BIG 7 /* Argument list too long */
#define ENOEXEC 8 /* Exec format error */
#define EBADF 9 /* Bad file number */
#define ECHILD 10 /* No child processes */
#define EAGAIN 11 /* Try again */
#define ENOMEM 12 /* Out of memory */
#define EACCES 13 /* Permission denied */
#define EFAULT 14 /* Bad address */
#define ENOTBLK 15 /* Block device required */
#define EBUSY 16 /* Device or resource busy */
#define EEXIST 17 /* File exists */
#define EXDEV 18 /* Cross-device link */
#define ENODEV 19 /* No such device */
#define ENOTDIR 20 /* Not a directory */
#define EISDIR 21 /* Is a directory */
#define EINVAL 22 /* Invalid argument */
#define ENFILE 23 /* File table overflow */
#define EMFILE 24 /* Too many open files */
#define ENOTTY 25 /* Not a typewriter */
#define ETXTBSY 26 /* Text file busy */
#define EFBIG 27 /* File too large */
#define ENOSPC 28 /* No space left on device */
#define ESPIPE 29 /* Illegal seek */
#define EROFS 30 /* Read-only file system */
#define EMLINK 31 /* Too many links */
#define EPIPE 32 /* Broken pipe */
#define EDOM 33 /* Math argument out of domain of func */
#define ERANGE 34 /* Math result not representable */
#endif

View File

@@ -0,0 +1,111 @@
#ifndef _ASM_GENERIC_ERRNO_H
#define _ASM_GENERIC_ERRNO_H
#include <asm-generic/errno-base.h>
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
#define ENOSYS 38 /* Function not implemented */
#define ENOTEMPTY 39 /* Directory not empty */
#define ELOOP 40 /* Too many symbolic links encountered */
#define EWOULDBLOCK EAGAIN /* Operation would block */
#define ENOMSG 42 /* No message of desired type */
#define EIDRM 43 /* Identifier removed */
#define ECHRNG 44 /* Channel number out of range */
#define EL2NSYNC 45 /* Level 2 not synchronized */
#define EL3HLT 46 /* Level 3 halted */
#define EL3RST 47 /* Level 3 reset */
#define ELNRNG 48 /* Link number out of range */
#define EUNATCH 49 /* Protocol driver not attached */
#define ENOCSI 50 /* No CSI structure available */
#define EL2HLT 51 /* Level 2 halted */
#define EBADE 52 /* Invalid exchange */
#define EBADR 53 /* Invalid request descriptor */
#define EXFULL 54 /* Exchange full */
#define ENOANO 55 /* No anode */
#define EBADRQC 56 /* Invalid request code */
#define EBADSLT 57 /* Invalid slot */
#define EDEADLOCK EDEADLK
#define EBFONT 59 /* Bad font file format */
#define ENOSTR 60 /* Device not a stream */
#define ENODATA 61 /* No data available */
#define ETIME 62 /* Timer expired */
#define ENOSR 63 /* Out of streams resources */
#define ENONET 64 /* Machine is not on the network */
#define ENOPKG 65 /* Package not installed */
#define EREMOTE 66 /* Object is remote */
#define ENOLINK 67 /* Link has been severed */
#define EADV 68 /* Advertise error */
#define ESRMNT 69 /* Srmount error */
#define ECOMM 70 /* Communication error on send */
#define EPROTO 71 /* Protocol error */
#define EMULTIHOP 72 /* Multihop attempted */
#define EDOTDOT 73 /* RFS specific error */
#define EBADMSG 74 /* Not a data message */
#define EOVERFLOW 75 /* Value too large for defined data type */
#define ENOTUNIQ 76 /* Name not unique on network */
#define EBADFD 77 /* File descriptor in bad state */
#define EREMCHG 78 /* Remote address changed */
#define ELIBACC 79 /* Can not access a needed shared library */
#define ELIBBAD 80 /* Accessing a corrupted shared library */
#define ELIBSCN 81 /* .lib section in a.out corrupted */
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
#define ELIBEXEC 83 /* Cannot exec a shared library directly */
#define EILSEQ 84 /* Illegal byte sequence */
#define ERESTART 85 /* Interrupted system call should be restarted */
#define ESTRPIPE 86 /* Streams pipe error */
#define EUSERS 87 /* Too many users */
#define ENOTSOCK 88 /* Socket operation on non-socket */
#define EDESTADDRREQ 89 /* Destination address required */
#define EMSGSIZE 90 /* Message too long */
#define EPROTOTYPE 91 /* Protocol wrong type for socket */
#define ENOPROTOOPT 92 /* Protocol not available */
#define EPROTONOSUPPORT 93 /* Protocol not supported */
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
#define EPFNOSUPPORT 96 /* Protocol family not supported */
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
#define EADDRINUSE 98 /* Address already in use */
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
#define ENETDOWN 100 /* Network is down */
#define ENETUNREACH 101 /* Network is unreachable */
#define ENETRESET 102 /* Network dropped connection because of reset */
#define ECONNABORTED 103 /* Software caused connection abort */
#define ECONNRESET 104 /* Connection reset by peer */
#define ENOBUFS 105 /* No buffer space available */
#define EISCONN 106 /* Transport endpoint is already connected */
#define ENOTCONN 107 /* Transport endpoint is not connected */
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 109 /* Too many references: cannot splice */
#define ETIMEDOUT 110 /* Connection timed out */
#define ECONNREFUSED 111 /* Connection refused */
#define EHOSTDOWN 112 /* Host is down */
#define EHOSTUNREACH 113 /* No route to host */
#define EALREADY 114 /* Operation already in progress */
#define EINPROGRESS 115 /* Operation now in progress */
#define ESTALE 116 /* Stale NFS file handle */
#define EUCLEAN 117 /* Structure needs cleaning */
#define ENOTNAM 118 /* Not a XENIX named type file */
#define ENAVAIL 119 /* No XENIX semaphores available */
#define EISNAM 120 /* Is a named type file */
#define EREMOTEIO 121 /* Remote I/O error */
#define EDQUOT 122 /* Quota exceeded */
#define ENOMEDIUM 123 /* No medium found */
#define EMEDIUMTYPE 124 /* Wrong medium type */
#define ECANCELED 125 /* Operation Canceled */
#define ENOKEY 126 /* Required key not available */
#define EKEYEXPIRED 127 /* Key has expired */
#define EKEYREVOKED 128 /* Key has been revoked */
#define EKEYREJECTED 129 /* Key was rejected by service */
/* for robust mutexes */
#define EOWNERDEAD 130 /* Owner died */
#define ENOTRECOVERABLE 131 /* State not recoverable */
#define ERFKILL 132 /* Operation not possible due to RF-kill */
#endif

View File

@@ -0,0 +1,12 @@
#ifndef __ASM_GENERIC_FB_H_
#define __ASM_GENERIC_FB_H_
#include <linux/fb.h>
#define fb_pgprotect(...) do {} while (0)
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* __ASM_GENERIC_FB_H_ */

View File

@@ -0,0 +1,167 @@
#ifndef _ASM_GENERIC_FCNTL_H
#define _ASM_GENERIC_FCNTL_H
#include <linux/types.h>
/* open/fcntl - O_SYNC is only implemented on block devices and on files
located on an ext2 file system */
#define O_ACCMODE 00000003
#define O_RDONLY 00000000
#define O_WRONLY 00000001
#define O_RDWR 00000002
#ifndef O_CREAT
#define O_CREAT 00000100 /* not fcntl */
#endif
#ifndef O_EXCL
#define O_EXCL 00000200 /* not fcntl */
#endif
#ifndef O_NOCTTY
#define O_NOCTTY 00000400 /* not fcntl */
#endif
#ifndef O_TRUNC
#define O_TRUNC 00001000 /* not fcntl */
#endif
#ifndef O_APPEND
#define O_APPEND 00002000
#endif
#ifndef O_NONBLOCK
#define O_NONBLOCK 00004000
#endif
#ifndef O_SYNC
#define O_SYNC 00010000
#endif
#ifndef FASYNC
#define FASYNC 00020000 /* fcntl, for BSD compatibility */
#endif
#ifndef O_DIRECT
#define O_DIRECT 00040000 /* direct disk access hint */
#endif
#ifndef O_LARGEFILE
#define O_LARGEFILE 00100000
#endif
#ifndef O_DIRECTORY
#define O_DIRECTORY 00200000 /* must be a directory */
#endif
#ifndef O_NOFOLLOW
#define O_NOFOLLOW 00400000 /* don't follow links */
#endif
#ifndef O_NOATIME
#define O_NOATIME 01000000
#endif
#ifndef O_CLOEXEC
#define O_CLOEXEC 02000000 /* set close_on_exec */
#endif
#ifndef O_NDELAY
#define O_NDELAY O_NONBLOCK
#endif
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#define F_SETFD 2 /* set/clear close_on_exec */
#define F_GETFL 3 /* get file->f_flags */
#define F_SETFL 4 /* set file->f_flags */
#ifndef F_GETLK
#define F_GETLK 5
#define F_SETLK 6
#define F_SETLKW 7
#endif
#ifndef F_SETOWN
#define F_SETOWN 8 /* for sockets. */
#define F_GETOWN 9 /* for sockets. */
#endif
#ifndef F_SETSIG
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
#endif
#ifndef CONFIG_64BIT
#ifndef F_GETLK64
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
#endif
#endif
#ifndef F_SETOWN_EX
#define F_SETOWN_EX 15
#define F_GETOWN_EX 16
#endif
#define F_OWNER_TID 0
#define F_OWNER_PID 1
#define F_OWNER_PGRP 2
struct f_owner_ex {
int type;
pid_t pid;
};
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
/* for posix fcntl() and lockf() */
#ifndef F_RDLCK
#define F_RDLCK 0
#define F_WRLCK 1
#define F_UNLCK 2
#endif
/* for old implementation of bsd flock() */
#ifndef F_EXLCK
#define F_EXLCK 4 /* or 3 */
#define F_SHLCK 8 /* or 4 */
#endif
/* for leases */
#ifndef F_INPROGRESS
#define F_INPROGRESS 16
#endif
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
#define LOCK_NB 4 /* or'd with one of the above to prevent
blocking */
#define LOCK_UN 8 /* remove lock */
#define LOCK_MAND 32 /* This is a mandatory flock ... */
#define LOCK_READ 64 /* which allows concurrent read operations */
#define LOCK_WRITE 128 /* which allows concurrent write operations */
#define LOCK_RW 192 /* which allows concurrent read & write ops */
#define F_LINUX_SPECIFIC_BASE 1024
#ifndef HAVE_ARCH_STRUCT_FLOCK
#ifndef __ARCH_FLOCK_PAD
#define __ARCH_FLOCK_PAD
#endif
struct flock {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
__kernel_pid_t l_pid;
__ARCH_FLOCK_PAD
};
#endif
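/*
 * Userspace illustration (not part of this header; fcntl() and
 * SEEK_SET come from the C library): take a write lock on the first
 * 100 bytes of an open descriptor "fd":
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 100,
 *	};
 *	fcntl(fd, F_SETLKW, &fl);
 */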
#ifndef CONFIG_64BIT
#ifndef HAVE_ARCH_STRUCT_FLOCK64
#ifndef __ARCH_FLOCK64_PAD
#define __ARCH_FLOCK64_PAD
#endif
struct flock64 {
short l_type;
short l_whence;
__kernel_loff_t l_start;
__kernel_loff_t l_len;
__kernel_pid_t l_pid;
__ARCH_FLOCK64_PAD
};
#endif
#endif /* !CONFIG_64BIT */
#endif /* _ASM_GENERIC_FCNTL_H */

View File

@@ -0,0 +1,56 @@
#ifndef _ASM_GENERIC_FUTEX_H
#define _ASM_GENERIC_FUTEX_H
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
static inline int
futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
case FUTEX_OP_ADD:
case FUTEX_OP_OR:
case FUTEX_OP_ANDN:
case FUTEX_OP_XOR:
default:
ret = -ENOSYS;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
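/*
 * For illustration: userspace packs "encoded_op" with the FUTEX_OP()
 * macro from <linux/futex.h>.  For example,
 * FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0) means "add 1 to *uaddr
 * and report whether the old value was greater than 0", which the
 * shifts above unpack into op, cmp, oparg and cmparg.
 */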
static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
return -ENOSYS;
}
#endif

View File

@@ -0,0 +1,24 @@
#ifndef __ASM_GENERIC_GETORDER_H
#define __ASM_GENERIC_GETORDER_H
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
/* Pure 2^n version of get_order */
static inline __attribute_const__ int get_order(unsigned long size)
{
int order;
size = (size - 1) >> (PAGE_SHIFT - 1);
order = -1;
do {
size >>= 1;
order++;
} while (size);
return order;
}
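/*
 * Worked examples, assuming PAGE_SHIFT == 12 (4KiB pages):
 *
 *	get_order(1)    == 0
 *	get_order(4096) == 0	(exactly one page)
 *	get_order(4097) == 1	(rounds up to two pages)
 *	get_order(8192) == 1
 */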
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_GETORDER_H */

View File

@@ -0,0 +1,202 @@
#ifndef _ASM_GENERIC_GPIO_H
#define _ASM_GENERIC_GPIO_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#ifdef CONFIG_GPIOLIB
#include <linux/compiler.h>
/* Platforms may implement their GPIO interface with library code,
* at a small performance cost for non-inlined operations and some
* extra memory (for code and for per-GPIO table entries).
*
* While the GPIO programming interface defines valid GPIO numbers
* to be in the range 0..MAX_INT, this library restricts them to the
* smaller range 0..ARCH_NR_GPIOS-1.
*/
#ifndef ARCH_NR_GPIOS
#define ARCH_NR_GPIOS 256
#endif
static inline int gpio_is_valid(int number)
{
/* only some non-negative numbers are valid */
return ((unsigned)number) < ARCH_NR_GPIOS;
}
struct seq_file;
struct module;
/**
* struct gpio_chip - abstract a GPIO controller
* @label: for diagnostics
* @dev: optional device providing the GPIOs
* @owner: helps prevent removal of modules exporting active GPIOs
* @request: optional hook for chip-specific activation, such as
* enabling module power and clock; may sleep
* @free: optional hook for chip-specific deactivation, such as
* disabling module power and clock; may sleep
* @direction_input: configures signal "offset" as input, or returns error
* @get: returns value for signal "offset"; for output signals this
* returns either the value actually sensed, or zero
* @direction_output: configures signal "offset" as output, or returns error
* @set: assigns output value for signal "offset"
* @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
* implementation may not sleep
* @dbg_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra
* state (such as pullup/pulldown configuration).
* @base: identifies the first GPIO number handled by this chip; or, if
* negative during registration, requests dynamic ID allocation.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
* @can_sleep: flag must be set iff get()/set() methods sleep, as they
* must while accessing GPIO expander chips over I2C or SPI
* @names: if set, must be an array of strings to use as alternative
* names for the GPIOs in this chip. Any entry in the array
* may be NULL if there is no alias for the GPIO, however the
* array must be @ngpio entries long.
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programming interface.
* Example sources would be SOC controllers, FPGAs, multifunction
* chips, dedicated GPIO expanders, and so on.
*
* Each chip controls a number of signals, identified in method calls
* by "offset" values in the range 0..(@ngpio - 1). When those signals
* are referenced through calls like gpio_get_value(gpio), the offset
* is calculated by subtracting @base from the gpio number.
*/
struct gpio_chip {
const char *label;
struct device *dev;
struct module *owner;
int (*request)(struct gpio_chip *chip,
unsigned offset);
void (*free)(struct gpio_chip *chip,
unsigned offset);
int (*direction_input)(struct gpio_chip *chip,
unsigned offset);
int (*get)(struct gpio_chip *chip,
unsigned offset);
int (*direction_output)(struct gpio_chip *chip,
unsigned offset, int value);
void (*set)(struct gpio_chip *chip,
unsigned offset, int value);
int (*to_irq)(struct gpio_chip *chip,
unsigned offset);
void (*dbg_show)(struct seq_file *s,
struct gpio_chip *chip);
int base;
u16 ngpio;
char **names;
unsigned can_sleep:1;
unsigned exported:1;
};
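/*
 * Registration sketch (hypothetical driver: "my_gpio_get" and
 * "my_gpio_set" are made-up callbacks used only for illustration):
 *
 *	static struct gpio_chip my_chip = {
 *		.label	= "my-gpio",
 *		.owner	= THIS_MODULE,
 *		.get	= my_gpio_get,
 *		.set	= my_gpio_set,
 *		.base	= -1,		(request dynamic numbering)
 *		.ngpio	= 8,
 *	};
 *
 *	status = gpiochip_add(&my_chip);
 */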
extern const char *gpiochip_is_requested(struct gpio_chip *chip,
unsigned offset);
extern int __must_check gpiochip_reserve(int start, int ngpio);
/* add/remove chips */
extern int gpiochip_add(struct gpio_chip *chip);
extern int __must_check gpiochip_remove(struct gpio_chip *chip);
/* Always use the library code for GPIO management calls,
* or when sleeping may be involved.
*/
extern int gpio_request(unsigned gpio, const char *label);
extern void gpio_free(unsigned gpio);
extern int gpio_direction_input(unsigned gpio);
extern int gpio_direction_output(unsigned gpio, int value);
extern int gpio_get_value_cansleep(unsigned gpio);
extern void gpio_set_value_cansleep(unsigned gpio, int value);
/* A platform's <asm/gpio.h> code may want to inline the I/O calls when
* the GPIO is constant and refers to some always-present controller,
* giving direct access to chip registers and tight bitbanging loops.
*/
extern int __gpio_get_value(unsigned gpio);
extern void __gpio_set_value(unsigned gpio, int value);
extern int __gpio_cansleep(unsigned gpio);
extern int __gpio_to_irq(unsigned gpio);
#ifdef CONFIG_GPIO_SYSFS
/*
* A sysfs interface can be exported by individual drivers if they want,
* but more typically is configured entirely from userspace.
*/
extern int gpio_export(unsigned gpio, bool direction_may_change);
extern int gpio_export_link(struct device *dev, const char *name,
unsigned gpio);
extern void gpio_unexport(unsigned gpio);
#endif /* CONFIG_GPIO_SYSFS */
#else /* !CONFIG_GPIOLIB */
static inline int gpio_is_valid(int number)
{
/* only non-negative numbers are valid */
return number >= 0;
}
/* platforms that don't directly support access to GPIOs through I2C, SPI,
* or other blocking infrastructure can use these wrappers.
*/
static inline int gpio_cansleep(unsigned gpio)
{
return 0;
}
static inline int gpio_get_value_cansleep(unsigned gpio)
{
might_sleep();
return gpio_get_value(gpio);
}
static inline void gpio_set_value_cansleep(unsigned gpio, int value)
{
might_sleep();
gpio_set_value(gpio, value);
}
#endif /* !CONFIG_GPIOLIB */
#ifndef CONFIG_GPIO_SYSFS
/* sysfs support is only available with gpiolib, where it's optional */
static inline int gpio_export(unsigned gpio, bool direction_may_change)
{
return -ENOSYS;
}
static inline int gpio_export_link(struct device *dev, const char *name,
unsigned gpio)
{
return -ENOSYS;
}
static inline void gpio_unexport(unsigned gpio)
{
}
#endif /* CONFIG_GPIO_SYSFS */
#endif /* _ASM_GENERIC_GPIO_H */

View File

@@ -0,0 +1,21 @@
#ifndef __ASM_GENERIC_HARDIRQ_H
#define __ASM_GENERIC_HARDIRQ_H
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/irq.h>
typedef struct {
unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#ifndef ack_bad_irq
static inline void ack_bad_irq(unsigned int irq)
{
printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
}
#endif
#endif /* __ASM_GENERIC_HARDIRQ_H */

View File

@@ -0,0 +1,9 @@
#ifndef __ASM_GENERIC_HW_IRQ_H
#define __ASM_GENERIC_HW_IRQ_H
/*
* hw_irq.h has internal declarations for the low-level interrupt
* controller, like the original i8259A.
* In general, this is not needed for new architectures.
*/
#endif /* __ASM_GENERIC_HW_IRQ_H */

View File

@@ -0,0 +1,38 @@
/* Generic I/O and MEMIO string operations. */
#define __ide_insw insw
#define __ide_insl insl
#define __ide_outsw outsw
#define __ide_outsl outsl
static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
{
while (count--) {
*(u16 *)addr = readw(port);
addr += 2;
}
}
static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
{
while (count--) {
*(u32 *)addr = readl(port);
addr += 4;
}
}
static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
{
while (count--) {
writew(*(u16 *)addr, port);
addr += 2;
}
}
static __inline__ void __ide_mm_outsl(void __iomem *port, void *addr, u32 count)
{
while (count--) {
writel(*(u32 *)addr, port);
addr += 4;
}
}

View File

@@ -0,0 +1,73 @@
/*
* asm-generic/int-l64.h
*
* Integer declarations for architectures which use "long"
* for 64-bit types.
*/
#ifndef _ASM_GENERIC_INT_L64_H
#define _ASM_GENERIC_INT_L64_H
#include <asm/bitsperlong.h>
#ifndef __ASSEMBLY__
/*
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
* header files exported to user space
*/
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
typedef __signed__ long __s64;
typedef unsigned long __u64;
#endif /* __ASSEMBLY__ */
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
typedef signed char s8;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
typedef signed long s64;
typedef unsigned long u64;
#define S8_C(x) x
#define U8_C(x) x ## U
#define S16_C(x) x
#define U16_C(x) x ## U
#define S32_C(x) x
#define U32_C(x) x ## U
#define S64_C(x) x ## L
#define U64_C(x) x ## UL
#else /* __ASSEMBLY__ */
#define S8_C(x) x
#define U8_C(x) x
#define S16_C(x) x
#define U16_C(x) x
#define S32_C(x) x
#define U32_C(x) x
#define S64_C(x) x
#define U64_C(x) x
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_INT_L64_H */

View File

@@ -0,0 +1,78 @@
/*
* asm-generic/int-ll64.h
*
* Integer declarations for architectures which use "long long"
* for 64-bit types.
*/
#ifndef _ASM_GENERIC_INT_LL64_H
#define _ASM_GENERIC_INT_LL64_H
#include <asm/bitsperlong.h>
#ifndef __ASSEMBLY__
/*
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
* header files exported to user space
*/
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
#ifdef __GNUC__
__extension__ typedef __signed__ long long __s64;
__extension__ typedef unsigned long long __u64;
#else
typedef __signed__ long long __s64;
typedef unsigned long long __u64;
#endif
#endif /* __ASSEMBLY__ */
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
typedef signed char s8;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
#define S8_C(x) x
#define U8_C(x) x ## U
#define S16_C(x) x
#define U16_C(x) x ## U
#define S32_C(x) x
#define U32_C(x) x ## U
#define S64_C(x) x ## LL
#define U64_C(x) x ## ULL
#else /* __ASSEMBLY__ */
#define S8_C(x) x
#define U8_C(x) x
#define S16_C(x) x
#define U16_C(x) x
#define S32_C(x) x
#define U32_C(x) x
#define S64_C(x) x
#define U64_C(x) x
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_INT_LL64_H */

View File

@@ -0,0 +1,300 @@
/* Generic I/O port emulation, based on MN10300 code
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H
#include <asm/page.h> /* I/O is all done through memory accesses */
#include <asm/cacheflush.h>
#include <linux/types.h>
#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif
#define mmiowb() do {} while (0)
/*****************************************************************************/
/*
* readX/writeX() are used to access memory mapped devices. On some
* architectures the memory mapped IO stuff needs to be accessed
* differently. On the simple architectures, we just read/write the
* memory location directly.
*/
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
return *(const volatile u8 __force *) addr;
}
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
return *(const volatile u16 __force *) addr;
}
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
return *(const volatile u32 __force *) addr;
}
#define readb __raw_readb
#define readw(addr) __le16_to_cpu(__raw_readw(addr))
#define readl(addr) __le32_to_cpu(__raw_readl(addr))
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
*(volatile u8 __force *) addr = b;
}
static inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
*(volatile u16 __force *) addr = b;
}
static inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
*(volatile u32 __force *) addr = b;
}
#define writeb __raw_writeb
#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
#ifdef CONFIG_64BIT
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
return *(const volatile u64 __force *) addr;
}
#define readq(addr) __le64_to_cpu(__raw_readq(addr))
static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
*(volatile u64 __force *) addr = b;
}
#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
#endif
/*****************************************************************************/
/*
* traditional input/output functions
*/
static inline u8 inb(unsigned long addr)
{
return readb((volatile void __iomem *) addr);
}
static inline u16 inw(unsigned long addr)
{
return readw((volatile void __iomem *) addr);
}
static inline u32 inl(unsigned long addr)
{
return readl((volatile void __iomem *) addr);
}
static inline void outb(u8 b, unsigned long addr)
{
writeb(b, (volatile void __iomem *) addr);
}
static inline void outw(u16 b, unsigned long addr)
{
writew(b, (volatile void __iomem *) addr);
}
static inline void outl(u32 b, unsigned long addr)
{
writel(b, (volatile void __iomem *) addr);
}
#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
#define inl_p(addr) inl(addr)
#define outb_p(x, addr) outb((x), (addr))
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))
static inline void insb(unsigned long addr, void *buffer, int count)
{
if (count) {
u8 *buf = buffer;
do {
u8 x = inb(addr);
*buf++ = x;
} while (--count);
}
}
static inline void insw(unsigned long addr, void *buffer, int count)
{
if (count) {
u16 *buf = buffer;
do {
u16 x = inw(addr);
*buf++ = x;
} while (--count);
}
}
static inline void insl(unsigned long addr, void *buffer, int count)
{
if (count) {
u32 *buf = buffer;
do {
u32 x = inl(addr);
*buf++ = x;
} while (--count);
}
}
static inline void outsb(unsigned long addr, const void *buffer, int count)
{
if (count) {
const u8 *buf = buffer;
do {
outb(*buf++, addr);
} while (--count);
}
}
static inline void outsw(unsigned long addr, const void *buffer, int count)
{
if (count) {
const u16 *buf = buffer;
do {
outw(*buf++, addr);
} while (--count);
}
}
static inline void outsl(unsigned long addr, const void *buffer, int count)
{
if (count) {
const u32 *buf = buffer;
do {
outl(*buf++, addr);
} while (--count);
}
}
#ifndef CONFIG_GENERIC_IOMAP
#define ioread8(addr) readb(addr)
#define ioread16(addr) readw(addr)
#define ioread32(addr) readl(addr)
#define iowrite8(v, addr) writeb((v), (addr))
#define iowrite16(v, addr) writew((v), (addr))
#define iowrite32(v, addr) writel((v), (addr))
#define ioread8_rep(p, dst, count) \
insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
insw((unsigned long) (p), (dst), (count))
#define ioread32_rep(p, dst, count) \
insl((unsigned long) (p), (dst), (count))
#define iowrite8_rep(p, src, count) \
outsb((unsigned long) (p), (src), (count))
#define iowrite16_rep(p, src, count) \
outsw((unsigned long) (p), (src), (count))
#define iowrite32_rep(p, src, count) \
outsl((unsigned long) (p), (src), (count))
#endif /* CONFIG_GENERIC_IOMAP */
#define IO_SPACE_LIMIT 0xffffffff
#ifdef __KERNEL__
#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *) (x))
#ifndef CONFIG_GENERIC_IOMAP
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif /* CONFIG_GENERIC_IOMAP */
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
*/
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa((unsigned long)address);
}
static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
/*
* Change "struct page" to physical address.
*/
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
return (void __iomem *)(unsigned long)offset;
}
#define __ioremap(offset, size, flags) ioremap(offset, size)
#ifndef ioremap_nocache
#define ioremap_nocache ioremap
#endif
#ifndef ioremap_wc
#define ioremap_wc ioremap_nocache
#endif
static inline void iounmap(void *addr)
{
}
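/*
 * Sketch of how these trivial mappings are meant to be used ("base"
 * and the 0x04 register offset are made-up values):
 *
 *	void __iomem *regs = ioremap(base, 0x100);
 *	u32 status = readl(regs + 0x04);
 *	writel(status | 1, regs + 0x04);
 *	iounmap(regs);
 */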
#ifndef CONFIG_GENERIC_IOMAP
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
return (void __iomem *) port;
}
static inline void ioport_unmap(void __iomem *p)
{
}
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#define xlate_dev_kmem_ptr(p) p
#define xlate_dev_mem_ptr(p) ((void *) (p))
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(volatile void *address)
{
return ((unsigned long) address);
}
static inline void *bus_to_virt(unsigned long address)
{
return (void *) address;
}
#endif
#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_IO_H */

View File

@@ -0,0 +1,105 @@
#ifndef _ASM_GENERIC_IOCTL_H
#define _ASM_GENERIC_IOCTL_H
/* ioctl command encoding: 32 bits total, command in lower 16 bits,
* size of the parameter structure in the lower 14 bits of the
* upper 16 bits.
* Encoding the size of the parameter structure in the ioctl request
* is useful for catching programs compiled with old versions
* and to avoid overwriting user space outside the user buffer area.
* The highest 2 bits are reserved for indicating the ``access mode''.
* NOTE: This limits the max parameter size to 16kB - 1!
*/
/*
* The following is for compatibility across the various Linux
* platforms. The generic ioctl numbering scheme doesn't really enforce
* a type field. De facto, however, the top 8 bits of the lower 16
* bits are indeed used as a type field, so we might just as well make
* this explicit here. Please be sure to use the decoding macros
* below from now on.
*/
#define _IOC_NRBITS 8
#define _IOC_TYPEBITS 8
/*
* Let any architecture override either of the following before
* including this file.
*/
#ifndef _IOC_SIZEBITS
# define _IOC_SIZEBITS 14
#endif
#ifndef _IOC_DIRBITS
# define _IOC_DIRBITS 2
#endif
#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
#define _IOC_NRSHIFT 0
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
/*
* Direction bits, which any architecture can choose to override
* before including this file.
*/
#ifndef _IOC_NONE
# define _IOC_NONE 0U
#endif
#ifndef _IOC_WRITE
# define _IOC_WRITE 1U
#endif
#ifndef _IOC_READ
# define _IOC_READ 2U
#endif
#define _IOC(dir,type,nr,size) \
(((dir) << _IOC_DIRSHIFT) | \
((type) << _IOC_TYPESHIFT) | \
((nr) << _IOC_NRSHIFT) | \
((size) << _IOC_SIZESHIFT))
#ifdef __KERNEL__
/* provoke compile error for invalid uses of size argument */
extern unsigned int __invalid_size_argument_for_IOC;
#define _IOC_TYPECHECK(t) \
((sizeof(t) == sizeof(t[1]) && \
sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
sizeof(t) : __invalid_size_argument_for_IOC)
#else
#define _IOC_TYPECHECK(t) (sizeof(t))
#endif
/* used to create numbers */
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
/* used to decode ioctl numbers.. */
#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
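/*
 * Example of encoding and decoding a number (hypothetical 'M' driver):
 *
 *	struct mydev_info { __u32 flags; };
 *	#define MYDEV_GET_INFO	_IOR('M', 0x01, struct mydev_info)
 *
 * _IOC_TYPE(MYDEV_GET_INFO) is then 'M', _IOC_NR() is 0x01 and
 * _IOC_SIZE() is sizeof(struct mydev_info).
 */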
/* ...and for the drivers/sound files... */
#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
#endif /* _ASM_GENERIC_IOCTL_H */

View File

@@ -0,0 +1,110 @@
#ifndef __ASM_GENERIC_IOCTLS_H
#define __ASM_GENERIC_IOCTLS_H
#include <linux/ioctl.h>
/*
* These are the most common definitions for tty ioctl numbers.
* Most of them do not use the recommended _IOC(), but there is
* probably some source code out there hardcoding the number,
* so we might as well use them for all new platforms.
*
* The architectures that use different values here typically
* try to be compatible with some Unix variants for the same
* architecture.
*/
/* 0x54 is just a magic number to make these relatively unique ('T') */
#define TCGETS 0x5401
#define TCSETS 0x5402
#define TCSETSW 0x5403
#define TCSETSF 0x5404
#define TCGETA 0x5405
#define TCSETA 0x5406
#define TCSETAW 0x5407
#define TCSETAF 0x5408
#define TCSBRK 0x5409
#define TCXONC 0x540A
#define TCFLSH 0x540B
#define TIOCEXCL 0x540C
#define TIOCNXCL 0x540D
#define TIOCSCTTY 0x540E
#define TIOCGPGRP 0x540F
#define TIOCSPGRP 0x5410
#define TIOCOUTQ 0x5411
#define TIOCSTI 0x5412
#define TIOCGWINSZ 0x5413
#define TIOCSWINSZ 0x5414
#define TIOCMGET 0x5415
#define TIOCMBIS 0x5416
#define TIOCMBIC 0x5417
#define TIOCMSET 0x5418
#define TIOCGSOFTCAR 0x5419
#define TIOCSSOFTCAR 0x541A
#define FIONREAD 0x541B
#define TIOCINQ FIONREAD
#define TIOCLINUX 0x541C
#define TIOCCONS 0x541D
#define TIOCGSERIAL 0x541E
#define TIOCSSERIAL 0x541F
#define TIOCPKT 0x5420
#define FIONBIO 0x5421
#define TIOCNOTTY 0x5422
#define TIOCSETD 0x5423
#define TIOCGETD 0x5424
#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
#define TCGETS2 _IOR('T', 0x2A, struct termios2)
#define TCSETS2 _IOW('T', 0x2B, struct termios2)
#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
#define TIOCGRS485 0x542E
#define TIOCSRS485 0x542F
#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */
#define TCSETX 0x5433
#define TCSETXF 0x5434
#define TCSETXW 0x5435
#define FIONCLEX 0x5450
#define FIOCLEX 0x5451
#define FIOASYNC 0x5452
#define TIOCSERCONFIG 0x5453
#define TIOCSERGWILD 0x5454
#define TIOCSERSWILD 0x5455
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
#define TIOCSERGETLSR 0x5459 /* Get line status register */
#define TIOCSERGETMULTI 0x545A /* Get multiport config */
#define TIOCSERSETMULTI 0x545B /* Set multiport config */
#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
/*
* some architectures define FIOQSIZE as 0x545E, which is used for
* TIOCGHAYESESP on others
*/
#ifndef FIOQSIZE
# define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
# define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
# define FIOQSIZE 0x5460
#endif
/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
#define TIOCPKT_FLUSHWRITE 2
#define TIOCPKT_STOP 4
#define TIOCPKT_START 8
#define TIOCPKT_NOSTOP 16
#define TIOCPKT_DOSTOP 32
#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#endif /* __ASM_GENERIC_IOCTLS_H */

View File

@@ -0,0 +1,72 @@
#ifndef __GENERIC_IO_H
#define __GENERIC_IO_H
#include <linux/linkage.h>
#include <asm/byteorder.h>
/*
* These are the "generic" interfaces for doing new-style
* memory-mapped or PIO accesses. Architectures may do
* their own arch-optimized versions, these just act as
* wrappers around the old-style IO register access functions:
* read[bwl]/write[bwl]/in[bwl]/out[bwl]
*
* Don't include this directly, include it from <asm/io.h>.
*/
/*
* Read/write from/to an (offsettable) iomem cookie. It might be a PIO
* access or a MMIO access, these functions don't care. The info is
* encoded in the hardware mapping set up by the mapping functions
* (or the cookie itself, depending on implementation and hw).
*
* The generic routines just encode the PIO/MMIO as part of the
* cookie, and coldly assume that the MMIO IO mappings are not
* in the low address range. Architectures for which this is not
* true can't use this generic implementation.
*/
extern unsigned int ioread8(void __iomem *);
extern unsigned int ioread16(void __iomem *);
extern unsigned int ioread16be(void __iomem *);
extern unsigned int ioread32(void __iomem *);
extern unsigned int ioread32be(void __iomem *);
extern void iowrite8(u8, void __iomem *);
extern void iowrite16(u16, void __iomem *);
extern void iowrite16be(u16, void __iomem *);
extern void iowrite32(u32, void __iomem *);
extern void iowrite32be(u32, void __iomem *);
/*
* "string" versions of the above. Note that they
* use native byte ordering for the accesses (on
* the assumption that IO and memory agree on a
* byte order, and CPU byteorder is irrelevant).
*
* They do _not_ update the port address. If you
* want MMIO that copies stuff laid out in MMIO
* memory across multiple ports, use "memcpy_toio()"
* and friends.
*/
extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count);
extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count);
extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count);
extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
#ifndef ARCH_HAS_IOREMAP_WC
#define ioremap_wc ioremap_nocache
#endif
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
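/*
 * Intended PCI usage, sketched ("pdev" and REG_STATUS are
 * hypothetical; a max length of 0 maps the whole BAR):
 *
 *	void __iomem *p = pci_iomap(pdev, 0, 0);
 *	unsigned int v = ioread32(p + REG_STATUS);
 *	iowrite32(v | 1, p + REG_STATUS);
 *	pci_iounmap(pdev, p);
 */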
#endif

View File

@@ -0,0 +1,34 @@
#ifndef __ASM_GENERIC_IPCBUF_H
#define __ASM_GENERIC_IPCBUF_H
/*
* The generic ipc64_perm structure:
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* ipc64_perm was originally meant to be architecture specific, but
* everyone just ended up making identical copies without specific
* optimizations, so we may just as well all use the same one.
*
* Pad space is left for:
* - 32-bit mode_t on architectures that only had 16 bit
* - 32-bit seq
* - 2 miscellaneous 32-bit values
*/
struct ipc64_perm {
__kernel_key_t key;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
__kernel_uid32_t cuid;
__kernel_gid32_t cgid;
__kernel_mode_t mode;
/* pad if mode_t is u16: */
unsigned char __pad1[4 - sizeof(__kernel_mode_t)];
unsigned short seq;
unsigned short __pad2;
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* __ASM_GENERIC_IPCBUF_H */

View File

@@ -0,0 +1,18 @@
#ifndef __ASM_GENERIC_IRQ_H
#define __ASM_GENERIC_IRQ_H
/*
* NR_IRQS is the upper bound of how many interrupts can be handled
* in the platform. It is used to size the static irq_map array,
* so don't make it too big.
*/
#ifndef NR_IRQS
#define NR_IRQS 64
#endif
static inline int irq_canonicalize(int irq)
{
return irq;
}
#endif /* __ASM_GENERIC_IRQ_H */

View File

@@ -0,0 +1,37 @@
/* Fallback per-CPU frame pointer holder
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _ASM_GENERIC_IRQ_REGS_H
#define _ASM_GENERIC_IRQ_REGS_H
#include <linux/percpu.h>
/*
* Per-cpu current frame pointer - the location of the last exception frame on
* the stack
*/
DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
static inline struct pt_regs *get_irq_regs(void)
{
return __get_cpu_var(__irq_regs);
}
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
old_regs = *pp_regs;
*pp_regs = new_regs;
return old_regs;
}
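/*
 * Typical use from an architecture's IRQ entry path (sketch):
 *
 *	struct pt_regs *old_regs = set_irq_regs(regs);
 *	... dispatch the interrupt; handlers may call get_irq_regs() ...
 *	set_irq_regs(old_regs);
 */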
#endif /* _ASM_GENERIC_IRQ_REGS_H */

View File

@@ -0,0 +1,72 @@
#ifndef __ASM_GENERIC_IRQFLAGS_H
#define __ASM_GENERIC_IRQFLAGS_H
/*
* All architectures should implement at least the first two functions;
* usually inline assembly is the best way to do so.
*/
#ifndef RAW_IRQ_DISABLED
#define RAW_IRQ_DISABLED 0
#define RAW_IRQ_ENABLED 1
#endif
/* read interrupt enabled status */
#ifndef __raw_local_save_flags
unsigned long __raw_local_save_flags(void);
#endif
/* set interrupt enabled status */
#ifndef raw_local_irq_restore
void raw_local_irq_restore(unsigned long flags);
#endif
/* get status and disable interrupts */
#ifndef __raw_local_irq_save
static inline unsigned long __raw_local_irq_save(void)
{
unsigned long flags;
flags = __raw_local_save_flags();
raw_local_irq_restore(RAW_IRQ_DISABLED);
return flags;
}
#endif
/* test flags */
#ifndef raw_irqs_disabled_flags
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return flags == RAW_IRQ_DISABLED;
}
#endif
/* unconditionally enable interrupts */
#ifndef raw_local_irq_enable
static inline void raw_local_irq_enable(void)
{
raw_local_irq_restore(RAW_IRQ_ENABLED);
}
#endif
/* unconditionally disable interrupts */
#ifndef raw_local_irq_disable
static inline void raw_local_irq_disable(void)
{
raw_local_irq_restore(RAW_IRQ_DISABLED);
}
#endif
/* test hardware interrupt enable bit */
#ifndef raw_irqs_disabled
static inline int raw_irqs_disabled(void)
{
return raw_irqs_disabled_flags(__raw_local_save_flags());
}
#endif
#define raw_local_save_flags(flags) \
do { (flags) = __raw_local_save_flags(); } while (0)
#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)
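/*
 * The canonical save/restore pattern built from the above (sketch):
 *
 *	unsigned long flags;
 *
 *	raw_local_irq_save(flags);
 *	... critical section with interrupts disabled ...
 *	raw_local_irq_restore(flags);
 */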
#endif /* __ASM_GENERIC_IRQFLAGS_H */

View File

@@ -0,0 +1,9 @@
#ifndef _ASM_GENERIC_KDEBUG_H
#define _ASM_GENERIC_KDEBUG_H
enum die_val {
DIE_UNUSED,
DIE_OOPS=1
};
#endif /* _ASM_GENERIC_KDEBUG_H */

View File

@@ -0,0 +1,36 @@
#ifndef _ASM_GENERIC_KMAP_TYPES_H
#define _ASM_GENERIC_KMAP_TYPES_H
#ifdef __WITH_KM_FENCE
# define KMAP_D(n) __KM_FENCE_##n ,
#else
# define KMAP_D(n)
#endif
enum km_type {
KMAP_D(0) KM_BOUNCE_READ,
KMAP_D(1) KM_SKB_SUNRPC_DATA,
KMAP_D(2) KM_SKB_DATA_SOFTIRQ,
KMAP_D(3) KM_USER0,
KMAP_D(4) KM_USER1,
KMAP_D(5) KM_BIO_SRC_IRQ,
KMAP_D(6) KM_BIO_DST_IRQ,
KMAP_D(7) KM_PTE0,
KMAP_D(8) KM_PTE1,
KMAP_D(9) KM_IRQ0,
KMAP_D(10) KM_IRQ1,
KMAP_D(11) KM_SOFTIRQ0,
KMAP_D(12) KM_SOFTIRQ1,
KMAP_D(13) KM_SYNC_ICACHE,
KMAP_D(14) KM_SYNC_DCACHE,
/* UML specific, for copy_*_user - used in do_op_one_page */
KMAP_D(15) KM_UML_USERCOPY,
KMAP_D(16) KM_IRQ_PTE,
KMAP_D(17) KM_NMI,
KMAP_D(18) KM_NMI_PTE,
KMAP_D(19) KM_TYPE_NR
};
#undef KMAP_D
#endif

View File

@@ -0,0 +1,7 @@
#ifndef __ASM_GENERIC_LIBATA_PORTMAP_H
#define __ASM_GENERIC_LIBATA_PORTMAP_H
#define ATA_PRIMARY_IRQ(dev) 14
#define ATA_SECONDARY_IRQ(dev) 15
#endif

View File

@@ -0,0 +1,8 @@
#ifndef __ASM_GENERIC_LINKAGE_H
#define __ASM_GENERIC_LINKAGE_H
/*
* linux/linkage.h provides reasonable defaults.
* An architecture can override them by providing its own version.
*/
#endif /* __ASM_GENERIC_LINKAGE_H */

View File

@@ -0,0 +1,74 @@
#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H
#include <linux/percpu.h>
#include <asm/atomic.h>
#include <asm/types.h>
/*
* A signed long type for operations which are atomic for a single CPU.
* Usually used in combination with per-cpu variables.
*
* This is the default implementation, which uses atomic_long_t. Which is
* rather pointless. The whole point behind local_t is that some processors
* can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
* running on this CPU. local_t allows exploitation of such capabilities.
*/
/* Implement in terms of atomics. */
/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct
{
atomic_long_t a;
} local_t;
#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
#define local_read(l) atomic_long_read(&(l)->a)
#define local_set(l,i) atomic_long_set((&(l)->a),(i))
#define local_inc(l) atomic_long_inc(&(l)->a)
#define local_dec(l) atomic_long_dec(&(l)->a)
#define local_add(i,l) atomic_long_add((i),(&(l)->a))
#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
/* Non-atomic variants, i.e. preemption disabled and won't be touched
* in interrupt context, etc. Some archs can optimize this case well. */
#define __local_inc(l) local_set((l), local_read(l) + 1)
#define __local_dec(l) local_set((l), local_read(l) - 1)
#define __local_add(i,l) local_set((l), local_read(l) + (i))
#define __local_sub(i,l) local_set((l), local_read(l) - (i))
/* Use these for per-cpu local_t variables: on some archs they are
* much more efficient than these naive implementations. Note they take
* a variable (eg. mystruct.foo), not an address.
*/
#define cpu_local_read(l) local_read(&__get_cpu_var(l))
#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
/* Non-atomic increments, i.e. preemption disabled and won't be touched
* in interrupt context, etc. Some archs can optimize this case well.
*/
#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
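/*
 * Usage sketch with a hypothetical per-cpu counter:
 *
 *	static DEFINE_PER_CPU(local_t, my_events);
 *
 *	cpu_local_inc(my_events);	(atomic wrt IRQs on this CPU)
 */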
#endif /* _ASM_GENERIC_LOCAL_H */

View File

@@ -0,0 +1,77 @@
#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H
#ifndef __ASSEMBLY__
#if defined(CONFIG_FLATMEM)
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (0UL)
#endif
#elif defined(CONFIG_DISCONTIGMEM)
#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn)
#endif
#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid) \
((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif
#endif /* CONFIG_DISCONTIGMEM */
/*
* This header supports three memory models: FLATMEM, DISCONTIGMEM and SPARSEMEM.
*/
#if defined(CONFIG_FLATMEM)
#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
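/*
 * e.g. with the default ARCH_PFN_OFFSET of 0 this reduces to
 * pfn_to_page(n) == &mem_map[n] and page_to_pfn(p) == p - mem_map.
 */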
#elif defined(CONFIG_DISCONTIGMEM)
#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
unsigned long __nid = arch_pfn_to_nid(__pfn); \
NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})
#define __page_to_pfn(pg) \
({ struct page *__pg = (pg); \
struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
(unsigned long)(__pg - __pgdat->node_mem_map) + \
__pgdat->node_start_pfn; \
})
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
/* memmap is virtually contiguous. */
#define __pfn_to_page(pfn) (vmemmap + (pfn))
#define __page_to_pfn(page) (unsigned long)((page) - vmemmap)
#elif defined(CONFIG_SPARSEMEM)
/*
* Note: section's mem_map is encoded to reflect its start_pfn.
* section[i].section_mem_map == mem_map's address - start_pfn;
*/
#define __page_to_pfn(pg) \
({ struct page *__pg = (pg); \
int __sec = page_to_section(__pg); \
(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
})
#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
struct mem_section *__sec = __pfn_to_section(__pfn); \
__section_mem_map_addr(__sec) + __pfn; \
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
#endif /* __ASSEMBLY__ */
#endif

View File

@@ -0,0 +1,18 @@
/*
* Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap, to
* be included in asm-FOO/mmu_context.h for any arch FOO which doesn't
* need to hook these.
*/
#ifndef _ASM_GENERIC_MM_HOOKS_H
#define _ASM_GENERIC_MM_HOOKS_H
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
}
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* _ASM_GENERIC_MM_HOOKS_H */

View File

@@ -0,0 +1,45 @@
#ifndef __ASM_GENERIC_MMAN_COMMON_H
#define __ASM_GENERIC_MMAN_COMMON_H
/*
Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
Based on: asm-xxx/mman.h
*/
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
#define PROT_NONE 0x0 /* page can not be accessed */
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
#define MAP_SHARED 0x01 /* Share changes */
#define MAP_PRIVATE 0x02 /* Changes are private */
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
#define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_INVALIDATE 2 /* invalidate the caches */
#define MS_SYNC 4 /* synchronous memory sync */
#define MADV_NORMAL 0 /* no further special treatment */
#define MADV_RANDOM 1 /* expect random page references */
#define MADV_SEQUENTIAL 2 /* expect sequential page references */
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_DONTNEED 4 /* don't need these pages */
/* common parameters: try to keep these consistent across architectures */
#define MADV_REMOVE 9 /* remove these pages & resources */
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
#define MADV_HWPOISON 100 /* poison a page for testing */
#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
/* compatibility flags */
#define MAP_FILE 0
#endif /* __ASM_GENERIC_MMAN_COMMON_H */

View File

@@ -0,0 +1,19 @@
#ifndef __ASM_GENERIC_MMAN_H
#define __ASM_GENERIC_MMAN_H
#include <asm-generic/mman-common.h>
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_LOCKED 0x2000 /* pages are locked */
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
#endif /* __ASM_GENERIC_MMAN_H */

View File

@@ -0,0 +1,15 @@
#ifndef __ASM_GENERIC_MMU_H
#define __ASM_GENERIC_MMU_H
/*
* This is the mmu.h header for nommu implementations.
* Architectures with an MMU need something more complex.
*/
#ifndef __ASSEMBLY__
typedef struct {
struct vm_list_struct *vmlist;
unsigned long end_brk;
} mm_context_t;
#endif
#endif /* __ASM_GENERIC_MMU_H */

View File

@@ -0,0 +1,45 @@
#ifndef __ASM_GENERIC_MMU_CONTEXT_H
#define __ASM_GENERIC_MMU_CONTEXT_H
/*
* Generic hooks for NOMMU architectures, which do not need to do
* anything special here.
*/
#include <asm-generic/mm_hooks.h>
struct task_struct;
struct mm_struct;
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
}
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
}
static inline void deactivate_mm(struct task_struct *task,
struct mm_struct *mm)
{
}
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
{
}
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
}
#endif /* __ASM_GENERIC_MMU_CONTEXT_H */


@@ -0,0 +1,22 @@
#ifndef __ASM_GENERIC_MODULE_H
#define __ASM_GENERIC_MODULE_H
/*
* Many architectures just need a simple module
* loader without arch specific data.
*/
struct mod_arch_specific
{
};
#ifdef CONFIG_64BIT
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Ehdr Elf64_Ehdr
#else
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
#endif
#endif /* __ASM_GENERIC_MODULE_H */


@@ -0,0 +1,47 @@
#ifndef __ASM_GENERIC_MSGBUF_H
#define __ASM_GENERIC_MSGBUF_H
#include <asm/bitsperlong.h>
/*
* generic msqid64_ds structure.
*
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* msqid64_ds was originally meant to be architecture specific, but
* everyone just ended up making identical copies without specific
* optimizations, so we may just as well all use the same one.
*
* 64 bit architectures typically define a 64 bit __kernel_time_t,
* so they do not need the first three padding words.
* On big-endian systems, the padding is in the wrong place.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct msqid64_ds {
struct ipc64_perm msg_perm;
__kernel_time_t msg_stime; /* last msgsnd time */
#if __BITS_PER_LONG != 64
unsigned long __unused1;
#endif
__kernel_time_t msg_rtime; /* last msgrcv time */
#if __BITS_PER_LONG != 64
unsigned long __unused2;
#endif
__kernel_time_t msg_ctime; /* last change time */
#if __BITS_PER_LONG != 64
unsigned long __unused3;
#endif
unsigned long msg_cbytes; /* current number of bytes on queue */
unsigned long msg_qnum; /* number of messages in queue */
unsigned long msg_qbytes; /* max number of bytes on queue */
__kernel_pid_t msg_lspid; /* pid of last msgsnd */
__kernel_pid_t msg_lrpid; /* last receive pid */
unsigned long __unused4;
unsigned long __unused5;
};
#endif /* __ASM_GENERIC_MSGBUF_H */
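
The padding scheme is easiest to see in miniature: on a 32-bit ABI each __kernel_time_t occupies one word, and the adjacent __unused word reserves the other half of a future 64-bit time field. A reduced userspace model (hypothetical struct, illustration only):

#include <stdint.h>
#include <stdio.h>

/* One timestamp slot from msqid64_ds as a 32-bit ABI sees it: the
 * value plus its padding word span the 8 bytes a 64-bit time_t would
 * need, so the layout can be upgraded without resizing the struct. */
struct slot32 {
	int32_t time;		/* stands in for __kernel_time_t */
	uint32_t pad;		/* stands in for __unused1 */
};

int main(void)
{
	printf("slot size: %zu bytes\n", sizeof(struct slot32));
	return 0;
}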


@@ -0,0 +1,90 @@
/*
* include/asm-generic/mutex-dec.h
*
* Generic implementation of the mutex fastpath, based on atomic
* decrement/increment.
*/
#ifndef _ASM_GENERIC_MUTEX_DEC_H
#define _ASM_GENERIC_MUTEX_DEC_H
/**
* __mutex_fastpath_lock - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 1
*
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
* it wasn't 1 originally. This function MUST leave the value lower than
* 1 even when the "1" assertion wasn't true.
*/
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
fail_fn(count);
}
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 1
*
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
* it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
* or anything the slow path function returns.
*/
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
return 0;
}
/**
* __mutex_fastpath_unlock - try to promote the count from 0 to 1
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 0
*
* Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
* In the failure case, this function is allowed to either set the value to
* 1, or to set it to a value lower than 1.
*
* If the implementation sets it to a value lower than 1, then the
* __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
* it needs to return 0.
*/
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_inc_return(count) <= 0))
fail_fn(count);
}
#define __mutex_slowpath_needs_to_unlock() 1
/**
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
*
* @count: pointer of type atomic_t
* @fail_fn: fallback function
*
* Change the count from 1 to a value lower than 1, and return 0 (failure)
* if it wasn't 1 originally, or return 1 (success) otherwise. This function
* MUST leave the value lower than 1 even when the "1" assertion wasn't true.
* Additionally, if the value was < 0 originally, this function must not set
* it to 0 on failure.
*
* If the architecture has no effective trylock variant, it should call the
* <fail_fn> spinlock-based trylock variant unconditionally.
*/
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (likely(atomic_cmpxchg(count, 1, 0) == 1))
return 1;
return 0;
}
#endif
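
A compact userspace model of the dec/inc protocol above (hypothetical, with C11 atomics standing in for atomic_t; a real slowpath queues and sleeps rather than spinning): count 1 means unlocked, 0 locked, negative locked with waiters.

#include <stdatomic.h>
#include <stdio.h>

static void slowpath(atomic_int *count)
{
	/* Stub: mark the lock contended and spin until handed 1. */
	while (atomic_exchange(count, -1) != 1)
		;
}

static void lock(atomic_int *count)
{
	/* Mirrors __mutex_fastpath_lock(): one decrement, and the
	 * slowpath runs only if the result went negative. */
	if (atomic_fetch_sub(count, 1) - 1 < 0)
		slowpath(count);
}

static void unlock(atomic_int *count)
{
	/* Mirrors __mutex_fastpath_unlock(): one increment; waiters
	 * exist whenever the result is still <= 0. */
	if (atomic_fetch_add(count, 1) + 1 <= 0)
		atomic_store(count, 1);	/* stub wakeup: hand the lock back */
}

int main(void)
{
	atomic_int count = 1;

	lock(&count);
	unlock(&count);
	printf("count after lock/unlock: %d\n", atomic_load(&count));
	return 0;
}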


@@ -0,0 +1,19 @@
/*
* include/asm-generic/mutex-null.h
*
* Generic implementation of the mutex fastpath, based on NOP :-)
*
* This is used by the mutex-debugging infrastructure, but it can also
* be used by architectures that (for whatever reason) want to use the
* spinlock based slowpath.
*/
#ifndef _ASM_GENERIC_MUTEX_NULL_H
#define _ASM_GENERIC_MUTEX_NULL_H
#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count)
#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
#define __mutex_slowpath_needs_to_unlock() 1
#endif


@@ -0,0 +1,111 @@
/*
* include/asm-generic/mutex-xchg.h
*
* Generic implementation of the mutex fastpath, based on xchg().
*
* NOTE: An xchg based implementation might be less optimal than an atomic
* decrement/increment based implementation. If your architecture
* has a reasonable atomic dec/inc then you should probably use
* asm-generic/mutex-dec.h instead, or you could open-code an
* optimized version in asm/mutex.h.
*/
#ifndef _ASM_GENERIC_MUTEX_XCHG_H
#define _ASM_GENERIC_MUTEX_XCHG_H
/**
* __mutex_fastpath_lock - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 1
*
* Change the count from 1 to a value lower than 1, and call <fail_fn> if it
* wasn't 1 originally. This function MUST leave the value lower than 1
* even when the "1" assertion wasn't true.
*/
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
fail_fn(count);
}
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 1
*
* Change the count from 1 to a value lower than 1, and call <fail_fn> if it
* wasn't 1 originally. This function returns 0 if the fastpath succeeds,
* or anything the slow path function returns.
*/
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
return fail_fn(count);
return 0;
}
/**
* __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 0
*
* Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
* In the failure case, this function is allowed to either set the value to
* 1, or to set it to a value lower than 1.
* If the implementation sets it to a value lower than 1, then the
* __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
* it needs to return 0.
*/
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 1) != 0))
fail_fn(count);
}
#define __mutex_slowpath_needs_to_unlock() 0
/**
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
*
* @count: pointer of type atomic_t
* @fail_fn: spinlock based trylock implementation
*
* Change the count from 1 to a value lower than 1, and return 0 (failure)
* if it wasn't 1 originally, or return 1 (success) otherwise. This function
* MUST leave the value lower than 1 even when the "1" assertion wasn't true.
* Additionally, if the value was < 0 originally, this function must not set
* it to 0 on failure.
*
* If the architecture has no effective trylock variant, it should call the
* <fail_fn> spinlock-based trylock variant unconditionally.
*/
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
int prev = atomic_xchg(count, 0);
if (unlikely(prev < 0)) {
/*
* The lock was marked contended so we must restore that
* state. If while doing so we get back a prev value of 1
* then we just own it.
*
* [ In the rare case of the mutex going to 1, to 0, to -1
* and then back to 0 in this few-instructions window,
* this has the potential to trigger the slowpath for the
* owner's unlock path needlessly, but that's not a problem
* in practice. ]
*/
prev = atomic_xchg(count, prev);
if (prev < 0)
prev = 0;
}
return prev;
}
#endif
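
The subtle step in __mutex_fastpath_trylock() above is restoring the negative "contended" marker that the first xchg() may have destroyed. A userspace rendering of that dance (hypothetical, C11 atomics standing in for atomic_t):

#include <stdatomic.h>
#include <stdio.h>

static int trylock(atomic_int *count)
{
	int prev = atomic_exchange(count, 0);

	if (prev < 0) {
		/* The xchg clobbered a "contended" marker; write it
		 * back. If the owner unlocked in the window we read 1
		 * here instead, and the lock is simply ours. */
		prev = atomic_exchange(count, prev);
		if (prev < 0)
			prev = 0;
	}
	return prev;	/* 1 on success, 0 on failure */
}

int main(void)
{
	atomic_int count = 1;
	int first = trylock(&count);
	int second = trylock(&count);

	printf("first: %d, second: %d\n", first, second);
	return 0;
}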


@@ -0,0 +1,9 @@
#ifndef __ASM_GENERIC_MUTEX_H
#define __ASM_GENERIC_MUTEX_H
/*
* Pull in the generic implementation for the mutex fastpath,
* which is a reasonable default on many architectures.
*/
#include <asm-generic/mutex-dec.h>
#endif /* __ASM_GENERIC_MUTEX_H */


@@ -0,0 +1,99 @@
#ifndef __ASM_GENERIC_PAGE_H
#define __ASM_GENERIC_PAGE_H
/*
* Generic page.h implementation, for NOMMU architectures.
* This provides the dummy definitions for the memory management.
*/
#ifdef CONFIG_MMU
#error need to provide a real asm/page.h
#endif
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#ifdef __ASSEMBLY__
#define PAGE_SIZE (1 << PAGE_SHIFT)
#else
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
#include <asm/setup.h>
#ifndef __ASSEMBLY__
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)
#define clear_page(page) memset((page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/*
* These are used to make use of C type-checking..
*/
typedef struct {
unsigned long pte;
} pte_t;
typedef struct {
unsigned long pmd[16];
} pmd_t;
typedef struct {
unsigned long pgd;
} pgd_t;
typedef struct {
unsigned long pgprot;
} pgprot_t;
typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((&x)->pmd[0])
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
extern unsigned long memory_start;
extern unsigned long memory_end;
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS
#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS)
#else
#define PAGE_OFFSET (0)
#endif
#ifndef __ASSEMBLY__
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
#ifndef page_to_phys
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#endif
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
((void *)(kaddr) < (void *)memory_end))
#endif /* __ASSEMBLY__ */
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
#endif /* __ASM_GENERIC_PAGE_H */
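
The translation macros above are plain offset arithmetic, which a standalone sketch can reproduce exactly (the PAGE_OFFSET value here is an assumption for the demo, not a real platform's):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0x80000000UL	/* assumed RAM base for the demo */

#define __pa(x)		((unsigned long)(x) - PAGE_OFFSET)
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET))
#define virt_to_pfn(k)	(__pa(k) >> PAGE_SHIFT)

int main(void)
{
	unsigned long vaddr = PAGE_OFFSET + 0x5000;

	printf("virt 0x%lx -> phys 0x%lx -> pfn %lu -> virt %p\n",
	       vaddr, __pa(vaddr), virt_to_pfn(vaddr),
	       __va(virt_to_pfn(vaddr) << PAGE_SHIFT));
	return 0;
}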


@@ -0,0 +1,24 @@
#ifndef __ASM_GENERIC_PARAM_H
#define __ASM_GENERIC_PARAM_H
#ifdef __KERNEL__
# define HZ CONFIG_HZ /* Internal kernel timer frequency */
# define USER_HZ 100 /* some user interfaces are */
# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
#endif
#ifndef HZ
#define HZ 100
#endif
#ifndef EXEC_PAGESIZE
#define EXEC_PAGESIZE 4096
#endif
#ifndef NOGROUP
#define NOGROUP (-1)
#endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#endif /* __ASM_GENERIC_PARAM_H */


@@ -0,0 +1,23 @@
#ifndef __ASM_GENERIC_PARPORT_H
#define __ASM_GENERIC_PARPORT_H
/*
* An ISA bus may have i8255 parallel ports at well-known
* locations in the I/O space, which are scanned by
* parport_pc_find_isa_ports.
*
* Without ISA support, the driver will only attach
* to devices on the PCI bus.
*/
static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
{
#ifdef CONFIG_ISA
return parport_pc_find_isa_ports(autoirq, autodma);
#else
return 0;
#endif
}
#endif /* __ASM_GENERIC_PARPORT_H */


@@ -0,0 +1,107 @@
/* include this file if the platform implements the dma_ DMA Mapping API
* and wants to provide the pci_ DMA Mapping API in terms of it */
#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
#define _ASM_GENERIC_PCI_DMA_COMPAT_H
#include <linux/dma-mapping.h>
/* note pci_set_dma_mask isn't here, since it's a public function
* exported from drivers/pci, use dma_supported instead */
static inline int
pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
return dma_supported(hwdev == NULL ? NULL : &hwdev->dev, mask);
}
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
}
static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
}
static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
}
static inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
{
dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
}
static inline dma_addr_t
pci_map_page(struct pci_dev *hwdev, struct page *page,
unsigned long offset, size_t size, int direction)
{
return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
}
static inline void
pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
size_t size, int direction)
{
dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
}
static inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}
static inline void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}
static inline int
pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
{
return dma_mapping_error(&pdev->dev, dma_addr);
}
#endif
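
Every wrapper above follows one template: translate the pci_dev into its embedded struct device (tolerating NULL) and forward to the corresponding dma_* call. A driver-side sketch of the intended calling pattern (kernel context assumed, not standalone; error path abbreviated):

/* Map a buffer for a device read, hand it over, then unmap. */
dma_addr_t handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

if (pci_dma_mapping_error(pdev, handle))
	return -ENOMEM;
/* ... program the hardware with "handle", wait for completion ... */
pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);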


@@ -0,0 +1,61 @@
/*
* linux/include/asm-generic/pci.h
*
* Copyright (C) 2003 Russell King
*/
#ifndef _ASM_GENERIC_PCI_H
#define _ASM_GENERIC_PCI_H
/**
* pcibios_resource_to_bus - convert resource to PCI bus address
* @dev: device which owns this resource
* @region: converted bus-centric region (start,end)
* @res: resource to convert
*
* Convert a resource to a PCI device bus address or bus window.
*/
static inline void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
{
region->start = res->start;
region->end = res->end;
}
static inline void
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
struct pci_bus_region *region)
{
res->start = region->start;
res->end = region->end;
}
static inline struct resource *
pcibios_select_root(struct pci_dev *pdev, struct resource *res)
{
struct resource *root = NULL;
if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
if (res->flags & IORESOURCE_MEM)
root = &iomem_resource;
return root;
}
#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
/*
* By default, assume that no iommu is in use and that the PCI
* space is mapped to physical address 0.
*/
#ifndef PCI_DMA_BUS_IS_PHYS
#define PCI_DMA_BUS_IS_PHYS (1)
#endif
#endif /* _ASM_GENERIC_PCI_H */


@@ -0,0 +1,107 @@
#ifndef _ASM_GENERIC_PERCPU_H_
#define _ASM_GENERIC_PERCPU_H_
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/percpu-defs.h>
#ifdef CONFIG_SMP
/*
* per_cpu_offset() is the offset that has to be added to a
* percpu variable to get to the instance for a certain processor.
*
* Most arches use the __per_cpu_offset array for those offsets but
* some arches have their own ways of determining the offset (x86_64, s390).
*/
#ifndef __per_cpu_offset
extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu_offset(x) (__per_cpu_offset[x])
#endif
/*
* Determine the offset for the currently active processor.
* An arch may define __my_cpu_offset to provide a more effective
* means of obtaining the offset to the per cpu variables of the
* current processor.
*/
#ifndef __my_cpu_offset
#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
#endif
#ifdef CONFIG_DEBUG_PREEMPT
#define my_cpu_offset per_cpu_offset(smp_processor_id())
#else
#define my_cpu_offset __my_cpu_offset
#endif
/*
* Add an offset to a pointer but keep the pointer as is.
*
* Only S390 provides its own means of moving the pointer.
*/
#ifndef SHIFT_PERCPU_PTR
#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
#endif
/*
* A percpu variable may point to a discarded region. The following are
* established ways to produce a usable pointer from the percpu variable
* offset.
*/
#define per_cpu(var, cpu) \
(*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
#define __get_cpu_var(var) \
(*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
#define __raw_get_cpu_var(var) \
(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
#endif
#else /* ! SMP */
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
#define __get_cpu_var(var) per_cpu_var(var)
#define __raw_get_cpu_var(var) per_cpu_var(var)
#endif /* SMP */
#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif
#ifdef CONFIG_SMP
#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
#define PER_CPU_FIRST_SECTION ""
#endif
#ifndef PER_CPU_ATTRIBUTES
#define PER_CPU_ATTRIBUTES
#endif
#ifndef PER_CPU_DEF_ATTRIBUTES
#define PER_CPU_DEF_ATTRIBUTES
#endif
#endif /* _ASM_GENERIC_PERCPU_H_ */
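
In the SMP case everything reduces to base-plus-offset arithmetic: the linker emits one copy of each variable in .data.percpu, and per_cpu() adds the chosen CPU's offset to its address. A userspace model of the same indexing (plain arrays standing in for the relocated per-CPU areas; names are illustrative):

#include <stdio.h>

#define NR_CPUS 4

/* One slot per processor; the kernel derives the same effect from
 * __per_cpu_offset[] plus pointer arithmetic on a single symbol. */
static long counter[NR_CPUS];

#define per_cpu_demo(var, cpu)	((var)[(cpu)])

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu_demo(counter, cpu) = 100 + cpu;
	printf("cpu2 copy: %ld\n", per_cpu_demo(counter, 2));
	return 0;
}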


@@ -0,0 +1,12 @@
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H
/*
* an empty file is enough for a nommu architecture
*/
#ifdef CONFIG_MMU
#error need to implement an architecture specific asm/pgalloc.h
#endif
#define check_pgt_cache() do { } while (0)
#endif /* __ASM_GENERIC_PGALLOC_H */


@@ -0,0 +1,69 @@
#ifndef _PGTABLE_NOPMD_H
#define _PGTABLE_NOPMD_H
#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>
struct mm_struct;
#define __PAGETABLE_PMD_FOLDED
/*
* Having the pmd type consist of a pud gets the size right, and allows
* us to conceptually access the pud entry that this pmd is folded into
* without casting.
*/
typedef struct { pud_t pud; } pmd_t;
#define PMD_SHIFT PUD_SHIFT
#define PTRS_PER_PMD 1
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
/*
* The "pud_xxx()" functions here are trivial for a folded two-level
* setup: the pmd is never bad, and a pmd always exists (as it's folded
* into the pud entry)
*/
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
static inline void pud_clear(pud_t *pud) { }
#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
#define pud_populate(mm, pmd, pte) do { } while (0)
/*
* (pmds are folded into puds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
*/
#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })
static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
{
return (pmd_t *)pud;
}
#define pmd_val(x) (pud_val((x).pud))
#define __pmd(x) ((pmd_t) { __pud(x) } )
#define pud_page(pud) (pmd_page((pmd_t){ pud }))
#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pud, so has no extra memory associated with it.
*/
#define pmd_alloc_one(mm, address) NULL
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
}
#define __pmd_free_tlb(tlb, x, a) do { } while (0)
#undef pmd_addr_end
#define pmd_addr_end(addr, end) (end)
#endif /* __ASSEMBLY__ */
#endif /* _PGTABLE_NOPMD_H */


@@ -0,0 +1,61 @@
#ifndef _PGTABLE_NOPUD_H
#define _PGTABLE_NOPUD_H
#ifndef __ASSEMBLY__
#define __PAGETABLE_PUD_FOLDED
/*
* Having the pud type consist of a pgd gets the size right, and allows
* us to conceptually access the pgd entry that this pud is folded into
* without casting.
*/
typedef struct { pgd_t pgd; } pud_t;
#define PUD_SHIFT PGDIR_SHIFT
#define PTRS_PER_PUD 1
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
* setup: the pud is never bad, and a pud always exists (as it's folded
* into the pgd entry)
*/
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline void pgd_clear(pgd_t *pgd) { }
#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
#define pgd_populate(mm, pgd, pud) do { } while (0)
/*
* (puds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
*/
#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
{
return (pud_t *)pgd;
}
#define pud_val(x) (pgd_val((x).pgd))
#define __pud(x) ((pud_t) { __pgd(x) } )
#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
/*
* allocating and freeing a pud is trivial: the 1-entry pud is
* inside the pgd, so has no extra memory associated with it.
*/
#define pud_alloc_one(mm, address) NULL
#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x, a) do { } while (0)
#undef pud_addr_end
#define pud_addr_end(addr, end) (end)
#endif /* __ASSEMBLY__ */
#endif /* _PGTABLE_NOPUD_H */
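
With the pud and pmd levels folded as above, the canonical four-level descent still type-checks, but the middle two steps degenerate into casts. A kernel-context sketch (not standalone) of the walk written against these helpers:

/* Four-level walk; on folded configurations the pud/pmd steps
 * return their argument reinterpreted, costing nothing. */
pgd_t *pgd = pgd_offset(mm, addr);
pud_t *pud = pud_offset(pgd, addr);	/* folded: (pud_t *)pgd */
pmd_t *pmd = pmd_offset(pud, addr);	/* folded: (pmd_t *)pud */
pte_t *pte = pte_offset_map(pmd, addr);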


@@ -0,0 +1,349 @@
#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
* Set only the access flags (dirty, accessed, and writable). Furthermore,
* we know it always gets set to a "more permissive" setting, which allows
* most architectures to optimize this. We return whether the PTE actually
* changed, which in turn instructs the caller to do things like
* update_mmu_cache. This used to be done in the caller, but sparc needs
* minor faults to force that call on sun4c, so we changed this macro
* slightly.
*/
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
int __changed = !pte_same(*(__ptep), __entry); \
if (__changed) { \
set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
flush_tlb_page(__vma, __address); \
} \
__changed; \
})
#endif
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep) \
({ \
pte_t __pte = *(__ptep); \
int r = 1; \
if (!pte_young(__pte)) \
r = 0; \
else \
set_pte_at((__vma)->vm_mm, (__address), \
(__ptep), pte_mkold(__pte)); \
r; \
})
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \
int __young; \
__young = ptep_test_and_clear_young(__vma, __address, __ptep); \
if (__young) \
flush_tlb_page(__vma, __address); \
__young; \
})
#endif
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
pte_t __pte = *(__ptep); \
pte_clear((__mm), (__address), (__ptep)); \
__pte; \
})
#endif
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
({ \
pte_t __pte; \
__pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
__pte; \
})
#endif
/*
* Some architectures may be able to avoid expensive synchronization
* primitives when modifications are made to PTE's which are already
* not present, or in the process of an address space destruction.
*/
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \
do { \
pte_clear((__mm), (__address), (__ptep)); \
} while (0)
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep) \
({ \
pte_t __pte; \
__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
flush_tlb_page(__vma, __address); \
__pte; \
})
#endif
#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
pte_t old_pte = *ptep;
set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (pte_val(A) == pte_val(B))
#endif
#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page) (0)
#endif
#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page) do { } while (0)
#endif
#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte) pte_dirty(pte)
#else
#define pte_maybe_dirty(pte) (1)
#endif
#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif
#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
#endif
#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif
#ifndef pgprot_noncached
#define pgprot_noncached(prot) (prot)
#endif
#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif
/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
* vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
*/
#define pgd_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#ifndef pud_addr_end
#define pud_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#endif
#ifndef pmd_addr_end
#define pmd_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#endif
/*
* When walking page tables, we usually want to skip any p?d_none entries;
* and any p?d_bad entries - reporting the error before resetting to none.
* Do the tests inline, but report and clear the bad entry in mm/memory.c.
*/
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);
static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
if (pgd_none(*pgd))
return 1;
if (unlikely(pgd_bad(*pgd))) {
pgd_clear_bad(pgd);
return 1;
}
return 0;
}
static inline int pud_none_or_clear_bad(pud_t *pud)
{
if (pud_none(*pud))
return 1;
if (unlikely(pud_bad(*pud))) {
pud_clear_bad(pud);
return 1;
}
return 0;
}
static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
if (pmd_none(*pmd))
return 1;
if (unlikely(pmd_bad(*pmd))) {
pmd_clear_bad(pmd);
return 1;
}
return 0;
}
static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep)
{
/*
* Get the current pte state, but zero it out to make it
* non-present, preventing the hardware from asynchronously
* updating it.
*/
return ptep_get_and_clear(mm, addr, ptep);
}
static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
/*
* The pte is non-present, so there's no hardware state to
* preserve.
*/
set_pte_at(mm, addr, ptep, pte);
}
#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
* Start a pte protection read-modify-write transaction, which
* protects against asynchronous hardware modifications to the pte.
* The intention is not to prevent the hardware from making pte
* updates, but to prevent any updates it may make from being lost.
*
* This does not protect against other software modifications of the
* pte; the appropriate pte lock must be held over the transaction.
*
* Note that this interface is intended to be batchable, meaning that
* ptep_modify_prot_commit may not actually update the pte, but merely
* queue the update to be done at some later time. The update must be
* actually committed before the pte lock is released, however.
*/
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep)
{
return __ptep_modify_prot_start(mm, addr, ptep);
}
/*
* Commit an update to a pte, leaving any hardware-controlled bits in
* the PTE unmodified.
*/
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
#endif /* CONFIG_MMU */
/*
* A facility to provide lazy MMU batching. This allows PTE updates and
* page invalidations to be delayed until a call to leave lazy MMU mode
* is issued. Some architectures may benefit from doing this, and it is
* beneficial for both shadow and direct mode hypervisors, which may batch
* the PTE updates which happen during this window. Note that using this
* interface requires that read hazards be removed from the code. A read
* hazard could result in the direct mode hypervisor case, since the actual
* write to the page tables may not yet have taken place, so reads through
* a raw PTE pointer after it has been modified are not guaranteed to be
* up to date. This mode can only be entered and left under the protection of
* the page table locks for all page tables which may be modified. In the UP
* case, this is required so that preemption is disabled, and in the SMP case,
* it must synchronize the delayed page table writes properly on other CPUs.
*/
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode() do {} while (0)
#define arch_leave_lazy_mmu_mode() do {} while (0)
#define arch_flush_lazy_mmu_mode() do {} while (0)
#endif
/*
* A facility to provide batching of the reload of page tables and
* other process state with the actual context switch code for
* paravirtualized guests. By convention, only one of the batched
* update (lazy) modes (CPU, MMU) should be active at any given time,
* entry should never be nested, and entry and exits should always be
* paired. This is for sanity of maintaining and reasoning about the
* kernel code. In this case, the exit (end of the context switch) is
* in architecture-specific code, and so doesn't need a generic
* definition.
*/
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev) do {} while (0)
#endif
#ifndef __HAVE_PFNMAP_TRACKING
/*
* Interface that can be used by architecture code to keep track of
* memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
*
* track_pfn_vma_new is called when a _new_ pfn mapping is being established
* for physical range indicated by pfn and size.
*/
static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long size)
{
return 0;
}
/*
* Interface that can be used by architecture code to keep track of
* memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
*
* track_pfn_vma_copy is called when vma that is covering the pfnmap gets
* copied through copy_page_range().
*/
static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
{
return 0;
}
/*
* Interface that can be used by architecture code to keep track of
* memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
*
* untrack_pfn_vma is called while unmapping a pfnmap for a region.
* untrack can be called for a specific region indicated by pfn and size or
* can be for the entire vma (in which case size can be zero).
*/
static inline void untrack_pfn_vma(struct vm_area_struct *vma,
unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long size);
extern int track_pfn_vma_copy(struct vm_area_struct *vma);
extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size);
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_GENERIC_PGTABLE_H */
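
The p?d_addr_end() and p?d_none_or_clear_bad() helpers above exist so that range walkers can all share one loop shape; a condensed sketch in the style of mm/memory.c (kernel context assumed, not standalone):

/* Illustrative walker skeleton for one pmd-level range. */
static void walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;	/* hole, or repaired bad entry */
		/* ... visit the ptes in [addr, next) ... */
	} while (pmd++, addr = next, addr != end);
}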


@@ -0,0 +1,37 @@
#ifndef __ASM_GENERIC_POLL_H
#define __ASM_GENERIC_POLL_H
/* These are specified by iBCS2 */
#define POLLIN 0x0001
#define POLLPRI 0x0002
#define POLLOUT 0x0004
#define POLLERR 0x0008
#define POLLHUP 0x0010
#define POLLNVAL 0x0020
/* The rest seem to be more-or-less nonstandard. Check them! */
#define POLLRDNORM 0x0040
#define POLLRDBAND 0x0080
#ifndef POLLWRNORM
#define POLLWRNORM 0x0100
#endif
#ifndef POLLWRBAND
#define POLLWRBAND 0x0200
#endif
#ifndef POLLMSG
#define POLLMSG 0x0400
#endif
#ifndef POLLREMOVE
#define POLLREMOVE 0x1000
#endif
#ifndef POLLRDHUP
#define POLLRDHUP 0x2000
#endif
struct pollfd {
int fd;
short events;
short revents;
};
#endif /* __ASM_GENERIC_POLL_H */
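
Userspace consumes these bits through poll(2); a minimal example that waits up to one second for stdin to become readable:

#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct pollfd pfd = { .fd = 0, .events = POLLIN };
	int n = poll(&pfd, 1, 1000);	/* one descriptor, 1s timeout */

	if (n > 0 && (pfd.revents & POLLIN))
		printf("stdin is readable\n");
	else
		printf("timed out or error\n");
	return 0;
}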


@@ -0,0 +1,165 @@
#ifndef __ASM_GENERIC_POSIX_TYPES_H
#define __ASM_GENERIC_POSIX_TYPES_H
#include <asm/bitsperlong.h>
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc.
*
* First the types that are often defined in different ways across
* architectures, so that you can override them.
*/
#ifndef __kernel_ino_t
typedef unsigned long __kernel_ino_t;
#endif
#ifndef __kernel_mode_t
typedef unsigned int __kernel_mode_t;
#endif
#ifndef __kernel_nlink_t
typedef unsigned long __kernel_nlink_t;
#endif
#ifndef __kernel_pid_t
typedef int __kernel_pid_t;
#endif
#ifndef __kernel_ipc_pid_t
typedef int __kernel_ipc_pid_t;
#endif
#ifndef __kernel_uid_t
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
#endif
#ifndef __kernel_suseconds_t
typedef long __kernel_suseconds_t;
#endif
#ifndef __kernel_daddr_t
typedef int __kernel_daddr_t;
#endif
#ifndef __kernel_uid32_t
typedef __kernel_uid_t __kernel_uid32_t;
typedef __kernel_gid_t __kernel_gid32_t;
#endif
#ifndef __kernel_old_uid_t
typedef __kernel_uid_t __kernel_old_uid_t;
typedef __kernel_gid_t __kernel_old_gid_t;
#endif
#ifndef __kernel_old_dev_t
typedef unsigned int __kernel_old_dev_t;
#endif
/*
* Most 32 bit architectures use "unsigned int" size_t,
* and all 64 bit architectures use "unsigned long" size_t.
*/
#ifndef __kernel_size_t
#if __BITS_PER_LONG != 64
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
#else
typedef unsigned long __kernel_size_t;
typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
#endif
#endif
/*
* anything below here should be completely generic
*/
typedef long __kernel_off_t;
typedef long long __kernel_loff_t;
typedef long __kernel_time_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef struct {
int val[2];
} __kernel_fsid_t;
#ifdef __KERNEL__
#undef __FD_SET
static inline void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
__fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
}
#undef __FD_CLR
static inline void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
}
#undef __FD_ISSET
static inline int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
}
/*
* This will unroll the loop for the normal constant case (8 ints,
* for a 256-bit fd_set)
*/
#undef __FD_ZERO
static inline void __FD_ZERO(__kernel_fd_set *__p)
{
unsigned long *__tmp = __p->fds_bits;
int __i;
if (__builtin_constant_p(__FDSET_LONGS)) {
switch (__FDSET_LONGS) {
case 16:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
__tmp[ 4] = 0; __tmp[ 5] = 0;
__tmp[ 6] = 0; __tmp[ 7] = 0;
__tmp[ 8] = 0; __tmp[ 9] = 0;
__tmp[10] = 0; __tmp[11] = 0;
__tmp[12] = 0; __tmp[13] = 0;
__tmp[14] = 0; __tmp[15] = 0;
return;
case 8:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
__tmp[ 4] = 0; __tmp[ 5] = 0;
__tmp[ 6] = 0; __tmp[ 7] = 0;
return;
case 4:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
return;
}
}
__i = __FDSET_LONGS;
while (__i) {
__i--;
*__tmp = 0;
__tmp++;
}
}
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_POSIX_TYPES_H */
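
Each __FD_* helper above is the same two-step address: word index __fd / __NFDBITS, bit index __fd % __NFDBITS. The arithmetic, shown standalone (local names, not the kernel's):

#include <stdio.h>

#define LONG_BITS (8 * sizeof(unsigned long))

int main(void)
{
	unsigned long bits[2] = { 0, 0 };
	unsigned int fd = 70;	/* word 1, bit 6 with 64-bit longs */

	bits[fd / LONG_BITS] |= 1UL << (fd % LONG_BITS);	/* __FD_SET */
	printf("word %zu, bit %zu, set: %d\n",
	       (size_t)(fd / LONG_BITS), (size_t)(fd % LONG_BITS),
	       !!(bits[fd / LONG_BITS] & (1UL << (fd % LONG_BITS))));
	return 0;
}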


@@ -0,0 +1,94 @@
#ifndef _ASM_GENERIC_RESOURCE_H
#define _ASM_GENERIC_RESOURCE_H
/*
* Resource limit IDs
*
* ( Compatibility detail: there are architectures that have
* a different rlimit ID order in the 5-9 range and want
* to keep that order for binary compatibility. The reasons
* are historic and all new rlimits are identical across all
* arches. If an arch has such special order for some rlimits
* then it defines them prior including asm-generic/resource.h. )
*/
#define RLIMIT_CPU 0 /* CPU time in sec */
#define RLIMIT_FSIZE 1 /* Maximum filesize */
#define RLIMIT_DATA 2 /* max data size */
#define RLIMIT_STACK 3 /* max stack size */
#define RLIMIT_CORE 4 /* max core file size */
#ifndef RLIMIT_RSS
# define RLIMIT_RSS 5 /* max resident set size */
#endif
#ifndef RLIMIT_NPROC
# define RLIMIT_NPROC 6 /* max number of processes */
#endif
#ifndef RLIMIT_NOFILE
# define RLIMIT_NOFILE 7 /* max number of open files */
#endif
#ifndef RLIMIT_MEMLOCK
# define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
#endif
#ifndef RLIMIT_AS
# define RLIMIT_AS 9 /* address space limit */
#endif
#define RLIMIT_LOCKS 10 /* maximum file locks held */
#define RLIMIT_SIGPENDING 11 /* max number of pending signals */
#define RLIMIT_MSGQUEUE 12 /* maximum bytes in POSIX mqueues */
#define RLIMIT_NICE 13 /* max nice prio allowed to raise to
0-39 for nice level 19 .. -20 */
#define RLIMIT_RTPRIO 14 /* maximum realtime priority */
#define RLIMIT_RTTIME 15 /* timeout for RT tasks in us */
#define RLIM_NLIMITS 16
/*
* SuS says limits have to be unsigned.
* Which makes a ton more sense anyway.
*
* Some architectures override this (for compatibility reasons):
*/
#ifndef RLIM_INFINITY
# define RLIM_INFINITY (~0UL)
#endif
/*
* RLIMIT_STACK default maximum - some architectures override it:
*/
#ifndef _STK_LIM_MAX
# define _STK_LIM_MAX RLIM_INFINITY
#endif
#ifdef __KERNEL__
/*
* boot-time rlimit defaults for the init task:
*/
#define INIT_RLIMITS \
{ \
[RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_STACK] = { _STK_LIM, _STK_LIM_MAX }, \
[RLIMIT_CORE] = { 0, RLIM_INFINITY }, \
[RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_NPROC] = { 0, 0 }, \
[RLIMIT_NOFILE] = { INR_OPEN, INR_OPEN }, \
[RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \
[RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_SIGPENDING] = { 0, 0 }, \
[RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \
[RLIMIT_NICE] = { 0, 0 }, \
[RLIMIT_RTPRIO] = { 0, 0 }, \
[RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
}
#endif /* __KERNEL__ */
#endif
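
Userspace reaches these IDs through getrlimit(2)/setrlimit(2); for example, reading the RLIMIT_NOFILE pair defined above:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
		printf("soft %llu, hard %llu\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_max);
	return 0;
}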


@@ -0,0 +1,218 @@
/*
* include/asm-generic/rtc.h
*
* Author: Tom Rini <trini@mvista.com>
*
* Based on:
* drivers/char/rtc.c
*
* Please read the COPYING file for all license details.
*/
#ifndef __ASM_RTC_H__
#define __ASM_RTC_H__
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/delay.h>
#define RTC_PIE 0x40 /* periodic interrupt enable */
#define RTC_AIE 0x20 /* alarm interrupt enable */
#define RTC_UIE 0x10 /* update-finished interrupt enable */
/* some dummy definitions */
#define RTC_BATT_BAD 0x100 /* battery bad */
#define RTC_SQWE 0x08 /* enable square-wave output */
#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
/*
* Returns true if a clock update is in progress
*/
static inline unsigned char rtc_is_updating(void)
{
unsigned char uip;
unsigned long flags;
spin_lock_irqsave(&rtc_lock, flags);
uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
spin_unlock_irqrestore(&rtc_lock, flags);
return uip;
}
static inline unsigned int __get_rtc_time(struct rtc_time *time)
{
unsigned char ctrl;
unsigned long flags;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_year;
#endif
/*
* Read the RTC once any update in progress is done. The update
* can take just over 2ms. We wait 20ms. There is no need to
* poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
* If you need to know *exactly* when a second has started, enable
* periodic update complete interrupts (via ioctl) and then
* immediately read /dev/rtc which will block until you get the IRQ.
* Once the read clears, read the RTC time (again via ioctl). Easy.
*/
if (rtc_is_updating())
mdelay(20);
/*
* Only the values that we read from the RTC are set. We leave
* tm_wday, tm_yday and tm_isdst untouched. Even though the
* RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
* by the RTC when initially set to a non-zero value.
*/
spin_lock_irqsave(&rtc_lock, flags);
time->tm_sec = CMOS_READ(RTC_SECONDS);
time->tm_min = CMOS_READ(RTC_MINUTES);
time->tm_hour = CMOS_READ(RTC_HOURS);
time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
time->tm_mon = CMOS_READ(RTC_MONTH);
time->tm_year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_MACH_DECSTATION
real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irqrestore(&rtc_lock, flags);
if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
{
time->tm_sec = bcd2bin(time->tm_sec);
time->tm_min = bcd2bin(time->tm_min);
time->tm_hour = bcd2bin(time->tm_hour);
time->tm_mday = bcd2bin(time->tm_mday);
time->tm_mon = bcd2bin(time->tm_mon);
time->tm_year = bcd2bin(time->tm_year);
}
#ifdef CONFIG_MACH_DECSTATION
time->tm_year += real_year - 72;
#endif
/*
* Account for differences between how the RTC uses the values
* and how they are defined in a struct rtc_time.
*/
if (time->tm_year <= 69)
time->tm_year += 100;
time->tm_mon--;
return RTC_24H;
}
#ifndef get_rtc_time
#define get_rtc_time __get_rtc_time
#endif
/* Set the current date and time in the real time clock. */
static inline int __set_rtc_time(struct rtc_time *time)
{
unsigned long flags;
unsigned char mon, day, hrs, min, sec;
unsigned char save_control, save_freq_select;
unsigned int yrs;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_yrs, leap_yr;
#endif
yrs = time->tm_year;
mon = time->tm_mon + 1; /* tm_mon starts at zero */
day = time->tm_mday;
hrs = time->tm_hour;
min = time->tm_min;
sec = time->tm_sec;
if (yrs > 255) /* They are unsigned */
return -EINVAL;
spin_lock_irqsave(&rtc_lock, flags);
#ifdef CONFIG_MACH_DECSTATION
real_yrs = yrs;
leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
!((yrs + 1900) % 400));
yrs = 72;
/*
* We want to keep the year set to 73 until March
* for non-leap years, so that Feb, 29th is handled
* correctly.
*/
if (!leap_yr && mon < 3) {
real_yrs--;
yrs = 73;
}
#endif
/* These limits and adjustments are independent of
* whether the chip is in binary mode or not.
*/
if (yrs > 169) {
spin_unlock_irqrestore(&rtc_lock, flags);
return -EINVAL;
}
if (yrs >= 100)
yrs -= 100;
if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
|| RTC_ALWAYS_BCD) {
sec = bin2bcd(sec);
min = bin2bcd(min);
hrs = bin2bcd(hrs);
day = bin2bcd(day);
mon = bin2bcd(mon);
yrs = bin2bcd(yrs);
}
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
#endif
CMOS_WRITE(yrs, RTC_YEAR);
CMOS_WRITE(mon, RTC_MONTH);
CMOS_WRITE(day, RTC_DAY_OF_MONTH);
CMOS_WRITE(hrs, RTC_HOURS);
CMOS_WRITE(min, RTC_MINUTES);
CMOS_WRITE(sec, RTC_SECONDS);
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
#ifndef set_rtc_time
#define set_rtc_time __set_rtc_time
#endif
static inline unsigned int get_rtc_ss(void)
{
struct rtc_time h;
get_rtc_time(&h);
return h.tm_sec;
}
static inline int get_rtc_pll(struct rtc_pll_info *pll)
{
return -EINVAL;
}
static inline int set_rtc_pll(struct rtc_pll_info *pll)
{
return -EINVAL;
}
#endif /* __ASM_RTC_H__ */
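
The bcd2bin()/bin2bcd() calls above undo and redo the CMOS registers' packed-decimal encoding, which is two lines of arithmetic; shown standalone with local helpers mirroring the linux/bcd.h ones:

#include <stdio.h>

static unsigned bcd2bin_demo(unsigned v) { return (v >> 4) * 10 + (v & 0x0f); }
static unsigned bin2bcd_demo(unsigned v) { return ((v / 10) << 4) | (v % 10); }

int main(void)
{
	/* 0x59 in BCD is decimal 59, and back again. */
	printf("bcd 0x59 -> %u, 59 -> bcd 0x%02x\n",
	       bcd2bin_demo(0x59), bin2bcd_demo(59));
	return 0;
}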


@@ -0,0 +1,43 @@
#ifndef __ASM_GENERIC_SCATTERLIST_H
#define __ASM_GENERIC_SCATTERLIST_H
#include <linux/types.h>
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
dma_addr_t dma_address;
unsigned int dma_length;
};
/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries pci_map_sg
* returns, or alternatively stop on the first sg_dma_len(sg) which
* is 0.
*/
#define sg_dma_address(sg) ((sg)->dma_address)
#ifndef sg_dma_len
/*
* Normally, you have an iommu on 64 bit machines, but not on 32 bit
* machines. Architectures that are different should override this.
*/
#if __BITS_PER_LONG == 64
#define sg_dma_len(sg) ((sg)->dma_length)
#else
#define sg_dma_len(sg) ((sg)->length)
#endif /* 64 bit */
#endif /* sg_dma_len */
#ifndef ISA_DMA_THRESHOLD
#define ISA_DMA_THRESHOLD (~0UL)
#endif
#define ARCH_HAS_SG_CHAIN
#endif /* __ASM_GENERIC_SCATTERLIST_H */
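
The usage rule in the comment above, as it looks at a call site: honor the count dma_map_sg() returns and stop early on a zero sg_dma_len(). A kernel-context sketch (not standalone; program_descriptor() is a hypothetical device hook):

int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

for (i = 0; i < count; i++) {
	if (!sg_dma_len(&sgl[i]))
		break;		/* an iommu may terminate the list early */
	program_descriptor(sg_dma_address(&sgl[i]), sg_dma_len(&sgl[i]));
}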
