add idl4k kernel firmware version 1.13.0.105

commit e9070cdc77 (parent 5194d2792e)
Author: Jaroslav Kysela
Date:   2015-03-26 17:22:37 +01:00
31064 changed files with 12769984 additions and 0 deletions


@@ -0,0 +1,25 @@
#
# Copyright 2003 PathScale, Inc.
#
# Licensed under the GPL
#
obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
sysrq.o ksyms.o tls.o
subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
lib/rwsem_64.o
subarch-obj-$(CONFIG_MODULES) += kernel/module.o
ldt-y = ../sys-i386/ldt.o
USER_OBJS := ptrace_user.o
USER_OBJS += user-offsets.s
extra-y += user-offsets.s
UNPROFILE_OBJS := stub_segv.o
CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
include arch/um/scripts/Makefile.rules


@@ -0,0 +1,16 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#ifndef __UM_ARCHPARAM_X86_64_H
#define __UM_ARCHPARAM_X86_64_H
/* No user-accessible fixmap addresses, i.e. vsyscall */
#define FIXADDR_USER_START 0
#define FIXADDR_USER_END 0
#endif


@@ -0,0 +1,119 @@
/*
* Copyright 2003 PathScale, Inc.
* Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*
* Licensed under the GPL
*/
#ifndef __UM_ELF_X86_64_H
#define __UM_ELF_X86_64_H
#include <asm/user.h>
#include "skas.h"
/* x86-64 relocation types, taken from asm-x86_64/elf.h */
#define R_X86_64_NONE 0 /* No reloc */
#define R_X86_64_64 1 /* Direct 64 bit */
#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
#define R_X86_64_PLT32 4 /* 32 bit PLT address */
#define R_X86_64_COPY 5 /* Copy symbol at runtime */
#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
#define R_X86_64_RELATIVE 8 /* Adjust by program base */
#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
offset to GOT */
#define R_X86_64_32 10 /* Direct 32 bit zero extended */
#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
#define R_X86_64_16 12 /* Direct 16 bit zero extended */
#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
#define R_X86_64_8 14 /* Direct 8 bit sign extended */
#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
#define R_X86_64_NUM 16
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_i387_struct elf_fpregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) \
((x)->e_machine == EM_X86_64)
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_X86_64
#define ELF_PLAT_INIT(regs, load_addr) do { \
PT_REGS_RBX(regs) = 0; \
PT_REGS_RCX(regs) = 0; \
PT_REGS_RDX(regs) = 0; \
PT_REGS_RSI(regs) = 0; \
PT_REGS_RDI(regs) = 0; \
PT_REGS_RBP(regs) = 0; \
PT_REGS_RAX(regs) = 0; \
PT_REGS_R8(regs) = 0; \
PT_REGS_R9(regs) = 0; \
PT_REGS_R10(regs) = 0; \
PT_REGS_R11(regs) = 0; \
PT_REGS_R12(regs) = 0; \
PT_REGS_R13(regs) = 0; \
PT_REGS_R14(regs) = 0; \
PT_REGS_R15(regs) = 0; \
} while (0)
#define ELF_CORE_COPY_REGS(pr_reg, _regs) \
(pr_reg)[0] = (_regs)->regs.gp[0]; \
(pr_reg)[1] = (_regs)->regs.gp[1]; \
(pr_reg)[2] = (_regs)->regs.gp[2]; \
(pr_reg)[3] = (_regs)->regs.gp[3]; \
(pr_reg)[4] = (_regs)->regs.gp[4]; \
(pr_reg)[5] = (_regs)->regs.gp[5]; \
(pr_reg)[6] = (_regs)->regs.gp[6]; \
(pr_reg)[7] = (_regs)->regs.gp[7]; \
(pr_reg)[8] = (_regs)->regs.gp[8]; \
(pr_reg)[9] = (_regs)->regs.gp[9]; \
(pr_reg)[10] = (_regs)->regs.gp[10]; \
(pr_reg)[11] = (_regs)->regs.gp[11]; \
(pr_reg)[12] = (_regs)->regs.gp[12]; \
(pr_reg)[13] = (_regs)->regs.gp[13]; \
(pr_reg)[14] = (_regs)->regs.gp[14]; \
(pr_reg)[15] = (_regs)->regs.gp[15]; \
(pr_reg)[16] = (_regs)->regs.gp[16]; \
(pr_reg)[17] = (_regs)->regs.gp[17]; \
(pr_reg)[18] = (_regs)->regs.gp[18]; \
(pr_reg)[19] = (_regs)->regs.gp[19]; \
(pr_reg)[20] = (_regs)->regs.gp[20]; \
(pr_reg)[21] = current->thread.arch.fs; \
(pr_reg)[22] = 0; \
(pr_reg)[23] = 0; \
(pr_reg)[24] = 0; \
(pr_reg)[25] = 0; \
(pr_reg)[26] = 0;
extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
#ifdef TIF_IA32 /* XXX */
#error XXX, indeed
clear_thread_flag(TIF_IA32);
#endif
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
extern long elf_aux_hwcap;
#define ELF_HWCAP (elf_aux_hwcap)
#define ELF_PLATFORM "x86_64"
#define SET_PERSONALITY(ex) do ; while(0)
#endif


@@ -0,0 +1,20 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#ifndef __UM_MODULE_X86_64_H
#define __UM_MODULE_X86_64_H
/* UML is simple */
struct mod_arch_specific
{
};
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Ehdr Elf64_Ehdr
#endif


@@ -0,0 +1,56 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#ifndef __UM_PROCESSOR_X86_64_H
#define __UM_PROCESSOR_X86_64_H
/* include faultinfo structure */
#include "sysdep/faultinfo.h"
struct arch_thread {
unsigned long debugregs[8];
int debugregs_seq;
unsigned long fs;
struct faultinfo faultinfo;
};
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
__asm__ __volatile__("rep;nop": : :"memory");
}
#define cpu_relax() rep_nop()
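/*
 * Illustrative only (not in the original header): cpu_relax() belongs in
 * the body of a poll loop, e.g.
 *
 *	while (!*(volatile int *)&flag)
 *		cpu_relax();
 *
 * The PAUSE hint cuts power and avoids pipeline mis-speculation penalties
 * while spinning.
 */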
#define INIT_ARCH_THREAD { .debugregs = { [ 0 ... 7 ] = 0 }, \
.debugregs_seq = 0, \
.fs = 0, \
.faultinfo = { 0, 0, 0 } }
static inline void arch_flush_thread(struct arch_thread *thread)
{
}
static inline void arch_copy_thread(struct arch_thread *from,
struct arch_thread *to)
{
to->fs = from->fs;
}
#include <asm/user.h>
#define current_text_addr() \
({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; })
#define ARCH_IS_STACKGROW(address) \
(address + 128 >= UPT_SP(&current->thread.regs.regs))
#define KSTK_EIP(tsk) KSTK_REG(tsk, RIP)
#define KSTK_ESP(tsk) KSTK_REG(tsk, RSP)
#include "asm/processor-generic.h"
#endif


@@ -0,0 +1,72 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#ifndef __UM_PTRACE_X86_64_H
#define __UM_PTRACE_X86_64_H
#include "linux/compiler.h"
#include "asm/errno.h"
#define __FRAME_OFFSETS /* Needed to get the R* macros */
#include "asm/ptrace-generic.h"
#define HOST_AUDIT_ARCH AUDIT_ARCH_X86_64
#define PT_REGS_RBX(r) UPT_RBX(&(r)->regs)
#define PT_REGS_RCX(r) UPT_RCX(&(r)->regs)
#define PT_REGS_RDX(r) UPT_RDX(&(r)->regs)
#define PT_REGS_RSI(r) UPT_RSI(&(r)->regs)
#define PT_REGS_RDI(r) UPT_RDI(&(r)->regs)
#define PT_REGS_RBP(r) UPT_RBP(&(r)->regs)
#define PT_REGS_RAX(r) UPT_RAX(&(r)->regs)
#define PT_REGS_R8(r) UPT_R8(&(r)->regs)
#define PT_REGS_R9(r) UPT_R9(&(r)->regs)
#define PT_REGS_R10(r) UPT_R10(&(r)->regs)
#define PT_REGS_R11(r) UPT_R11(&(r)->regs)
#define PT_REGS_R12(r) UPT_R12(&(r)->regs)
#define PT_REGS_R13(r) UPT_R13(&(r)->regs)
#define PT_REGS_R14(r) UPT_R14(&(r)->regs)
#define PT_REGS_R15(r) UPT_R15(&(r)->regs)
#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
#define PT_REGS_ORIG_RAX(r) UPT_ORIG_RAX(&(r)->regs)
#define PT_REGS_RIP(r) UPT_IP(&(r)->regs)
#define PT_REGS_RSP(r) UPT_SP(&(r)->regs)
#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
/* XXX */
#define user_mode(r) UPT_IS_USER(&(r)->regs)
#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_RAX(r)
#define PT_REGS_SYSCALL_RET(r) PT_REGS_RAX(r)
#define PT_FIX_EXEC_STACK(sp) do ; while(0)
#define profile_pc(regs) PT_REGS_IP(regs)
struct user_desc;
static inline int ptrace_get_thread_area(struct task_struct *child, int idx,
struct user_desc __user *user_desc)
{
return -ENOSYS;
}
static inline int ptrace_set_thread_area(struct task_struct *child, int idx,
struct user_desc __user *user_desc)
{
return -ENOSYS;
}
extern long arch_prctl(struct task_struct *task, int code,
unsigned long __user *addr);
#endif


@@ -0,0 +1,21 @@
/*
* Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL V2
*/
#include <linux/uaccess.h>
/*
* Mostly copied from i386/x86_64 - eliminated the eip < PAGE_OFFSET check
* because it's not relevant in skas mode.
*/
int is_valid_bugaddr(unsigned long eip)
{
unsigned short ud2;
if (probe_kernel_address((unsigned short __user *)eip, ud2))
return 0;
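/* ud2 is the byte pair 0f 0b, i.e. 0x0b0f when read as a little-endian u16 */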
return ud2 == 0x0b0f;
}


@@ -0,0 +1,15 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#include "sysdep/ptrace.h"
void arch_check_bugs(void)
{
}
void arch_examine_signal(int sig, struct uml_pt_regs *regs)
{
}


@@ -0,0 +1,30 @@
/*
* Copyright 2003 PathScale, Inc.
* Copied from arch/x86_64
*
* Licensed under the GPL
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <asm/processor.h>
#include <asm/param.h>
void __delay(unsigned long loops)
{
unsigned long i;
for(i = 0; i < loops; i++)
cpu_relax();
}
void __udelay(unsigned long usecs)
{
unsigned long i, n;
n = (loops_per_jiffy * HZ * usecs) / MILLION;
for(i=0;i<n;i++)
cpu_relax();
}
EXPORT_SYMBOL(__udelay);


@@ -0,0 +1,28 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#include "sysdep/ptrace.h"
/* These two are from asm-um/uaccess.h and linux/module.h, check them. */
struct exception_table_entry
{
unsigned long insn;
unsigned long fixup;
};
const struct exception_table_entry *search_exception_tables(unsigned long add);
int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(address);
if (fixup != 0) {
UPT_IP(regs) = fixup->fixup;
return 1;
}
return 0;
}


@@ -0,0 +1,11 @@
#include <linux/module.h>
#include <asm/string.h>
#include <asm/checksum.h>
/* XXX: we need these because they would be exported by x86_64 */
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
EXPORT_SYMBOL(memcpy);
#else
EXPORT_SYMBOL(__memcpy);
#endif
EXPORT_SYMBOL(csum_partial);


@@ -0,0 +1,16 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/mman.h"
unsigned long vm_stack_flags = __VM_STACK_FLAGS;
unsigned long vm_stack_flags32 = __VM_STACK_FLAGS;
unsigned long vm_data_default_flags = __VM_DATA_DEFAULT_FLAGS;
unsigned long vm_data_default_flags32 = __VM_DATA_DEFAULT_FLAGS;
unsigned long vm_force_exec32 = PROT_EXEC;


@@ -0,0 +1,195 @@
/*
* Copyright 2003 PathScale, Inc.
* Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*
* Licensed under the GPL
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>
#define __FRAME_OFFSETS
#include <asm/ptrace.h>
#include <asm/uaccess.h>
/*
* determines which flags the user has access to.
* 1 = access, 0 = no access
*/
#define FLAG_MASK 0x44dd5UL
int putreg(struct task_struct *child, int regno, unsigned long value)
{
unsigned long tmp;
#ifdef TIF_IA32
/*
* Some code in the 64bit emulation may not be 64bit clean.
* Don't take any chances.
*/
if (test_tsk_thread_flag(child, TIF_IA32))
value &= 0xffffffff;
#endif
switch (regno) {
case FS:
case GS:
case DS:
case ES:
case SS:
case CS:
if (value && (value & 3) != 3)
return -EIO;
value &= 0xffff;
break;
case FS_BASE:
case GS_BASE:
if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
return -EIO;
break;
case EFLAGS:
value &= FLAG_MASK;
tmp = PT_REGS_EFLAGS(&child->thread.regs) & ~FLAG_MASK;
value |= tmp;
break;
}
PT_REGS_SET(&child->thread.regs, regno, value);
return 0;
}
int poke_user(struct task_struct *child, long addr, long data)
{
if ((addr & 3) || addr < 0)
return -EIO;
if (addr < MAX_REG_OFFSET)
return putreg(child, addr, data);
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
(addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
addr = addr >> 2;
if ((addr == 4) || (addr == 5))
return -EIO;
child->thread.arch.debugregs[addr] = data;
return 0;
}
return -EIO;
}
unsigned long getreg(struct task_struct *child, int regno)
{
unsigned long retval = ~0UL;
switch (regno) {
case FS:
case GS:
case DS:
case ES:
case SS:
case CS:
retval = 0xffff;
/* fall through */
default:
retval &= PT_REG(&child->thread.regs, regno);
#ifdef TIF_IA32
if (test_tsk_thread_flag(child, TIF_IA32))
retval &= 0xffffffff;
#endif
}
return retval;
}
int peek_user(struct task_struct *child, long addr, long data)
{
/* read the word at location addr in the USER area. */
unsigned long tmp;
if ((addr & 3) || addr < 0)
return -EIO;
tmp = 0; /* Default return condition */
if (addr < MAX_REG_OFFSET)
tmp = getreg(child, addr);
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
(addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
addr = addr >> 2;
tmp = child->thread.arch.debugregs[addr];
}
return put_user(tmp, (unsigned long *) data);
}
/* XXX Mostly copied from sys-i386 */
int is_syscall(unsigned long addr)
{
unsigned short instr;
int n;
n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
if (n) {
/*
* access_process_vm() grants access to vsyscall and stub,
* while copy_from_user doesn't. Maybe access_process_vm is
* slow, but that doesn't matter, since it will be called only
* in case of singlestepping, if copy_from_user failed.
*/
n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
if (n != sizeof(instr)) {
printk("is_syscall : failed to read instruction from "
"0x%lx\n", addr);
return 1;
}
}
/* syscall: the bytes 0f 05, read as a little-endian u16 */
return instr == 0x050f;
}
int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
long fpregs[HOST_FP_SIZE];
BUG_ON(sizeof(*buf) != sizeof(fpregs));
err = save_fp_registers(userspace_pid[cpu], fpregs);
if (err)
return err;
n = copy_to_user(buf, fpregs, sizeof(fpregs));
if (n > 0)
return -EFAULT;
return n;
}
int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
int n, cpu = ((struct thread_info *) child->stack)->cpu;
long fpregs[HOST_FP_SIZE];
BUG_ON(sizeof(*buf) != sizeof(fpregs));
n = copy_from_user(fpregs, buf, sizeof(fpregs));
if (n > 0)
return -EFAULT;
return restore_fp_registers(userspace_pid[cpu], fpregs);
}
long subarch_ptrace(struct task_struct *child, long request, long addr,
long data)
{
int ret = -EIO;
switch (request) {
case PTRACE_GETFPXREGS: /* Get the child FPU state. */
ret = get_fpregs((struct user_i387_struct __user *) data,
child);
break;
case PTRACE_SETFPXREGS: /* Set the child FPU state. */
ret = set_fpregs((struct user_i387_struct __user *) data,
child);
break;
}
return ret;
}


@@ -0,0 +1,22 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#include <errno.h>
#include "ptrace_user.h"
int ptrace_getregs(long pid, unsigned long *regs_out)
{
if (ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0)
return -errno;
return(0);
}
int ptrace_setregs(long pid, unsigned long *regs_out)
{
if (ptrace(PTRACE_SETREGS, pid, 0, regs_out) < 0)
return -errno;
return(0);
}


@@ -0,0 +1,54 @@
#
# arch/x86_64/setjmp.S
#
# setjmp/longjmp for the x86-64 architecture
#
#
# The jmp_buf is assumed to contain the following, in order:
# %rbx
# %rsp (post-return)
# %rbp
# %r12
# %r13
# %r14
# %r15
# <return address>
#
.text
.align 4
.globl setjmp
.type setjmp, @function
setjmp:
pop %rsi # Return address, and adjust the stack
xorl %eax,%eax # Return value
movq %rbx,(%rdi)
movq %rsp,8(%rdi) # Post-return %rsp!
push %rsi # Make the call/return stack happy
movq %rbp,16(%rdi)
movq %r12,24(%rdi)
movq %r13,32(%rdi)
movq %r14,40(%rdi)
movq %r15,48(%rdi)
movq %rsi,56(%rdi) # Return address
ret
.size setjmp,.-setjmp
.text
.align 4
.globl longjmp
.type longjmp, @function
longjmp:
movl %esi,%eax # Return value (int)
movq (%rdi),%rbx
movq 8(%rdi),%rsp
movq 16(%rdi),%rbp
movq 24(%rdi),%r12
movq 32(%rdi),%r13
movq 40(%rdi),%r14
movq 48(%rdi),%r15
jmp *56(%rdi)
.size longjmp,.-longjmp
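#
# For illustration (not part of the original file): a minimal C caller of
# this pair, assuming the jmp_buf layout from archsetjmp.h:
#
#	jmp_buf env;
#	if (setjmp(env) == 0)
#		longjmp(env, 1);	/* resumes at setjmp, which returns 1 */
#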


@@ -0,0 +1,24 @@
/*
* arch/um/include/sysdep-x86_64/archsetjmp.h
*/
#ifndef _KLIBC_ARCHSETJMP_H
#define _KLIBC_ARCHSETJMP_H
struct __jmp_buf {
unsigned long __rbx;
unsigned long __rsp;
unsigned long __rbp;
unsigned long __r12;
unsigned long __r13;
unsigned long __r14;
unsigned long __r15;
unsigned long __rip;
};
typedef struct __jmp_buf jmp_buf[1];
#define JB_IP __rip
#define JB_SP __rsp
#endif /* _KLIBC_ARCHSETJMP_H */


@@ -0,0 +1,7 @@
#ifndef __SYSDEP_X86_64_BARRIER_H
#define __SYSDEP_X86_64_BARRIER_H
/* Copied from include/asm-x86_64 for use by userspace. */
#define mb() asm volatile("mfence":::"memory")
#endif


@@ -0,0 +1,144 @@
/*
* Licensed under the GPL
*/
#ifndef __UM_SYSDEP_CHECKSUM_H
#define __UM_SYSDEP_CHECKSUM_H
#include "linux/string.h"
#include "linux/in6.h"
#include "asm/uaccess.h"
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* access_ok().
*/
static __inline__
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
{
memcpy(dst, src, len);
return(csum_partial(dst, len, sum));
}
static __inline__
__wsum csum_partial_copy_from_user(const void __user *src,
void *dst, int len, __wsum sum,
int *err_ptr)
{
if (copy_from_user(dst, src, len)) {
*err_ptr = -EFAULT;
return (__force __wsum)-1;
}
return csum_partial(dst, len, sum);
}
/**
* csum_fold - Fold and invert a 32bit checksum.
* sum: 32bit unfolded sum
*
* Fold a 32bit running checksum to 16bit and invert it. This is usually
* the last step before putting a checksum into a packet.
* Make sure not to mix with 64bit checksums.
*/
static inline __sum16 csum_fold(__wsum sum)
{
__asm__(
" addl %1,%0\n"
" adcl $0xffff,%0"
: "=r" (sum)
: "r" ((__force u32)sum << 16),
"0" ((__force u32)sum & 0xffff0000)
);
return (__force __sum16)(~(__force u32)sum >> 16);
}
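/*
 * For reference, a portable C equivalent of the asm above - a sketch, not
 * part of the original file. Folding twice absorbs the carry that the
 * first addition can produce:
 */
static inline __sum16 csum_fold_generic(__wsum sum)
{
	u32 s = (__force u32)sum;

	s = (s & 0xffff) + (s >> 16);	/* fold high half into low half */
	s = (s & 0xffff) + (s >> 16);	/* absorb a possible carry */
	return (__force __sum16)~s;
}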
/**
* csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
* @sum: initial sum to be added in (32bit unfolded)
*
* Returns the pseudo header checksum of the input data. The result is
* 32bit unfolded.
*/
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
asm(" addl %1, %0\n"
" adcl %2, %0\n"
" adcl %3, %0\n"
" adcl $0, %0\n"
: "=r" (sum)
: "g" (daddr), "g" (saddr), "g" ((len + proto) << 8), "0" (sum));
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
/**
* ip_fast_csum - Compute the IPv4 header checksum efficiently.
* iph: ipv4 header
* ihl: length of header / 4
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
asm( " movl (%1), %0\n"
" subl $4, %2\n"
" jbe 2f\n"
" addl 4(%1), %0\n"
" adcl 8(%1), %0\n"
" adcl 12(%1), %0\n"
"1: adcl 16(%1), %0\n"
" lea 4(%1), %1\n"
" decl %2\n"
" jne 1b\n"
" adcl $0, %0\n"
" movl %0, %2\n"
" shrl $16, %0\n"
" addw %w2, %w0\n"
" adcl $0, %0\n"
" notl %0\n"
"2:"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "memory");
return (__force __sum16)sum;
}
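/*
 * Typical call (a sketch, not in the original file): checksum an IPv4
 * header in place, where ihl is the header length in 32-bit words as
 * carried in the header itself:
 *
 *	ip->check = 0;
 *	ip->check = ip_fast_csum(ip, ip->ihl);
 */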
static inline unsigned add32_with_carry(unsigned a, unsigned b)
{
asm("addl %2,%0\n\t"
"adcl $0,%0"
: "=r" (a)
: "0" (a), "r" (b));
return a;
}
extern __sum16 ip_compute_csum(const void *buff, int len);
#endif


@@ -0,0 +1,29 @@
/*
* Copyright (C) 2004 Fujitsu Siemens Computers GmbH
* Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
* Licensed under the GPL
*/
#ifndef __FAULTINFO_X86_64_H
#define __FAULTINFO_X86_64_H
/* this structure contains the full arch-specific faultinfo
* from the traps.
* On i386, ptrace_faultinfo unfortunately doesn't provide
* all the info, since trap_no is missing.
* All common elements are defined at the same position in
* both structures, thus making it easy to copy the
* contents without knowledge about the structure elements.
*/
struct faultinfo {
int error_code; /* in ptrace_faultinfo misleadingly called is_write */
unsigned long cr2; /* in ptrace_faultinfo called addr */
int trap_no; /* missing in ptrace_faultinfo */
};
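/*
 * For reference: the x86 page fault error code encodes bit 0 = protection
 * violation (vs. page not present), bit 1 = write access and bit 2 = user
 * mode, so FAULT_WRITE() below tests bit 1.
 */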
#define FAULT_WRITE(fi) ((fi).error_code & 2)
#define FAULT_ADDRESS(fi) ((fi).cr2)
#define PTRACE_FULL_FAULTINFO 1
#endif


@@ -0,0 +1,38 @@
#ifndef __ASM_HOST_LDT_X86_64_H
#define __ASM_HOST_LDT_X86_64_H
#include <asm/ldt.h>
/*
* macros stolen from include/asm-x86_64/desc.h
*/
#define LDT_entry_a(info) \
((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
/* Don't allow setting of the lm bit. It is useless anyway because
* 64bit system calls require __USER_CS. */
#define LDT_entry_b(info) \
(((info)->base_addr & 0xff000000) | \
(((info)->base_addr & 0x00ff0000) >> 16) | \
((info)->limit & 0xf0000) | \
(((info)->read_exec_only ^ 1) << 9) | \
((info)->contents << 10) | \
(((info)->seg_not_present ^ 1) << 15) | \
((info)->seg_32bit << 22) | \
((info)->limit_in_pages << 23) | \
((info)->useable << 20) | \
/* ((info)->lm << 21) | */ \
0x7000)
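/*
 * Usage sketch (not in the original header): split a filled-in
 * struct user_desc into the two 32-bit halves of an LDT descriptor:
 *
 *	u32 entry_1 = LDT_entry_a(&info);
 *	u32 entry_2 = LDT_entry_b(&info);
 */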
#define LDT_empty(info) (\
(info)->base_addr == 0 && \
(info)->limit == 0 && \
(info)->contents == 0 && \
(info)->read_exec_only == 1 && \
(info)->seg_32bit == 0 && \
(info)->limit_in_pages == 0 && \
(info)->seg_not_present == 1 && \
(info)->useable == 0 && \
(info)->lm == 0)
#endif


@@ -0,0 +1,23 @@
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/elf.h>
#include <linux/crypto.h>
#include <asm/page.h>
#include <asm/mman.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define DEFINE_STR1(x) #x
#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " DEFINE_STR1(val) " " #val: : )
#define BLANK() asm volatile("\n->" : : )
#define OFFSET(sym, str, mem) \
DEFINE(sym, offsetof(struct str, mem));
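/*
 * How this works (sketch): each DEFINE() forces the compiler to emit an
 * assembly line such as
 *	->UM_KERN_PAGE_SIZE $4096 PAGE_SIZE
 * and the kbuild sed script rewrites every "->name value" line into a
 * "#define name value" in the generated offsets header.
 */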
void foo(void)
{
#include <common-offsets.h>
}


@@ -0,0 +1,239 @@
/*
* Copyright 2003 PathScale, Inc.
* Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*
* Licensed under the GPL
*/
#ifndef __SYSDEP_X86_64_PTRACE_H
#define __SYSDEP_X86_64_PTRACE_H
#include "user_constants.h"
#include "sysdep/faultinfo.h"
#define MAX_REG_OFFSET (UM_FRAME_SIZE)
#define MAX_REG_NR ((MAX_REG_OFFSET) / sizeof(unsigned long))
#include "skas_ptregs.h"
#define REGS_IP(r) ((r)[HOST_IP])
#define REGS_SP(r) ((r)[HOST_SP])
#define REGS_RBX(r) ((r)[HOST_RBX])
#define REGS_RCX(r) ((r)[HOST_RCX])
#define REGS_RDX(r) ((r)[HOST_RDX])
#define REGS_RSI(r) ((r)[HOST_RSI])
#define REGS_RDI(r) ((r)[HOST_RDI])
#define REGS_RBP(r) ((r)[HOST_RBP])
#define REGS_RAX(r) ((r)[HOST_RAX])
#define REGS_R8(r) ((r)[HOST_R8])
#define REGS_R9(r) ((r)[HOST_R9])
#define REGS_R10(r) ((r)[HOST_R10])
#define REGS_R11(r) ((r)[HOST_R11])
#define REGS_R12(r) ((r)[HOST_R12])
#define REGS_R13(r) ((r)[HOST_R13])
#define REGS_R14(r) ((r)[HOST_R14])
#define REGS_R15(r) ((r)[HOST_R15])
#define REGS_CS(r) ((r)[HOST_CS])
#define REGS_EFLAGS(r) ((r)[HOST_EFLAGS])
#define REGS_SS(r) ((r)[HOST_SS])
#define HOST_FS_BASE 21
#define HOST_GS_BASE 22
#define HOST_DS 23
#define HOST_ES 24
#define HOST_FS 25
#define HOST_GS 26
/* Also defined in asm/ptrace-x86_64.h, but not in libc headers. So, these
* are already defined for kernel code, but not for userspace code.
*/
#ifndef FS_BASE
/* These aren't defined in ptrace.h, but exist in struct user_regs_struct,
* which is what x86_64 ptrace actually uses.
*/
#define FS_BASE (HOST_FS_BASE * sizeof(long))
#define GS_BASE (HOST_GS_BASE * sizeof(long))
#define DS (HOST_DS * sizeof(long))
#define ES (HOST_ES * sizeof(long))
#define FS (HOST_FS * sizeof(long))
#define GS (HOST_GS * sizeof(long))
#endif
#define REGS_FS_BASE(r) ((r)[HOST_FS_BASE])
#define REGS_GS_BASE(r) ((r)[HOST_GS_BASE])
#define REGS_DS(r) ((r)[HOST_DS])
#define REGS_ES(r) ((r)[HOST_ES])
#define REGS_FS(r) ((r)[HOST_FS])
#define REGS_GS(r) ((r)[HOST_GS])
#define REGS_ORIG_RAX(r) ((r)[HOST_ORIG_RAX])
#define REGS_SET_SYSCALL_RETURN(r, res) REGS_RAX(r) = (res)
#define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r))
#define REGS_SEGV_IS_FIXABLE(r) SEGV_IS_FIXABLE((r)->trap_type)
#define REGS_FAULT_ADDR(r) ((r)->fault_addr)
#define REGS_FAULT_WRITE(r) FAULT_WRITE((r)->fault_type)
#define REGS_TRAP(r) ((r)->trap_type)
#define REGS_ERR(r) ((r)->fault_type)
struct uml_pt_regs {
unsigned long gp[MAX_REG_NR];
struct faultinfo faultinfo;
long syscall;
int is_user;
};
#define EMPTY_UML_PT_REGS { }
#define UPT_RBX(r) REGS_RBX((r)->gp)
#define UPT_RCX(r) REGS_RCX((r)->gp)
#define UPT_RDX(r) REGS_RDX((r)->gp)
#define UPT_RSI(r) REGS_RSI((r)->gp)
#define UPT_RDI(r) REGS_RDI((r)->gp)
#define UPT_RBP(r) REGS_RBP((r)->gp)
#define UPT_RAX(r) REGS_RAX((r)->gp)
#define UPT_R8(r) REGS_R8((r)->gp)
#define UPT_R9(r) REGS_R9((r)->gp)
#define UPT_R10(r) REGS_R10((r)->gp)
#define UPT_R11(r) REGS_R11((r)->gp)
#define UPT_R12(r) REGS_R12((r)->gp)
#define UPT_R13(r) REGS_R13((r)->gp)
#define UPT_R14(r) REGS_R14((r)->gp)
#define UPT_R15(r) REGS_R15((r)->gp)
#define UPT_CS(r) REGS_CS((r)->gp)
#define UPT_FS_BASE(r) REGS_FS_BASE((r)->gp)
#define UPT_FS(r) REGS_FS((r)->gp)
#define UPT_GS_BASE(r) REGS_GS_BASE((r)->gp)
#define UPT_GS(r) REGS_GS((r)->gp)
#define UPT_DS(r) REGS_DS((r)->gp)
#define UPT_ES(r) REGS_ES((r)->gp)
#define UPT_SS(r) REGS_SS((r)->gp)
#define UPT_ORIG_RAX(r) REGS_ORIG_RAX((r)->gp)
#define UPT_IP(r) REGS_IP((r)->gp)
#define UPT_SP(r) REGS_SP((r)->gp)
#define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp)
#define UPT_SYSCALL_NR(r) ((r)->syscall)
#define UPT_SYSCALL_RET(r) UPT_RAX(r)
extern int user_context(unsigned long sp);
#define UPT_IS_USER(r) ((r)->is_user)
#define UPT_SYSCALL_ARG1(r) UPT_RDI(r)
#define UPT_SYSCALL_ARG2(r) UPT_RSI(r)
#define UPT_SYSCALL_ARG3(r) UPT_RDX(r)
#define UPT_SYSCALL_ARG4(r) UPT_R10(r)
#define UPT_SYSCALL_ARG5(r) UPT_R8(r)
#define UPT_SYSCALL_ARG6(r) UPT_R9(r)
struct syscall_args {
unsigned long args[6];
};
#define SYSCALL_ARGS(r) ((struct syscall_args) \
{ .args = { UPT_SYSCALL_ARG1(r), \
UPT_SYSCALL_ARG2(r), \
UPT_SYSCALL_ARG3(r), \
UPT_SYSCALL_ARG4(r), \
UPT_SYSCALL_ARG5(r), \
UPT_SYSCALL_ARG6(r) } } )
#define UPT_REG(regs, reg) \
({ unsigned long val; \
switch(reg){ \
case R8: val = UPT_R8(regs); break; \
case R9: val = UPT_R9(regs); break; \
case R10: val = UPT_R10(regs); break; \
case R11: val = UPT_R11(regs); break; \
case R12: val = UPT_R12(regs); break; \
case R13: val = UPT_R13(regs); break; \
case R14: val = UPT_R14(regs); break; \
case R15: val = UPT_R15(regs); break; \
case RIP: val = UPT_IP(regs); break; \
case RSP: val = UPT_SP(regs); break; \
case RAX: val = UPT_RAX(regs); break; \
case RBX: val = UPT_RBX(regs); break; \
case RCX: val = UPT_RCX(regs); break; \
case RDX: val = UPT_RDX(regs); break; \
case RSI: val = UPT_RSI(regs); break; \
case RDI: val = UPT_RDI(regs); break; \
case RBP: val = UPT_RBP(regs); break; \
case ORIG_RAX: val = UPT_ORIG_RAX(regs); break; \
case CS: val = UPT_CS(regs); break; \
case SS: val = UPT_SS(regs); break; \
case FS_BASE: val = UPT_FS_BASE(regs); break; \
case GS_BASE: val = UPT_GS_BASE(regs); break; \
case DS: val = UPT_DS(regs); break; \
case ES: val = UPT_ES(regs); break; \
case FS: val = UPT_FS(regs); break; \
case GS: val = UPT_GS(regs); break; \
case EFLAGS: val = UPT_EFLAGS(regs); break; \
default : \
panic("Bad register in UPT_REG : %d\n", reg); \
val = -1; \
} \
val; \
})
#define UPT_SET(regs, reg, val) \
({ unsigned long __upt_val = val; \
switch(reg){ \
case R8: UPT_R8(regs) = __upt_val; break; \
case R9: UPT_R9(regs) = __upt_val; break; \
case R10: UPT_R10(regs) = __upt_val; break; \
case R11: UPT_R11(regs) = __upt_val; break; \
case R12: UPT_R12(regs) = __upt_val; break; \
case R13: UPT_R13(regs) = __upt_val; break; \
case R14: UPT_R14(regs) = __upt_val; break; \
case R15: UPT_R15(regs) = __upt_val; break; \
case RIP: UPT_IP(regs) = __upt_val; break; \
case RSP: UPT_SP(regs) = __upt_val; break; \
case RAX: UPT_RAX(regs) = __upt_val; break; \
case RBX: UPT_RBX(regs) = __upt_val; break; \
case RCX: UPT_RCX(regs) = __upt_val; break; \
case RDX: UPT_RDX(regs) = __upt_val; break; \
case RSI: UPT_RSI(regs) = __upt_val; break; \
case RDI: UPT_RDI(regs) = __upt_val; break; \
case RBP: UPT_RBP(regs) = __upt_val; break; \
case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \
case CS: UPT_CS(regs) = __upt_val; break; \
case SS: UPT_SS(regs) = __upt_val; break; \
case FS_BASE: UPT_FS_BASE(regs) = __upt_val; break; \
case GS_BASE: UPT_GS_BASE(regs) = __upt_val; break; \
case DS: UPT_DS(regs) = __upt_val; break; \
case ES: UPT_ES(regs) = __upt_val; break; \
case FS: UPT_FS(regs) = __upt_val; break; \
case GS: UPT_GS(regs) = __upt_val; break; \
case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \
default : \
panic("Bad register in UPT_SET : %d\n", reg); \
break; \
} \
__upt_val; \
})
#define UPT_SET_SYSCALL_RETURN(r, res) \
REGS_SET_SYSCALL_RETURN((r)->regs, (res))
#define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
#define UPT_SEGV_IS_FIXABLE(r) REGS_SEGV_IS_FIXABLE(&r->skas)
#define UPT_FAULTINFO(r) (&(r)->faultinfo)
static inline void arch_init_registers(int pid)
{
}
#endif


@@ -0,0 +1,77 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#ifndef __SYSDEP_X86_64_PTRACE_USER_H__
#define __SYSDEP_X86_64_PTRACE_USER_H__
#define __FRAME_OFFSETS
#include <sys/ptrace.h>
#include <linux/ptrace.h>
#include <asm/ptrace.h>
#undef __FRAME_OFFSETS
#include "user_constants.h"
#define PT_INDEX(off) ((off) / sizeof(unsigned long))
#define PT_SYSCALL_NR(regs) ((regs)[PT_INDEX(ORIG_RAX)])
#define PT_SYSCALL_NR_OFFSET (ORIG_RAX)
#define PT_SYSCALL_ARG1(regs) (((unsigned long *) (regs))[PT_INDEX(RDI)])
#define PT_SYSCALL_ARG1_OFFSET (RDI)
#define PT_SYSCALL_ARG2(regs) (((unsigned long *) (regs))[PT_INDEX(RSI)])
#define PT_SYSCALL_ARG2_OFFSET (RSI)
#define PT_SYSCALL_ARG3(regs) (((unsigned long *) (regs))[PT_INDEX(RDX)])
#define PT_SYSCALL_ARG3_OFFSET (RDX)
#define PT_SYSCALL_ARG4(regs) (((unsigned long *) (regs))[PT_INDEX(RCX)])
#define PT_SYSCALL_ARG4_OFFSET (RCX)
#define PT_SYSCALL_ARG5(regs) (((unsigned long *) (regs))[PT_INDEX(R8)])
#define PT_SYSCALL_ARG5_OFFSET (R8)
#define PT_SYSCALL_ARG6(regs) (((unsigned long *) (regs))[PT_INDEX(R9)])
#define PT_SYSCALL_ARG6_OFFSET (R9)
#define PT_SYSCALL_RET_OFFSET (RAX)
#define PT_IP_OFFSET (RIP)
#define PT_IP(regs) ((regs)[PT_INDEX(RIP)])
#define PT_SP_OFFSET (RSP)
#define PT_SP(regs) ((regs)[PT_INDEX(RSP)])
#define PT_ORIG_RAX_OFFSET (ORIG_RAX)
#define PT_ORIG_RAX(regs) ((regs)[PT_INDEX(ORIG_RAX)])
/*
* x86_64 FC3 doesn't define this in /usr/include/linux/ptrace.h even though
* it's defined in the kernel's include/linux/ptrace.h. Additionally, use the
* 2.4 name and value for 2.4 host compatibility.
*/
#ifndef PTRACE_OLDSETOPTIONS
#define PTRACE_OLDSETOPTIONS 21
#endif
/*
* These are before the system call, so the system call number is RAX
* rather than ORIG_RAX, and arg4 is R10 rather than RCX
*/
#define REGS_SYSCALL_NR PT_INDEX(RAX)
#define REGS_SYSCALL_ARG1 PT_INDEX(RDI)
#define REGS_SYSCALL_ARG2 PT_INDEX(RSI)
#define REGS_SYSCALL_ARG3 PT_INDEX(RDX)
#define REGS_SYSCALL_ARG4 PT_INDEX(R10)
#define REGS_SYSCALL_ARG5 PT_INDEX(R8)
#define REGS_SYSCALL_ARG6 PT_INDEX(R9)
#define REGS_IP_INDEX PT_INDEX(RIP)
#define REGS_SP_INDEX PT_INDEX(RSP)
#define FP_SIZE (HOST_FP_SIZE)
#endif


@@ -0,0 +1,45 @@
#ifndef __SYSDEP_X86_64_SC_H
#define __SYSDEP_X86_64_SC_H
/* Copyright (C) 2003 - 2004 PathScale, Inc
* Released under the GPL
*/
#include <user_constants.h>
#define SC_OFFSET(sc, field) \
*((unsigned long *) &(((char *) (sc))[HOST_##field]))
#define SC_RBX(sc) SC_OFFSET(sc, SC_RBX)
#define SC_RCX(sc) SC_OFFSET(sc, SC_RCX)
#define SC_RDX(sc) SC_OFFSET(sc, SC_RDX)
#define SC_RSI(sc) SC_OFFSET(sc, SC_RSI)
#define SC_RDI(sc) SC_OFFSET(sc, SC_RDI)
#define SC_RBP(sc) SC_OFFSET(sc, SC_RBP)
#define SC_RAX(sc) SC_OFFSET(sc, SC_RAX)
#define SC_R8(sc) SC_OFFSET(sc, SC_R8)
#define SC_R9(sc) SC_OFFSET(sc, SC_R9)
#define SC_R10(sc) SC_OFFSET(sc, SC_R10)
#define SC_R11(sc) SC_OFFSET(sc, SC_R11)
#define SC_R12(sc) SC_OFFSET(sc, SC_R12)
#define SC_R13(sc) SC_OFFSET(sc, SC_R13)
#define SC_R14(sc) SC_OFFSET(sc, SC_R14)
#define SC_R15(sc) SC_OFFSET(sc, SC_R15)
#define SC_IP(sc) SC_OFFSET(sc, SC_IP)
#define SC_SP(sc) SC_OFFSET(sc, SC_SP)
#define SC_CR2(sc) SC_OFFSET(sc, SC_CR2)
#define SC_ERR(sc) SC_OFFSET(sc, SC_ERR)
#define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO)
#define SC_CS(sc) SC_OFFSET(sc, SC_CS)
#define SC_FS(sc) SC_OFFSET(sc, SC_FS)
#define SC_GS(sc) SC_OFFSET(sc, SC_GS)
#define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS)
#define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK)
#define SC_SS(sc) SC_OFFSET(sc, SC_SS)
#if 0
#define SC_ORIG_RAX(sc) SC_OFFSET(sc, SC_ORIG_RAX)
#define SC_DS(sc) SC_OFFSET(sc, SC_DS)
#define SC_ES(sc) SC_OFFSET(sc, SC_ES)
#endif
#endif


@@ -0,0 +1,27 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#ifndef __SYSDEP_X86_64_SIGCONTEXT_H
#define __SYSDEP_X86_64_SIGCONTEXT_H
#include <sysdep/sc.h>
#define IP_RESTART_SYSCALL(ip) ((ip) -= 2)
#define GET_FAULTINFO_FROM_SC(fi, sc) \
{ \
(fi).cr2 = SC_CR2(sc); \
(fi).error_code = SC_ERR(sc); \
(fi).trap_no = SC_TRAPNO(sc); \
}
/* This is a page fault */
#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
/* No broken SKAS API, which doesn't pass trap_no, here. */
#define SEGV_MAYBE_FIXABLE(fi) 0
#endif


@@ -0,0 +1,22 @@
/*
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#ifndef __SYSDEP_X86_64_SKAS_PTRACE_H
#define __SYSDEP_X86_64_SKAS_PTRACE_H
struct ptrace_faultinfo {
int is_write;
unsigned long addr;
};
struct ptrace_ldt {
int func;
void *ptr;
unsigned long bytecount;
};
#define PTRACE_LDT 54
#endif


@@ -0,0 +1,107 @@
/*
* Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H
#include <sys/mman.h>
#include <asm/unistd.h>
#include <sysdep/ptrace_user.h>
#include "as-layout.h"
#include "stub-data.h"
#include "kern_constants.h"
extern void stub_segv_handler(int sig);
extern void stub_clone_handler(void);
#define STUB_SYSCALL_RET PT_INDEX(RAX)
#define STUB_MMAP_NR __NR_mmap
#define MMAP_OFFSET(o) (o)
#define __syscall_clobber "r11","rcx","memory"
#define __syscall "syscall"
static inline long stub_syscall0(long syscall)
{
long ret;
__asm__ volatile (__syscall
: "=a" (ret)
: "0" (syscall) : __syscall_clobber );
return ret;
}
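/*
 * A one-argument variant, shown for completeness - a sketch following the
 * same pattern (the original file jumps straight from 0 to 2 arguments):
 */
static inline long stub_syscall1(long syscall, long arg1)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1) : __syscall_clobber );

	return ret;
}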
static inline long stub_syscall2(long syscall, long arg1, long arg2)
{
long ret;
__asm__ volatile (__syscall
: "=a" (ret)
: "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );
return ret;
}
static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
{
long ret;
__asm__ volatile (__syscall
: "=a" (ret)
: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
: __syscall_clobber );
return ret;
}
static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
long arg4)
{
long ret;
__asm__ volatile ("movq %5,%%r10 ; " __syscall
: "=a" (ret)
: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
"g" (arg4)
: __syscall_clobber, "r10" );
return ret;
}
static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
long arg4, long arg5)
{
long ret;
__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
: "=a" (ret)
: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
"g" (arg4), "g" (arg5)
: __syscall_clobber, "r10", "r8" );
return ret;
}
static inline void trap_myself(void)
{
__asm("int3");
}
static inline void remap_stack(long fd, unsigned long offset)
{
__asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; "
"movq %6, %%r9; " __syscall "; movq %7, %%rbx ; "
"movq %%rax, (%%rbx)":
: "a" (STUB_MMAP_NR), "D" (STUB_DATA),
"S" (UM_KERN_PAGE_SIZE),
"d" (PROT_READ | PROT_WRITE),
"g" (MAP_FIXED | MAP_SHARED), "g" (fd),
"g" (offset),
"i" (&((struct stub_data *) STUB_DATA)->err)
: __syscall_clobber, "r10", "r8", "r9" );
}
#endif


@@ -0,0 +1,33 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#ifndef __SYSDEP_X86_64_SYSCALLS_H__
#define __SYSDEP_X86_64_SYSCALLS_H__
#include <linux/msg.h>
#include <linux/shm.h>
#include <kern_constants.h>
typedef long syscall_handler_t(void);
extern syscall_handler_t *sys_call_table[];
#define EXECUTE_SYSCALL(syscall, regs) \
(((long (*)(long, long, long, long, long, long)) \
(*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
UPT_SYSCALL_ARG2(&regs->regs), \
UPT_SYSCALL_ARG3(&regs->regs), \
UPT_SYSCALL_ARG4(&regs->regs), \
UPT_SYSCALL_ARG5(&regs->regs), \
UPT_SYSCALL_ARG6(&regs->regs)))
extern long old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
extern syscall_handler_t sys_modify_ldt;
extern syscall_handler_t sys_arch_prctl;
#endif


@@ -0,0 +1,132 @@
#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_
#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif
extern unsigned long arch_align_stack(unsigned long sp);
void default_idle(void);
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#ifdef CONFIG_X86_32
/*
* Some non-Intel clones support out of order store. wmb() ceases to be a
* nop for these.
*/
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
/**
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data returned by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb() rmb()
#else
# define smp_rmb() barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb() wmb()
#else
# define smp_wmb() barrier()
#endif
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
*
* (Could use an alternative three way for this if there was one.)
*/
static inline void rdtsc_barrier(void)
{
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
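/*
 * Usage sketch (not part of the original header): fence a TSC read so the
 * load cannot be hoisted above the instructions being timed.
 */
static inline unsigned long long rdtsc_ordered_sketch(void)
{
	unsigned int lo, hi;

	rdtsc_barrier();
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;
}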
#endif


@@ -0,0 +1,29 @@
#ifndef _SYSDEP_TLS_H
#define _SYSDEP_TLS_H
# ifndef __KERNEL__
/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
* may be named user_desc (but in 2.4, and in headers matching its API, it
* was named modify_ldt_ldt_s). */
typedef struct um_dup_user_desc {
unsigned int entry_number;
unsigned int base_addr;
unsigned int limit;
unsigned int seg_32bit:1;
unsigned int contents:2;
unsigned int read_exec_only:1;
unsigned int limit_in_pages:1;
unsigned int seg_not_present:1;
unsigned int useable:1;
unsigned int lm:1;
} user_desc_t;
# else /* __KERNEL__ */
# include <ldt.h>
typedef struct user_desc user_desc_t;
# endif /* __KERNEL__ */
#endif /* _SYSDEP_TLS_H */


@@ -0,0 +1,33 @@
/*
* Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
* Copyright 2003 PathScale, Inc.
* Licensed under the GPL
*/
#ifndef __VM_FLAGS_X86_64_H
#define __VM_FLAGS_X86_64_H
#define __VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define __VM_STACK_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | \
VM_EXEC | VM_MAYREAD | VM_MAYWRITE | \
VM_MAYEXEC)
extern unsigned long vm_stack_flags, vm_stack_flags32;
extern unsigned long vm_data_default_flags, vm_data_default_flags32;
extern unsigned long vm_force_exec32;
#ifdef TIF_IA32
#define VM_DATA_DEFAULT_FLAGS \
(test_thread_flag(TIF_IA32) ? vm_data_default_flags32 : \
vm_data_default_flags)
#define VM_STACK_DEFAULT_FLAGS \
(test_thread_flag(TIF_IA32) ? vm_stack_flags32 : vm_stack_flags)
#endif
#define VM_DATA_DEFAULT_FLAGS vm_data_default_flags
#define VM_STACK_DEFAULT_FLAGS vm_stack_flags
#endif


@@ -0,0 +1,291 @@
/*
* Copyright (C) 2003 PathScale, Inc.
* Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include "frame_kern.h"
#include "skas.h"
void copy_sc(struct uml_pt_regs *regs, void *from)
{
struct sigcontext *sc = from;
#define GETREG(regs, regno, sc, regname) \
(regs)->gp[(regno) / sizeof(unsigned long)] = (sc)->regname
GETREG(regs, R8, sc, r8);
GETREG(regs, R9, sc, r9);
GETREG(regs, R10, sc, r10);
GETREG(regs, R11, sc, r11);
GETREG(regs, R12, sc, r12);
GETREG(regs, R13, sc, r13);
GETREG(regs, R14, sc, r14);
GETREG(regs, R15, sc, r15);
GETREG(regs, RDI, sc, di);
GETREG(regs, RSI, sc, si);
GETREG(regs, RBP, sc, bp);
GETREG(regs, RBX, sc, bx);
GETREG(regs, RDX, sc, dx);
GETREG(regs, RAX, sc, ax);
GETREG(regs, RCX, sc, cx);
GETREG(regs, RSP, sc, sp);
GETREG(regs, RIP, sc, ip);
GETREG(regs, EFLAGS, sc, flags);
GETREG(regs, CS, sc, cs);
#undef GETREG
}
static int copy_sc_from_user(struct pt_regs *regs,
struct sigcontext __user *from,
struct _fpstate __user *fpp)
{
struct user_i387_struct fp;
int err = 0;
#define GETREG(regs, regno, sc, regname) \
__get_user((regs)->regs.gp[(regno) / sizeof(unsigned long)], \
&(sc)->regname)
err |= GETREG(regs, R8, from, r8);
err |= GETREG(regs, R9, from, r9);
err |= GETREG(regs, R10, from, r10);
err |= GETREG(regs, R11, from, r11);
err |= GETREG(regs, R12, from, r12);
err |= GETREG(regs, R13, from, r13);
err |= GETREG(regs, R14, from, r14);
err |= GETREG(regs, R15, from, r15);
err |= GETREG(regs, RDI, from, di);
err |= GETREG(regs, RSI, from, si);
err |= GETREG(regs, RBP, from, bp);
err |= GETREG(regs, RBX, from, bx);
err |= GETREG(regs, RDX, from, dx);
err |= GETREG(regs, RAX, from, ax);
err |= GETREG(regs, RCX, from, cx);
err |= GETREG(regs, RSP, from, sp);
err |= GETREG(regs, RIP, from, ip);
err |= GETREG(regs, EFLAGS, from, flags);
err |= GETREG(regs, CS, from, cs);
if (err)
return 1;
#undef GETREG
err = copy_from_user(&fp, fpp, sizeof(struct user_i387_struct));
if (err)
return 1;
err = restore_fp_registers(userspace_pid[current_thread_info()->cpu],
(unsigned long *) &fp);
if (err < 0) {
printk(KERN_ERR "copy_sc_from_user - "
"restore_fp_registers failed, errno = %d\n",
-err);
return 1;
}
return 0;
}
static int copy_sc_to_user(struct sigcontext __user *to,
struct _fpstate __user *to_fp, struct pt_regs *regs,
unsigned long mask, unsigned long sp)
{
struct faultinfo * fi = &current->thread.arch.faultinfo;
struct user_i387_struct fp;
int err = 0;
err |= __put_user(0, &to->gs);
err |= __put_user(0, &to->fs);
#define PUTREG(regs, regno, sc, regname) \
__put_user((regs)->regs.gp[(regno) / sizeof(unsigned long)], \
&(sc)->regname)
err |= PUTREG(regs, RDI, to, di);
err |= PUTREG(regs, RSI, to, si);
err |= PUTREG(regs, RBP, to, bp);
/*
* Must use original RSP, which is passed in, rather than what's in
* the pt_regs, because that's already been updated to point at the
* signal frame.
*/
err |= __put_user(sp, &to->sp);
err |= PUTREG(regs, RBX, to, bx);
err |= PUTREG(regs, RDX, to, dx);
err |= PUTREG(regs, RCX, to, cx);
err |= PUTREG(regs, RAX, to, ax);
err |= PUTREG(regs, R8, to, r8);
err |= PUTREG(regs, R9, to, r9);
err |= PUTREG(regs, R10, to, r10);
err |= PUTREG(regs, R11, to, r11);
err |= PUTREG(regs, R12, to, r12);
err |= PUTREG(regs, R13, to, r13);
err |= PUTREG(regs, R14, to, r14);
err |= PUTREG(regs, R15, to, r15);
err |= PUTREG(regs, CS, to, cs); /* XXX x86_64 doesn't do this */
err |= __put_user(fi->cr2, &to->cr2);
err |= __put_user(fi->error_code, &to->err);
err |= __put_user(fi->trap_no, &to->trapno);
err |= PUTREG(regs, RIP, to, ip);
err |= PUTREG(regs, EFLAGS, to, flags);
#undef PUTREG
err |= __put_user(mask, &to->oldmask);
if (err)
return 1;
err = save_fp_registers(userspace_pid[current_thread_info()->cpu],
(unsigned long *) &fp);
if (err < 0) {
printk(KERN_ERR "copy_sc_to_user - save_fp_registers "
"failed, errno = %d\n", -err);
return 1;
}
if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
return 1;
return err;
}
struct rt_sigframe
{
char __user *pretcode;
struct ucontext uc;
struct siginfo info;
struct _fpstate fpstate;
};
#define round_down(m, n) (((m) / (n)) * (n))
int setup_signal_stack_si(unsigned long stack_top, int sig,
struct k_sigaction *ka, struct pt_regs * regs,
siginfo_t *info, sigset_t *set)
{
struct rt_sigframe __user *frame;
unsigned long save_sp = PT_REGS_RSP(regs);
int err = 0;
struct task_struct *me = current;
frame = (struct rt_sigframe __user *)
round_down(stack_top - sizeof(struct rt_sigframe), 16);
/* Subtract 128 for a red zone and 8 for proper alignment */
frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8);
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto out;
if (ka->sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, info);
if (err)
goto out;
}
/*
* Update SP now because the page fault handler refuses to extend
* the stack if the faulting address is too far below the current
* SP, which frame now certainly is. If there's an error, the original
* value is restored on the way out.
* When writing the sigcontext to the stack, we have to write the
* original value, so that's passed to copy_sc_to_user, which does
* the right thing with it.
*/
PT_REGS_RSP(regs) = (unsigned long) frame;
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(save_sp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
set->sig[0], save_sp);
err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
if (sizeof(*set) == 16) {
err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
}
else
err |= __copy_to_user(&frame->uc.uc_sigmask, set,
sizeof(*set));
/*
* Set up to return from userspace. If provided, use a stub
* already in userspace.
*/
/* x86-64 should always use SA_RESTORER. */
if (ka->sa.sa_flags & SA_RESTORER)
err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
else
/* could use a vstub here */
goto restore_sp;
if (err)
goto restore_sp;
/* Set up registers for signal handler */
{
struct exec_domain *ed = current_thread_info()->exec_domain;
if (unlikely(ed && ed->signal_invmap && sig < 32))
sig = ed->signal_invmap[sig];
}
PT_REGS_RDI(regs) = sig;
/* In case the signal handler was declared without prototypes */
PT_REGS_RAX(regs) = 0;
/*
* This also works for non SA_SIGINFO handlers because they expect the
* next argument after the signal number on the stack.
*/
PT_REGS_RSI(regs) = (unsigned long) &frame->info;
PT_REGS_RDX(regs) = (unsigned long) &frame->uc;
PT_REGS_RIP(regs) = (unsigned long) ka->sa.sa_handler;
out:
return err;
restore_sp:
PT_REGS_RSP(regs) = save_sp;
return err;
}
long sys_rt_sigreturn(struct pt_regs *regs)
{
unsigned long sp = PT_REGS_SP(&current->thread.regs);
struct rt_sigframe __user *frame =
(struct rt_sigframe __user *)(sp - 8);
struct ucontext __user *uc = &frame->uc;
sigset_t set;
if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
goto segfault;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext,
&frame->fpstate))
goto segfault;
/* Avoid ERESTART handling */
PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
return PT_REGS_SYSCALL_RET(&current->thread.regs);
segfault:
force_sig(SIGSEGV, current);
return 0;
}


@@ -0,0 +1,66 @@
#include "as-layout.h"
.globl syscall_stub
.section .__syscall_stub, "ax"
syscall_stub:
syscall
/* We don't have 64-bit constants, so this constructs the address
* we need.
*/
movq $(STUB_DATA >> 32), %rbx
salq $32, %rbx
movq $(STUB_DATA & 0xffffffff), %rcx
or %rcx, %rbx
movq %rax, (%rbx)
int3
.globl batch_syscall_stub
batch_syscall_stub:
mov $(STUB_DATA >> 32), %rbx
sal $32, %rbx
mov $(STUB_DATA & 0xffffffff), %rax
or %rax, %rbx
/* load pointer to first operation */
mov %rbx, %rsp
add $0x10, %rsp
again:
/* load length of additional data */
mov 0x0(%rsp), %rax
/* if(length == 0) : end of list */
/* write possible 0 to header */
mov %rax, 8(%rbx)
cmp $0, %rax
jz done
/* save current pointer */
mov %rsp, 8(%rbx)
/* skip additional data */
add %rax, %rsp
/* load syscall-# */
pop %rax
/* load syscall params */
pop %rdi
pop %rsi
pop %rdx
pop %r10
pop %r8
pop %r9
/* execute syscall */
syscall
/* check return value */
pop %rcx
cmp %rcx, %rax
je again
done:
/* save return value */
mov %rax, (%rbx)
/* stop */
int3


@@ -0,0 +1,22 @@
/*
* Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <signal.h>
#include "as-layout.h"
#include "sysdep/stub.h"
#include "sysdep/faultinfo.h"
#include "sysdep/sigcontext.h"
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig)
{
struct ucontext *uc;
__asm__ __volatile__("movq %%rdx, %0" : "=g" (uc) :);
GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA),
&uc->uc_mcontext);
trap_myself();
}


@@ -0,0 +1,70 @@
/*
* System call table for UML/x86-64, copied from arch/x86_64/kernel/syscall.c
* with some changes for UML.
*/
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <kern_constants.h>
#define __NO_STUBS
/*
* Below you can see, in terms of #define's, the differences between the x86-64
* and the UML syscall table.
*/
/* Not going to be implemented by UML, since we have no hardware. */
#define stub_iopl sys_ni_syscall
#define sys_ioperm sys_ni_syscall
/*
* The UML TLS problem. Note that x86_64 does not implement this, so the below
* is needed only for ia32 compatibility.
*/
/* On UML we call it this way ("old" means it's not mmap2) */
#define sys_mmap old_mmap
/*
* On x86-64 sys_uname is actually sys_newuname plus a compatibility trick.
* See arch/x86_64/kernel/sys_x86_64.c
*/
#define sys_uname sys_uname64
#define stub_clone sys_clone
#define stub_fork sys_fork
#define stub_vfork sys_vfork
#define stub_execve sys_execve
#define stub_rt_sigsuspend sys_rt_sigsuspend
#define stub_sigaltstack sys_sigaltstack
#define stub_rt_sigreturn sys_rt_sigreturn
#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
#undef _ASM_X86_UNISTD_64_H
#include "../../x86/include/asm/unistd_64.h"
#undef __SYSCALL
#define __SYSCALL(nr, sym) [ nr ] = sym,
#undef _ASM_X86_UNISTD_64_H
typedef void (*sys_call_ptr_t)(void);
extern void sys_ni_syscall(void);
/*
* We used to have a trick here which made sure that holes in the
* x86_64 table were filled in with sys_ni_syscall, but a comment in
* unistd_64.h says that holes aren't allowed, so the trick was
* removed.
* The trick looked like this
* [0 ... UM_NR_syscall_max] = &sys_ni_syscall
* before including unistd_64.h - the later initializations overwrote
* the sys_ni_syscall filler.
*/
sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
#include "../../x86/include/asm/unistd_64.h"
};
int syscall_table_size = sizeof(sys_call_table);
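/*
 * Sketch of the double-include trick above: unistd_64.h is a list of lines
 * like "__SYSCALL(0, sys_read)". The first include, with __SYSCALL defined
 * as an extern declaration, expands that to
 *	extern asmlinkage void sys_read(void);
 * and the second include, with __SYSCALL redefined to "[ nr ] = sym,",
 * expands it to the designated initializer
 *	[0] = sys_read,
 * inside sys_call_table[].
 */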


@@ -0,0 +1,116 @@
/*
* Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#include "linux/linkage.h"
#include "linux/personality.h"
#include "linux/utsname.h"
#include "asm/prctl.h" /* XXX This should get the constants from libc */
#include "asm/uaccess.h"
#include "os.h"
asmlinkage long sys_uname64(struct new_utsname __user * name)
{
int err;
down_read(&uts_sem);
err = copy_to_user(name, utsname(), sizeof (*name));
up_read(&uts_sem);
if (personality(current->personality) == PER_LINUX32)
err |= copy_to_user(&name->machine, "i686", 5);
return err ? -EFAULT : 0;
}
long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
{
unsigned long *ptr = addr, tmp;
long ret;
int pid = task->mm->context.id.u.pid;
/*
* With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to
* be safe), we need to call arch_prctl on the host because
* setting %fs may result in something else happening (like a
* GDT or thread.fs being set instead). So, we let the host
* fiddle the registers and thread struct and restore the
* registers afterwards.
*
* So, the saved registers are stored to the process (this is
* needed because a stub may have been the last thing to run),
* arch_prctl is run on the host, then the registers are read
* back.
*/
switch (code) {
case ARCH_SET_FS:
case ARCH_SET_GS:
ret = restore_registers(pid, &current->thread.regs.regs);
if (ret)
return ret;
break;
case ARCH_GET_FS:
case ARCH_GET_GS:
/*
* With these two, we read to a local pointer and
* put_user it to the userspace pointer that we were
* given. If addr isn't valid (because it hasn't been
* faulted in or is just bogus), we want put_user to
* fault it in (or return -EFAULT) instead of having
* the host return -EFAULT.
*/
ptr = &tmp;
}
ret = os_arch_prctl(pid, code, ptr);
if (ret)
return ret;
switch (code) {
case ARCH_SET_FS:
current->thread.arch.fs = (unsigned long) ptr;
ret = save_registers(pid, &current->thread.regs.regs);
break;
case ARCH_SET_GS:
ret = save_registers(pid, &current->thread.regs.regs);
break;
case ARCH_GET_FS:
ret = put_user(tmp, addr);
break;
case ARCH_GET_GS:
ret = put_user(tmp, addr);
break;
}
return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
return arch_prctl(current, code, (unsigned long __user *) addr);
}
long sys_clone(unsigned long clone_flags, unsigned long newsp,
void __user *parent_tid, void __user *child_tid)
{
long ret;
if (!newsp)
newsp = UPT_SP(&current->thread.regs.regs);
current->thread.forking = 1;
ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
child_tid);
current->thread.forking = 0;
return ret;
}
void arch_switch_to(struct task_struct *to)
{
if ((to->thread.arch.fs == 0) || (to->mm == NULL))
return;
arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs);
}


@@ -0,0 +1,41 @@
/*
* Copyright 2003 PathScale, Inc.
*
* Licensed under the GPL
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/utsname.h>
#include <asm/current.h>
#include <asm/ptrace.h>
#include "sysrq.h"
void __show_regs(struct pt_regs *regs)
{
printk("\n");
print_modules();
printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current),
current->comm, print_tainted(), init_utsname()->release);
printk(KERN_INFO "RIP: %04lx:[<%016lx>]\n", PT_REGS_CS(regs) & 0xffff,
PT_REGS_RIP(regs));
printk(KERN_INFO "RSP: %016lx EFLAGS: %08lx\n", PT_REGS_RSP(regs),
PT_REGS_EFLAGS(regs));
printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
PT_REGS_RAX(regs), PT_REGS_RBX(regs), PT_REGS_RCX(regs));
printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
PT_REGS_RDX(regs), PT_REGS_RSI(regs), PT_REGS_RDI(regs));
printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
PT_REGS_RBP(regs), PT_REGS_R8(regs), PT_REGS_R9(regs));
printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
PT_REGS_R10(regs), PT_REGS_R11(regs), PT_REGS_R12(regs));
printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs));
}
void show_regs(struct pt_regs *regs)
{
__show_regs(regs);
show_trace(current, (unsigned long *) &regs);
}


@@ -0,0 +1,17 @@
#include "linux/sched.h"
void clear_flushed_tls(struct task_struct *task)
{
}
int arch_copy_tls(struct task_struct *t)
{
/*
* If CLONE_SETTLS is set, we need to save the thread id
* (which is argument 5, child_tid, of clone) so it can be set
* during context switches.
*/
t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];
return 0;
}


@@ -0,0 +1,65 @@
#include <stdio.h>
#include <stddef.h>
#include <signal.h>
#include <sys/poll.h>
#include <sys/mman.h>
#include <sys/user.h>
#define __FRAME_OFFSETS
#include <asm/ptrace.h>
#include <asm/types.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define DEFINE_LONGS(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
#define OFFSET(sym, str, mem) \
DEFINE(sym, offsetof(struct str, mem));
void foo(void)
{
OFFSET(HOST_SC_CR2, sigcontext, cr2);
OFFSET(HOST_SC_ERR, sigcontext, err);
OFFSET(HOST_SC_TRAPNO, sigcontext, trapno);
DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
DEFINE_LONGS(HOST_RBX, RBX);
DEFINE_LONGS(HOST_RCX, RCX);
DEFINE_LONGS(HOST_RDI, RDI);
DEFINE_LONGS(HOST_RSI, RSI);
DEFINE_LONGS(HOST_RDX, RDX);
DEFINE_LONGS(HOST_RBP, RBP);
DEFINE_LONGS(HOST_RAX, RAX);
DEFINE_LONGS(HOST_R8, R8);
DEFINE_LONGS(HOST_R9, R9);
DEFINE_LONGS(HOST_R10, R10);
DEFINE_LONGS(HOST_R11, R11);
DEFINE_LONGS(HOST_R12, R12);
DEFINE_LONGS(HOST_R13, R13);
DEFINE_LONGS(HOST_R14, R14);
DEFINE_LONGS(HOST_R15, R15);
DEFINE_LONGS(HOST_ORIG_RAX, ORIG_RAX);
DEFINE_LONGS(HOST_CS, CS);
DEFINE_LONGS(HOST_SS, SS);
DEFINE_LONGS(HOST_EFLAGS, EFLAGS);
#if 0
DEFINE_LONGS(HOST_FS, FS);
DEFINE_LONGS(HOST_GS, GS);
DEFINE_LONGS(HOST_DS, DS);
DEFINE_LONGS(HOST_ES, ES);
#endif
DEFINE_LONGS(HOST_IP, RIP);
DEFINE_LONGS(HOST_SP, RSP);
DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
/* XXX Duplicated between i386 and x86_64 */
DEFINE(UM_POLLIN, POLLIN);
DEFINE(UM_POLLPRI, POLLPRI);
DEFINE(UM_POLLOUT, POLLOUT);
DEFINE(UM_PROT_READ, PROT_READ);
DEFINE(UM_PROT_WRITE, PROT_WRITE);
DEFINE(UM_PROT_EXEC, PROT_EXEC);
}