add idl4k kernel firmware version 1.13.0.105

This commit is contained in:
Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions

View File

@@ -0,0 +1,21 @@
#
# Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
#
obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
sys_call_table.o tls.o
subarch-obj-y = lib/semaphore_32.o lib/string_32.o
subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
subarch-obj-$(CONFIG_MODULES) += kernel/module.o
USER_OBJS := bugs.o ptrace_user.o fault.o
USER_OBJS += user-offsets.s
extra-y += user-offsets.s
UNPROFILE_OBJS := stub_segv.o
CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
include arch/um/scripts/Makefile.rules

View File

@@ -0,0 +1,16 @@
/*
* Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
#ifndef __UM_ARCHPARAM_I386_H
#define __UM_ARCHPARAM_I386_H
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif
#endif

View File

@@ -0,0 +1,163 @@
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __UM_ELF_I386_H
#define __UM_ELF_I386_H
#include <asm/user.h>
#include "skas.h"
#define R_386_NONE 0
#define R_386_32 1
#define R_386_PC32 2
#define R_386_GOT32 3
#define R_386_PLT32 4
#define R_386_COPY 5
#define R_386_GLOB_DAT 6
#define R_386_JMP_SLOT 7
#define R_386_RELATIVE 8
#define R_386_GOTOFF 9
#define R_386_GOTPC 10
#define R_386_NUM 11
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_i387_struct elf_fpregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) \
(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_386
#define ELF_PLAT_INIT(regs, load_addr) do { \
PT_REGS_EBX(regs) = 0; \
PT_REGS_ECX(regs) = 0; \
PT_REGS_EDX(regs) = 0; \
PT_REGS_ESI(regs) = 0; \
PT_REGS_EDI(regs) = 0; \
PT_REGS_EBP(regs) = 0; \
PT_REGS_EAX(regs) = 0; \
} while (0)
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
/* Shamelessly stolen from include/asm-i386/elf.h */
#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
pr_reg[0] = PT_REGS_EBX(regs); \
pr_reg[1] = PT_REGS_ECX(regs); \
pr_reg[2] = PT_REGS_EDX(regs); \
pr_reg[3] = PT_REGS_ESI(regs); \
pr_reg[4] = PT_REGS_EDI(regs); \
pr_reg[5] = PT_REGS_EBP(regs); \
pr_reg[6] = PT_REGS_EAX(regs); \
pr_reg[7] = PT_REGS_DS(regs); \
pr_reg[8] = PT_REGS_ES(regs); \
/* fake once used fs and gs selectors? */ \
pr_reg[9] = PT_REGS_DS(regs); \
pr_reg[10] = PT_REGS_DS(regs); \
pr_reg[11] = PT_REGS_SYSCALL_NR(regs); \
pr_reg[12] = PT_REGS_IP(regs); \
pr_reg[13] = PT_REGS_CS(regs); \
pr_reg[14] = PT_REGS_EFLAGS(regs); \
pr_reg[15] = PT_REGS_SP(regs); \
pr_reg[16] = PT_REGS_SS(regs); \
} while (0);
extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
extern long elf_aux_hwcap;
#define ELF_HWCAP (elf_aux_hwcap)
extern char * elf_aux_platform;
#define ELF_PLATFORM (elf_aux_platform)
#define SET_PERSONALITY(ex) do { } while (0)
extern unsigned long vsyscall_ehdr;
extern unsigned long vsyscall_end;
extern unsigned long __kernel_vsyscall;
#define VSYSCALL_BASE vsyscall_ehdr
#define VSYSCALL_END vsyscall_end
/*
* This is the range that is readable by user mode, and things
* acting like user mode such as get_user_pages.
*/
#define FIXADDR_USER_START VSYSCALL_BASE
#define FIXADDR_USER_END VSYSCALL_END
/*
* Architecture-neutral AT_ values in 0-17, leave some room
* for more of them, start the x86-specific ones at 32.
*/
#define AT_SYSINFO 32
#define AT_SYSINFO_EHDR 33
#define ARCH_DLINFO \
do { \
if ( vsyscall_ehdr ) { \
NEW_AUX_ENT(AT_SYSINFO, __kernel_vsyscall); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, vsyscall_ehdr); \
} \
} while (0)
/*
* These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the vsyscall DSO contents. Dumping its
contents makes the post-mortem fully interpretable later without having to
match up the same kernel and hardware config to see what the PC values meant.
* Dumping its extra ELF program headers includes all the other information
* a debugger needs to easily find how the vsyscall DSO was being used.
*/
#define ELF_CORE_EXTRA_PHDRS \
(vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0 )
#define ELF_CORE_WRITE_EXTRA_PHDRS \
if ( vsyscall_ehdr ) { \
const struct elfhdr *const ehdrp = (struct elfhdr *)vsyscall_ehdr; \
const struct elf_phdr *const phdrp = \
(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); \
int i; \
Elf32_Off ofs = 0; \
for (i = 0; i < ehdrp->e_phnum; ++i) { \
struct elf_phdr phdr = phdrp[i]; \
if (phdr.p_type == PT_LOAD) { \
ofs = phdr.p_offset = offset; \
offset += phdr.p_filesz; \
} \
else \
phdr.p_offset += ofs; \
phdr.p_paddr = 0; /* match other core phdrs */ \
DUMP_WRITE(&phdr, sizeof(phdr)); \
} \
}
#define ELF_CORE_WRITE_EXTRA_DATA \
if ( vsyscall_ehdr ) { \
const struct elfhdr *const ehdrp = (struct elfhdr *)vsyscall_ehdr; \
const struct elf_phdr *const phdrp = \
(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); \
int i; \
for (i = 0; i < ehdrp->e_phnum; ++i) { \
if (phdrp[i].p_type == PT_LOAD) \
DUMP_WRITE((void *) phdrp[i].p_vaddr, \
phdrp[i].p_filesz); \
} \
}
#endif
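
For reference, a minimal userspace sketch (not part of this commit) of how the two ARCH_DLINFO entries above surface to a guest process; it assumes a glibc environment where getauxval() is available and the AT_SYSINFO* constants come from <elf.h>:

/* illustrative only: read the auxv entries published by ARCH_DLINFO */
#include <stdio.h>
#include <sys/auxv.h>   /* getauxval(); pulls in <elf.h> for AT_SYSINFO* */

int main(void)
{
        unsigned long ehdr  = getauxval(AT_SYSINFO_EHDR); /* vsyscall_ehdr */
        unsigned long entry = getauxval(AT_SYSINFO);      /* __kernel_vsyscall */

        if (ehdr)
                printf("vsyscall DSO at 0x%lx, syscall entry at 0x%lx\n",
                       ehdr, entry);
        else
                printf("no vsyscall DSO exposed\n");
        return 0;
}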

View File

@@ -0,0 +1,13 @@
#ifndef __UM_MODULE_I386_H
#define __UM_MODULE_I386_H
/* UML is simple */
struct mod_arch_specific
{
};
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
#endif

View File

@@ -0,0 +1,78 @@
/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#ifndef __UM_PROCESSOR_I386_H
#define __UM_PROCESSOR_I386_H
#include "linux/string.h"
#include <sysdep/host_ldt.h>
#include "asm/segment.h"
extern int host_has_cmov;
/* include faultinfo structure */
#include "sysdep/faultinfo.h"
struct uml_tls_struct {
struct user_desc tls;
unsigned flushed:1;
unsigned present:1;
};
struct arch_thread {
struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
unsigned long debugregs[8];
int debugregs_seq;
struct faultinfo faultinfo;
};
#define INIT_ARCH_THREAD { \
.tls_array = { [ 0 ... GDT_ENTRY_TLS_ENTRIES - 1 ] = \
{ .present = 0, .flushed = 0 } }, \
.debugregs = { [ 0 ... 7 ] = 0 }, \
.debugregs_seq = 0, \
.faultinfo = { 0, 0, 0 } \
}
static inline void arch_flush_thread(struct arch_thread *thread)
{
/* Clear any TLS still hanging */
memset(&thread->tls_array, 0, sizeof(thread->tls_array));
}
static inline void arch_copy_thread(struct arch_thread *from,
struct arch_thread *to)
{
memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array));
}
#include <asm/user.h>
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
__asm__ __volatile__("rep;nop": : :"memory");
}
#define cpu_relax() rep_nop()
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter"). Stolen
* from asm-i386/processor.h
*/
#define current_text_addr() \
({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
#define ARCH_IS_STACKGROW(address) \
(address + 32 >= UPT_SP(&current->thread.regs.regs))
#define KSTK_EIP(tsk) KSTK_REG(tsk, EIP)
#define KSTK_ESP(tsk) KSTK_REG(tsk, UESP)
#define KSTK_EBP(tsk) KSTK_REG(tsk, EBP)
#include "asm/processor-generic.h"
#endif
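
A minimal sketch (not from this header) of the busy-wait pattern the rep_nop() comment above has in mind; "flag" is a hypothetical variable set by another context:

/* illustrative spin-wait: cpu_relax() issues PAUSE so the CPU knows this is
 * a busy loop and can back off instead of burning the pipeline at full rate */
static volatile int flag;

static void wait_for_flag(void)
{
        while (!flag)
                cpu_relax();
}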

View File

@@ -0,0 +1,56 @@
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __UM_PTRACE_I386_H
#define __UM_PTRACE_I386_H
#define HOST_AUDIT_ARCH AUDIT_ARCH_I386
#include "linux/compiler.h"
#include "asm/ptrace-generic.h"
#define PT_REGS_EAX(r) UPT_EAX(&(r)->regs)
#define PT_REGS_EBX(r) UPT_EBX(&(r)->regs)
#define PT_REGS_ECX(r) UPT_ECX(&(r)->regs)
#define PT_REGS_EDX(r) UPT_EDX(&(r)->regs)
#define PT_REGS_ESI(r) UPT_ESI(&(r)->regs)
#define PT_REGS_EDI(r) UPT_EDI(&(r)->regs)
#define PT_REGS_EBP(r) UPT_EBP(&(r)->regs)
#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_EAX(r)
#define PT_REGS_SYSCALL_RET(r) PT_REGS_EAX(r)
#define PT_FIX_EXEC_STACK(sp) do ; while(0)
#define profile_pc(regs) PT_REGS_IP(regs)
#define user_mode(r) UPT_IS_USER(&(r)->regs)
/*
* Forward declaration to avoid including sysdep/tls.h, which causes a
* circular include, and compilation failures.
*/
struct user_desc;
extern int get_fpxregs(struct user_fxsr_struct __user *buf,
struct task_struct *child);
extern int set_fpxregs(struct user_fxsr_struct __user *buf,
struct task_struct *tsk);
extern int ptrace_get_thread_area(struct task_struct *child, int idx,
struct user_desc __user *user_desc);
extern int ptrace_set_thread_area(struct task_struct *child, int idx,
struct user_desc __user *user_desc);
#endif

View File

@@ -0,0 +1,21 @@
/*
* Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL V2
*/
#include <linux/uaccess.h>
#include <asm/errno.h>
/* Mostly copied from i386/x86_64 - eliminated the eip < PAGE_OFFSET check because
* that's not relevant in skas mode.
*/
int is_valid_bugaddr(unsigned long eip)
{
unsigned short ud2;
if (probe_kernel_address((unsigned short __user *)eip, ud2))
return 0;
return ud2 == 0x0b0f;
}

View File

@@ -0,0 +1,76 @@
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <signal.h>
#include "kern_constants.h"
#include "kern_util.h"
#include "longjmp.h"
#include "task.h"
#include "user.h"
#include "sysdep/ptrace.h"
/* Set during early boot */
static int host_has_cmov = 1;
static jmp_buf cmov_test_return;
static void cmov_sigill_test_handler(int sig)
{
host_has_cmov = 0;
longjmp(cmov_test_return, 1);
}
void arch_check_bugs(void)
{
struct sigaction old, new;
printk(UM_KERN_INFO "Checking for host processor cmov support...");
new.sa_handler = cmov_sigill_test_handler;
/* Make sure that SIGILL is enabled after the handler longjmps back */
new.sa_flags = SA_NODEFER;
sigemptyset(&new.sa_mask);
sigaction(SIGILL, &new, &old);
if (setjmp(cmov_test_return) == 0) {
unsigned long foo = 0;
__asm__ __volatile__("cmovz %0, %1" : "=r" (foo) : "0" (foo));
printk(UM_KERN_CONT "Yes\n");
} else
printk(UM_KERN_CONT "No\n");
sigaction(SIGILL, &old, &new);
}
void arch_examine_signal(int sig, struct uml_pt_regs *regs)
{
unsigned char tmp[2];
/*
* This is testing for a cmov (0x0f 0x4x) instruction causing a
* SIGILL in init.
*/
if ((sig != SIGILL) || (TASK_PID(get_current()) != 1))
return;
if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) {
printk(UM_KERN_ERR "SIGILL in init, could not read "
"instructions!\n");
return;
}
if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40))
return;
if (host_has_cmov == 0)
printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
"processor doesn't implement. Boot a filesystem "
"compiled for older processors");
else if (host_has_cmov == 1)
printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
"processor claims to implement");
else
printk(UM_KERN_ERR "Bad value for host_has_cmov (%d)",
host_has_cmov);
}

View File

@@ -0,0 +1,458 @@
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IP/TCP/UDP checksumming routines
*
* Authors: Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Tom May, <ftom@netcom.com>
* Pentium Pro/II routines:
* Alexander Kjeldaas <astor@guardian.no>
* Finn Arne Gangstad <finnag@guardian.no>
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
* Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
* handling.
* Andi Kleen, add zeroing on error
* converted to pure assembler
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/errno.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
/*
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
*/
.text
.align 4
.globl csum_partial
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
/*
* Experiments with Ethernet and SLIP connections show that buff
* is aligned on either a 2-byte or 4-byte boundary. We get at
* least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
* Fortunately, it is easy to convert 2-byte alignment to 4-byte
* alignment for the unrolled loop.
*/
csum_partial:
pushl %esi
pushl %ebx
movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: unsigned char *buff
testl $2, %esi # Check alignment.
jz 2f # Jump if alignment is ok.
subl $2, %ecx # Alignment uses up two bytes.
jae 1f # Jump if we had at least two bytes.
addl $2, %ecx # ecx was < 2. Deal with it.
jmp 4f
1: movw (%esi), %bx
addl $2, %esi
addw %bx, %ax
adcl $0, %eax
2:
movl %ecx, %edx
shrl $5, %ecx
jz 2f
testl %esi, %esi
1: movl (%esi), %ebx
adcl %ebx, %eax
movl 4(%esi), %ebx
adcl %ebx, %eax
movl 8(%esi), %ebx
adcl %ebx, %eax
movl 12(%esi), %ebx
adcl %ebx, %eax
movl 16(%esi), %ebx
adcl %ebx, %eax
movl 20(%esi), %ebx
adcl %ebx, %eax
movl 24(%esi), %ebx
adcl %ebx, %eax
movl 28(%esi), %ebx
adcl %ebx, %eax
lea 32(%esi), %esi
dec %ecx
jne 1b
adcl $0, %eax
2: movl %edx, %ecx
andl $0x1c, %edx
je 4f
shrl $2, %edx # This clears CF
3: adcl (%esi), %eax
lea 4(%esi), %esi
dec %edx
jne 3b
adcl $0, %eax
4: andl $3, %ecx
jz 7f
cmpl $2, %ecx
jb 5f
movw (%esi),%cx
leal 2(%esi),%esi
je 6f
shll $16,%ecx
5: movb (%esi),%cl
6: addl %ecx,%eax
adcl $0, %eax
7:
popl %ebx
popl %esi
ret
#else
/* Version for PentiumII/PPro */
csum_partial:
pushl %esi
pushl %ebx
movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: const unsigned char *buf
testl $2, %esi
jnz 30f
10:
movl %ecx, %edx
movl %ecx, %ebx
andl $0x7c, %ebx
shrl $7, %ecx
addl %ebx,%esi
shrl $2, %ebx
negl %ebx
lea 45f(%ebx,%ebx,2), %ebx
testl %esi, %esi
jmp *%ebx
# Handle 2-byte-aligned regions
20: addw (%esi), %ax
lea 2(%esi), %esi
adcl $0, %eax
jmp 10b
30: subl $2, %ecx
ja 20b
je 32f
movzbl (%esi),%ebx # csumming 1 byte, 2-aligned
addl %ebx, %eax
adcl $0, %eax
jmp 80f
32:
addw (%esi), %ax # csumming 2 bytes, 2-aligned
adcl $0, %eax
jmp 80f
40:
addl -128(%esi), %eax
adcl -124(%esi), %eax
adcl -120(%esi), %eax
adcl -116(%esi), %eax
adcl -112(%esi), %eax
adcl -108(%esi), %eax
adcl -104(%esi), %eax
adcl -100(%esi), %eax
adcl -96(%esi), %eax
adcl -92(%esi), %eax
adcl -88(%esi), %eax
adcl -84(%esi), %eax
adcl -80(%esi), %eax
adcl -76(%esi), %eax
adcl -72(%esi), %eax
adcl -68(%esi), %eax
adcl -64(%esi), %eax
adcl -60(%esi), %eax
adcl -56(%esi), %eax
adcl -52(%esi), %eax
adcl -48(%esi), %eax
adcl -44(%esi), %eax
adcl -40(%esi), %eax
adcl -36(%esi), %eax
adcl -32(%esi), %eax
adcl -28(%esi), %eax
adcl -24(%esi), %eax
adcl -20(%esi), %eax
adcl -16(%esi), %eax
adcl -12(%esi), %eax
adcl -8(%esi), %eax
adcl -4(%esi), %eax
45:
lea 128(%esi), %esi
adcl $0, %eax
dec %ecx
jge 40b
movl %edx, %ecx
50: andl $3, %ecx
jz 80f
# Handle the last 1-3 bytes without jumping
notl %ecx # 1->2, 2->1, 3->0, higher bits are masked
movl $0xffffff,%ebx # by the shll and shrl instructions
shll $3,%ecx
shrl %cl,%ebx
andl -128(%esi),%ebx # esi is 4-aligned so should be ok
addl %ebx,%eax
adcl $0,%eax
80:
popl %ebx
popl %esi
ret
#endif
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
int len, int sum, int *src_err_ptr, int *dst_err_ptr)
*/
/*
* Copy from ds while checksumming, otherwise like csum_partial
*
* The macros SRC and DST specify the type of access for the instruction.
* thus we can call a custom exception handler for all access types.
*
* FIXME: could someone double-check whether I haven't mixed up some SRC and
* DST definitions? It's damn hard to trigger all cases. I hope I got
* them all but there's no guarantee.
*/
#define SRC(y...) \
9999: y; \
.section __ex_table, "a"; \
.long 9999b, 6001f ; \
.previous
#define DST(y...) \
9999: y; \
.section __ex_table, "a"; \
.long 9999b, 6002f ; \
.previous
.align 4
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
#define ARGBASE 16
#define FP 12
csum_partial_copy_generic_i386:
subl $4,%esp
pushl %edi
pushl %esi
pushl %ebx
movl ARGBASE+16(%esp),%eax # sum
movl ARGBASE+12(%esp),%ecx # len
movl ARGBASE+4(%esp),%esi # src
movl ARGBASE+8(%esp),%edi # dst
testl $2, %edi # Check alignment.
jz 2f # Jump if alignment is ok.
subl $2, %ecx # Alignment uses up two bytes.
jae 1f # Jump if we had at least two bytes.
addl $2, %ecx # ecx was < 2. Deal with it.
jmp 4f
SRC(1: movw (%esi), %bx )
addl $2, %esi
DST( movw %bx, (%edi) )
addl $2, %edi
addw %bx, %ax
adcl $0, %eax
2:
movl %ecx, FP(%esp)
shrl $5, %ecx
jz 2f
testl %esi, %esi
SRC(1: movl (%esi), %ebx )
SRC( movl 4(%esi), %edx )
adcl %ebx, %eax
DST( movl %ebx, (%edi) )
adcl %edx, %eax
DST( movl %edx, 4(%edi) )
SRC( movl 8(%esi), %ebx )
SRC( movl 12(%esi), %edx )
adcl %ebx, %eax
DST( movl %ebx, 8(%edi) )
adcl %edx, %eax
DST( movl %edx, 12(%edi) )
SRC( movl 16(%esi), %ebx )
SRC( movl 20(%esi), %edx )
adcl %ebx, %eax
DST( movl %ebx, 16(%edi) )
adcl %edx, %eax
DST( movl %edx, 20(%edi) )
SRC( movl 24(%esi), %ebx )
SRC( movl 28(%esi), %edx )
adcl %ebx, %eax
DST( movl %ebx, 24(%edi) )
adcl %edx, %eax
DST( movl %edx, 28(%edi) )
lea 32(%esi), %esi
lea 32(%edi), %edi
dec %ecx
jne 1b
adcl $0, %eax
2: movl FP(%esp), %edx
movl %edx, %ecx
andl $0x1c, %edx
je 4f
shrl $2, %edx # This clears CF
SRC(3: movl (%esi), %ebx )
adcl %ebx, %eax
DST( movl %ebx, (%edi) )
lea 4(%esi), %esi
lea 4(%edi), %edi
dec %edx
jne 3b
adcl $0, %eax
4: andl $3, %ecx
jz 7f
cmpl $2, %ecx
jb 5f
SRC( movw (%esi), %cx )
leal 2(%esi), %esi
DST( movw %cx, (%edi) )
leal 2(%edi), %edi
je 6f
shll $16,%ecx
SRC(5: movb (%esi), %cl )
DST( movb %cl, (%edi) )
6: addl %ecx, %eax
adcl $0, %eax
7:
5000:
# Exception handler:
.section .fixup, "ax"
6001:
movl ARGBASE+20(%esp), %ebx # src_err_ptr
movl $-EFAULT, (%ebx)
# zero the complete destination - computing the rest
# is too much work
movl ARGBASE+8(%esp), %edi # dst
movl ARGBASE+12(%esp), %ecx # len
xorl %eax,%eax
rep ; stosb
jmp 5000b
6002:
movl ARGBASE+24(%esp), %ebx # dst_err_ptr
movl $-EFAULT,(%ebx)
jmp 5000b
.previous
popl %ebx
popl %esi
popl %edi
popl %ecx # equivalent to addl $4,%esp
ret
#else
/* Version for PentiumII/PPro */
#define ROUND1(x) \
SRC(movl x(%esi), %ebx ) ; \
addl %ebx, %eax ; \
DST(movl %ebx, x(%edi) ) ;
#define ROUND(x) \
SRC(movl x(%esi), %ebx ) ; \
adcl %ebx, %eax ; \
DST(movl %ebx, x(%edi) ) ;
#define ARGBASE 12
csum_partial_copy_generic_i386:
pushl %ebx
pushl %edi
pushl %esi
movl ARGBASE+4(%esp),%esi #src
movl ARGBASE+8(%esp),%edi #dst
movl ARGBASE+12(%esp),%ecx #len
movl ARGBASE+16(%esp),%eax #sum
# movl %ecx, %edx
movl %ecx, %ebx
movl %esi, %edx
shrl $6, %ecx
andl $0x3c, %ebx
negl %ebx
subl %ebx, %esi
subl %ebx, %edi
lea -1(%esi),%edx
andl $-32,%edx
lea 3f(%ebx,%ebx), %ebx
testl %esi, %esi
jmp *%ebx
1: addl $64,%esi
addl $64,%edi
SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4)
3: adcl $0,%eax
addl $64, %edx
dec %ecx
jge 1b
4: movl ARGBASE+12(%esp),%edx #len
andl $3, %edx
jz 7f
cmpl $2, %edx
jb 5f
SRC( movw (%esi), %dx )
leal 2(%esi), %esi
DST( movw %dx, (%edi) )
leal 2(%edi), %edi
je 6f
shll $16,%edx
5:
SRC( movb (%esi), %dl )
DST( movb %dl, (%edi) )
6: addl %edx, %eax
adcl $0, %eax
7:
.section .fixup, "ax"
6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
movl $-EFAULT, (%ebx)
# zero the complete destination (computing the rest is too much work)
movl ARGBASE+8(%esp),%edi # dst
movl ARGBASE+12(%esp),%ecx # len
xorl %eax,%eax
rep; stosb
jmp 7b
6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
movl $-EFAULT, (%ebx)
jmp 7b
.previous
popl %esi
popl %edi
popl %ebx
ret
#undef ROUND
#undef ROUND1
#endif
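
For readers of the assembly above, a hedged C reference (not part of this file) of what csum_partial() computes; it ignores the 2-byte alignment and unrolling fast paths and assumes a length that is a multiple of four:

#include <string.h>

/* 32-bit loads accumulated with end-around carry, like the movl/adcl runs */
static unsigned int csum_partial_ref(const unsigned char *buff, int len,
                                     unsigned int sum)
{
        unsigned long long acc = sum;
        int i;

        for (i = 0; i + 3 < len; i += 4) {
                unsigned int word;

                memcpy(&word, buff + i, sizeof(word));  /* little-endian load */
                acc += word;
        }
        /* fold the carries back in, the trailing "adcl $0, %eax" steps */
        while (acc >> 32)
                acc = (acc & 0xffffffffULL) + (acc >> 32);
        return (unsigned int) acc;
}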

View File

@@ -0,0 +1,29 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/param.h>
void __delay(unsigned long time)
{
/* Stolen from the i386 __loop_delay */
int d0;
__asm__ __volatile__(
"\tjmp 1f\n"
".align 16\n"
"1:\tjmp 2f\n"
".align 16\n"
"2:\tdecl %0\n\tjns 2b"
:"=&a" (d0)
:"0" (time));
}
void __udelay(unsigned long usecs)
{
int i, n;
n = (loops_per_jiffy * HZ * usecs) / MILLION;
for(i=0;i<n;i++)
cpu_relax();
}
EXPORT_SYMBOL(__udelay);
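
As a worked example of the __udelay() arithmetic above (hypothetical numbers): with loops_per_jiffy = 4,000,000 and HZ = 100 the host runs roughly 400,000,000 delay-loop iterations per second, so a 10 microsecond delay spins n = 4,000,000 * 100 * 10 / 1,000,000 = 4,000 iterations of cpu_relax().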

View File

@@ -0,0 +1,28 @@
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include "sysdep/ptrace.h"
/* These two are from asm-um/uaccess.h and linux/module.h, check them. */
struct exception_table_entry
{
unsigned long insn;
unsigned long fixup;
};
const struct exception_table_entry *search_exception_tables(unsigned long add);
/* Compare this to arch/i386/mm/extable.c:fixup_exception() */
int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(address);
if (fixup != 0) {
UPT_IP(regs) = fixup->fixup;
return 1;
}
return 0;
}

View File

@@ -0,0 +1,5 @@
#include "linux/module.h"
#include "asm/checksum.h"
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial);

View File

@@ -0,0 +1,501 @@
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/unistd.h>
#include "os.h"
#include "proc_mm.h"
#include "skas.h"
#include "skas_ptrace.h"
#include "sysdep/tls.h"
extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
static long write_ldt_entry(struct mm_id *mm_idp, int func,
struct user_desc *desc, void **addr, int done)
{
long res;
if (proc_mm) {
/*
* This is special handling for the case that the mm to
* modify isn't current->active_mm.
* If this is called directly by modify_ldt,
* (current->active_mm->context.skas.u == mm_idp)
* will be true. So no call to __switch_mm(mm_idp) is done.
* If this is called in case of init_new_ldt or PTRACE_LDT,
* mm_idp won't belong to current->active_mm, but child->mm.
* So we need to switch child's mm into our userspace, then
* later switch back.
*
* Note: I'm unsure: should interrupts be disabled here?
*/
if (!current->active_mm || current->active_mm == &init_mm ||
mm_idp != &current->active_mm->context.id)
__switch_mm(mm_idp);
}
if (ptrace_ldt) {
struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
.func = func,
.ptr = desc,
.bytecount = sizeof(*desc)};
u32 cpu;
int pid;
if (!proc_mm)
pid = mm_idp->u.pid;
else {
cpu = get_cpu();
pid = userspace_pid[cpu];
}
res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);
if (proc_mm)
put_cpu();
}
else {
void *stub_addr;
res = syscall_stub_data(mm_idp, (unsigned long *)desc,
(sizeof(*desc) + sizeof(long) - 1) &
~(sizeof(long) - 1),
addr, &stub_addr);
if (!res) {
unsigned long args[] = { func,
(unsigned long)stub_addr,
sizeof(*desc),
0, 0, 0 };
res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
0, addr, done);
}
}
if (proc_mm) {
/*
* This is the second part of the special handling that makes
* PTRACE_LDT possible to implement.
*/
if (current->active_mm && current->active_mm != &init_mm &&
mm_idp != &current->active_mm->context.id)
__switch_mm(&current->active_mm->context.id);
}
return res;
}
static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
{
int res, n;
struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
.func = 0,
.bytecount = bytecount,
.ptr = kmalloc(bytecount, GFP_KERNEL)};
u32 cpu;
if (ptrace_ldt.ptr == NULL)
return -ENOMEM;
/*
* This is called from sys_modify_ldt only, so userspace_pid gives
* us the right number
*/
cpu = get_cpu();
res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
put_cpu();
if (res < 0)
goto out;
n = copy_to_user(ptr, ptrace_ldt.ptr, res);
if (n != 0)
res = -EFAULT;
out:
kfree(ptrace_ldt.ptr);
return res;
}
/*
* In skas mode, we hold our own ldt data in UML.
* Thus, the code implementing sys_modify_ldt_skas
* is very similar to (and mostly stolen from) sys_modify_ldt
* in arch/i386/kernel/ldt.c
* The routines copied and modified in part are:
* - read_ldt
* - read_default_ldt
* - write_ldt
* - sys_modify_ldt_skas
*/
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
int i, err = 0;
unsigned long size;
uml_ldt_t * ldt = &current->mm->context.ldt;
if (!ldt->entry_count)
goto out;
if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
err = bytecount;
if (ptrace_ldt)
return read_ldt_from_host(ptr, bytecount);
mutex_lock(&ldt->lock);
if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
if (size > bytecount)
size = bytecount;
if (copy_to_user(ptr, ldt->u.entries, size))
err = -EFAULT;
bytecount -= size;
ptr += size;
}
else {
for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
i++) {
size = PAGE_SIZE;
if (size > bytecount)
size = bytecount;
if (copy_to_user(ptr, ldt->u.pages[i], size)) {
err = -EFAULT;
break;
}
bytecount -= size;
ptr += size;
}
}
mutex_unlock(&ldt->lock);
if (bytecount == 0 || err == -EFAULT)
goto out;
if (clear_user(ptr, bytecount))
err = -EFAULT;
out:
return err;
}
static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
int err;
if (bytecount > 5*LDT_ENTRY_SIZE)
bytecount = 5*LDT_ENTRY_SIZE;
err = bytecount;
/*
* UML doesn't support lcall7 and lcall27.
* So, we don't really have a default ldt, but emulate
* an empty ldt of the common host default ldt size.
*/
if (clear_user(ptr, bytecount))
err = -EFAULT;
return err;
}
static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{
uml_ldt_t * ldt = &current->mm->context.ldt;
struct mm_id * mm_idp = &current->mm->context.id;
int i, err;
struct user_desc ldt_info;
struct ldt_entry entry0, *ldt_p;
void *addr = NULL;
err = -EINVAL;
if (bytecount != sizeof(ldt_info))
goto out;
err = -EFAULT;
if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
goto out;
err = -EINVAL;
if (ldt_info.entry_number >= LDT_ENTRIES)
goto out;
if (ldt_info.contents == 3) {
if (func == 1)
goto out;
if (ldt_info.seg_not_present == 0)
goto out;
}
if (!ptrace_ldt)
mutex_lock(&ldt->lock);
err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
if (err)
goto out_unlock;
else if (ptrace_ldt) {
/* With PTRACE_LDT available, this is used as a flag only */
ldt->entry_count = 1;
goto out;
}
if (ldt_info.entry_number >= ldt->entry_count &&
ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
i++) {
if (i == 0)
memcpy(&entry0, ldt->u.entries,
sizeof(entry0));
ldt->u.pages[i] = (struct ldt_entry *)
__get_free_page(GFP_KERNEL|__GFP_ZERO);
if (!ldt->u.pages[i]) {
err = -ENOMEM;
/* Undo the change in host */
memset(&ldt_info, 0, sizeof(ldt_info));
write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
goto out_unlock;
}
if (i == 0) {
memcpy(ldt->u.pages[0], &entry0,
sizeof(entry0));
memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
}
ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
}
}
if (ldt->entry_count <= ldt_info.entry_number)
ldt->entry_count = ldt_info.entry_number + 1;
if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
ldt_p = ldt->u.entries + ldt_info.entry_number;
else
ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
(func == 1 || LDT_empty(&ldt_info))) {
ldt_p->a = 0;
ldt_p->b = 0;
}
else {
if (func == 1)
ldt_info.useable = 0;
ldt_p->a = LDT_entry_a(&ldt_info);
ldt_p->b = LDT_entry_b(&ldt_info);
}
err = 0;
out_unlock:
mutex_unlock(&ldt->lock);
out:
return err;
}
static long do_modify_ldt_skas(int func, void __user *ptr,
unsigned long bytecount)
{
int ret = -ENOSYS;
switch (func) {
case 0:
ret = read_ldt(ptr, bytecount);
break;
case 1:
case 0x11:
ret = write_ldt(ptr, bytecount, func);
break;
case 2:
ret = read_default_ldt(ptr, bytecount);
break;
}
return ret;
}
static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short * host_ldt_entries = NULL;
static void ldt_get_host_info(void)
{
long ret;
struct ldt_entry * ldt;
short *tmp;
int i, size, k, order;
spin_lock(&host_ldt_lock);
if (host_ldt_entries != NULL) {
spin_unlock(&host_ldt_lock);
return;
}
host_ldt_entries = dummy_list+1;
spin_unlock(&host_ldt_lock);
for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
;
ldt = (struct ldt_entry *)
__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
if (ldt == NULL) {
printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
"for host ldt\n");
return;
}
ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
if (ret < 0) {
printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
goto out_free;
}
if (ret == 0) {
/* default_ldt is active, simply write an empty entry 0 */
host_ldt_entries = dummy_list;
goto out_free;
}
for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
if (ldt[i].a != 0 || ldt[i].b != 0)
size++;
}
if (size < ARRAY_SIZE(dummy_list))
host_ldt_entries = dummy_list;
else {
size = (size + 1) * sizeof(dummy_list[0]);
tmp = kmalloc(size, GFP_KERNEL);
if (tmp == NULL) {
printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
"host ldt list\n");
goto out_free;
}
host_ldt_entries = tmp;
}
for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
if (ldt[i].a != 0 || ldt[i].b != 0)
host_ldt_entries[k++] = i;
}
host_ldt_entries[k] = -1;
out_free:
free_pages((unsigned long)ldt, order);
}
long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
struct user_desc desc;
short * num_p;
int i;
long page, err=0;
void *addr = NULL;
struct proc_mm_op copy;
if (!ptrace_ldt)
mutex_init(&new_mm->ldt.lock);
if (!from_mm) {
memset(&desc, 0, sizeof(desc));
/*
* We have to initialize a clean ldt.
*/
if (proc_mm) {
/*
* If the new mm was created using proc_mm, host's
* default-ldt currently is assigned, which normally
* contains the call-gates for lcall7 and lcall27.
* To remove these gates, we simply write an empty
* entry as number 0 to the host.
*/
err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
}
else {
/*
* Now we try to retrieve info about the ldt we
* inherited from the host. All ldt-entries found
* will be reset in the following loop.
*/
ldt_get_host_info();
for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
desc.entry_number = *num_p;
err = write_ldt_entry(&new_mm->id, 1, &desc,
&addr, *(num_p + 1) == -1);
if (err)
break;
}
}
new_mm->ldt.entry_count = 0;
goto out;
}
if (proc_mm) {
/*
* We have a valid from_mm, so we now have to copy the LDT of
* from_mm to new_mm, because using proc_mm a new mm with
* an empty/default LDT was created in new_mm()
*/
copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
.u =
{ .copy_segments =
from_mm->id.u.mm_fd } } );
i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
if (i != sizeof(copy))
printk(KERN_ERR "new_mm : /proc/mm copy_segments "
"failed, err = %d\n", -i);
}
if (!ptrace_ldt) {
/*
* Our local LDT is used to supply the data for
* modify_ldt(READLDT), if PTRACE_LDT isn't available,
* i.e., we have to use the stub for modify_ldt, which
* can't handle the big read buffer of up to 64kB.
*/
mutex_lock(&from_mm->ldt.lock);
if (from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES)
memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
sizeof(new_mm->ldt.u.entries));
else {
i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
while (i-->0) {
page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
if (!page) {
err = -ENOMEM;
break;
}
new_mm->ldt.u.pages[i] =
(struct ldt_entry *) page;
memcpy(new_mm->ldt.u.pages[i],
from_mm->ldt.u.pages[i], PAGE_SIZE);
}
}
new_mm->ldt.entry_count = from_mm->ldt.entry_count;
mutex_unlock(&from_mm->ldt.lock);
}
out:
return err;
}
void free_ldt(struct mm_context *mm)
{
int i;
if (!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES) {
i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
while (i-- > 0)
free_page((long) mm->ldt.u.pages[i]);
}
mm->ldt.entry_count = 0;
}
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
return do_modify_ldt_skas(func, ptr, bytecount);
}
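
A hedged userspace sketch (not part of this commit) of the interface that sys_modify_ldt() above serves: func 0 reads the LDT, 1 and 0x11 write one entry, 2 reads the default LDT. modify_ldt has no libc wrapper, so a guest program would go through syscall(2); the descriptor below is purely illustrative:

#include <asm/ldt.h>            /* struct user_desc */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int install_example_ldt_entry(void)
{
        struct user_desc d;

        memset(&d, 0, sizeof(d));
        d.entry_number   = 0;
        d.base_addr      = 0;
        d.limit          = 0xfffff;
        d.seg_32bit      = 1;
        d.limit_in_pages = 1;

        /* func 1 = write an LDT entry, mirroring write_ldt() above */
        return syscall(SYS_modify_ldt, 1, &d, sizeof(d));
}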

View File

@@ -0,0 +1,210 @@
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include "linux/mm.h"
#include "linux/sched.h"
#include "asm/uaccess.h"
#include "skas.h"
extern int arch_switch_tls(struct task_struct *to);
void arch_switch_to(struct task_struct *to)
{
int err = arch_switch_tls(to);
if (!err)
return;
if (err != -EINVAL)
printk(KERN_WARNING "arch_switch_tls failed, errno %d, "
"not EINVAL\n", -err);
else
printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n");
}
int is_syscall(unsigned long addr)
{
unsigned short instr;
int n;
n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
if (n) {
/* access_process_vm() grants access to vsyscall and stub,
* while copy_from_user doesn't. Maybe access_process_vm is
* slow, but that doesn't matter, since it will be called only
* in case of singlestepping, if copy_from_user failed.
*/
n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
if (n != sizeof(instr)) {
printk(KERN_ERR "is_syscall : failed to read "
"instruction from 0x%lx\n", addr);
return 1;
}
}
/* int 0x80 or sysenter */
return (instr == 0x80cd) || (instr == 0x340f);
}
/* determines which flags the user has access to. */
/* 1 = access 0 = no access */
#define FLAG_MASK 0x00044dd5
int putreg(struct task_struct *child, int regno, unsigned long value)
{
regno >>= 2;
switch (regno) {
case FS:
if (value && (value & 3) != 3)
return -EIO;
PT_REGS_FS(&child->thread.regs) = value;
return 0;
case GS:
if (value && (value & 3) != 3)
return -EIO;
PT_REGS_GS(&child->thread.regs) = value;
return 0;
case DS:
case ES:
if (value && (value & 3) != 3)
return -EIO;
value &= 0xffff;
break;
case SS:
case CS:
if ((value & 3) != 3)
return -EIO;
value &= 0xffff;
break;
case EFL:
value &= FLAG_MASK;
value |= PT_REGS_EFLAGS(&child->thread.regs);
break;
}
PT_REGS_SET(&child->thread.regs, regno, value);
return 0;
}
int poke_user(struct task_struct *child, long addr, long data)
{
if ((addr & 3) || addr < 0)
return -EIO;
if (addr < MAX_REG_OFFSET)
return putreg(child, addr, data);
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
(addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
addr = addr >> 2;
if ((addr == 4) || (addr == 5))
return -EIO;
child->thread.arch.debugregs[addr] = data;
return 0;
}
return -EIO;
}
unsigned long getreg(struct task_struct *child, int regno)
{
unsigned long retval = ~0UL;
regno >>= 2;
switch (regno) {
case FS:
case GS:
case DS:
case ES:
case SS:
case CS:
retval = 0xffff;
/* fall through */
default:
retval &= PT_REG(&child->thread.regs, regno);
}
return retval;
}
/* read the word at location addr in the USER area. */
int peek_user(struct task_struct *child, long addr, long data)
{
unsigned long tmp;
if ((addr & 3) || addr < 0)
return -EIO;
tmp = 0; /* Default return condition */
if (addr < MAX_REG_OFFSET) {
tmp = getreg(child, addr);
}
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
(addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
addr = addr >> 2;
tmp = child->thread.arch.debugregs[addr];
}
return put_user(tmp, (unsigned long __user *) data);
}
int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
struct user_i387_struct fpregs;
err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
if (err)
return err;
n = copy_to_user(buf, &fpregs, sizeof(fpregs));
if(n > 0)
return -EFAULT;
return n;
}
int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
int n, cpu = ((struct thread_info *) child->stack)->cpu;
struct user_i387_struct fpregs;
n = copy_from_user(&fpregs, buf, sizeof(fpregs));
if (n > 0)
return -EFAULT;
return restore_fp_registers(userspace_pid[cpu],
(unsigned long *) &fpregs);
}
int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
{
int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
struct user_fxsr_struct fpregs;
err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
if (err)
return err;
n = copy_to_user(buf, &fpregs, sizeof(fpregs));
if(n > 0)
return -EFAULT;
return n;
}
int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
{
int n, cpu = ((struct thread_info *) child->stack)->cpu;
struct user_fxsr_struct fpregs;
n = copy_from_user(&fpregs, buf, sizeof(fpregs));
if (n > 0)
return -EFAULT;
return restore_fpx_registers(userspace_pid[cpu],
(unsigned long *) &fpregs);
}
long subarch_ptrace(struct task_struct *child, long request, long addr,
long data)
{
return -EIO;
}
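
A short note on the offset arithmetic above (illustrative, not part of the file): PTRACE_PEEKUSER/POKEUSER address the register area with byte offsets, which is why putreg() and getreg() shift the offset right by two before switching on the register index. A guest-side i386 tracer reaching EIP, for instance, would pass the byte offset EIP * sizeof(long):

#include <sys/ptrace.h>
#include <sys/reg.h>            /* EIP index on i386 */
#include <sys/types.h>

/* hypothetical helper for an i386 tracer running inside the guest */
static long poke_eip(pid_t child, unsigned long new_ip)
{
        return ptrace(PTRACE_POKEUSER, child,
                      (void *) (EIP * sizeof(long)), (void *) new_ip);
}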

View File

@@ -0,0 +1,21 @@
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <errno.h>
#include <sys/ptrace.h>
int ptrace_getregs(long pid, unsigned long *regs_out)
{
if (ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0)
return -errno;
return 0;
}
int ptrace_setregs(long pid, unsigned long *regs)
{
if (ptrace(PTRACE_SETREGS, pid, 0, regs) < 0)
return -errno;
return 0;
}

View File

@@ -0,0 +1,58 @@
#
# arch/i386/setjmp.S
#
# setjmp/longjmp for the i386 architecture
#
#
# The jmp_buf is assumed to contain the following, in order:
# %ebx
# %esp
# %ebp
# %esi
# %edi
# <return address>
#
.text
.align 4
.globl setjmp
.type setjmp, @function
setjmp:
#ifdef _REGPARM
movl %eax,%edx
#else
movl 4(%esp),%edx
#endif
popl %ecx # Return address, and adjust the stack
xorl %eax,%eax # Return value
movl %ebx,(%edx)
movl %esp,4(%edx) # Post-return %esp!
pushl %ecx # Make the call/return stack happy
movl %ebp,8(%edx)
movl %esi,12(%edx)
movl %edi,16(%edx)
movl %ecx,20(%edx) # Return address
ret
.size setjmp,.-setjmp
.text
.align 4
.globl longjmp
.type longjmp, @function
longjmp:
#ifdef _REGPARM
xchgl %eax,%edx
#else
movl 4(%esp),%edx # jmp_ptr address
movl 8(%esp),%eax # Return value
#endif
movl (%edx),%ebx
movl 4(%edx),%esp
movl 8(%edx),%ebp
movl 12(%edx),%esi
movl 16(%edx),%edi
jmp *20(%edx)
.size longjmp,.-longjmp

View File

@@ -0,0 +1,22 @@
/*
* arch/um/include/sysdep-i386/archsetjmp.h
*/
#ifndef _KLIBC_ARCHSETJMP_H
#define _KLIBC_ARCHSETJMP_H
struct __jmp_buf {
unsigned int __ebx;
unsigned int __esp;
unsigned int __ebp;
unsigned int __esi;
unsigned int __edi;
unsigned int __eip;
};
typedef struct __jmp_buf jmp_buf[1];
#define JB_IP __eip
#define JB_SP __esp
#endif /* _KLIBC_ARCHSETJMP_H */

View File

@@ -0,0 +1,9 @@
#ifndef __SYSDEP_I386_BARRIER_H
#define __SYSDEP_I386_BARRIER_H
/* Copied from include/asm-i386 for use by userspace. i386 has the option
* of using mfence, but I'm just using this, which works everywhere, for now.
*/
#define mb() asm volatile("lock; addl $0,0(%esp)")
#endif

View File

@@ -0,0 +1,201 @@
/*
* Licensed under the GPL
*/
#ifndef __UM_SYSDEP_CHECKSUM_H
#define __UM_SYSDEP_CHECKSUM_H
#include "linux/in6.h"
#include "linux/string.h"
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
__wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* access_ok().
*/
static __inline__
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
static __inline__
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
if (copy_from_user(dst, src, len)) {
*err_ptr = -EFAULT;
return (__force __wsum)-1;
}
return csum_partial(dst, len, sum);
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
__asm__ __volatile__(
"movl (%1), %0 ;\n"
"subl $4, %2 ;\n"
"jbe 2f ;\n"
"addl 4(%1), %0 ;\n"
"adcl 8(%1), %0 ;\n"
"adcl 12(%1), %0 ;\n"
"1: adcl 16(%1), %0 ;\n"
"lea 4(%1), %1 ;\n"
"decl %2 ;\n"
"jne 1b ;\n"
"adcl $0, %0 ;\n"
"movl %0, %2 ;\n"
"shrl $16, %0 ;\n"
"addw %w2, %w0 ;\n"
"adcl $0, %0 ;\n"
"notl %0 ;\n"
"2: ;\n"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "memory");
return (__force __sum16)sum;
}
/*
* Fold a partial checksum
*/
static inline __sum16 csum_fold(__wsum sum)
{
__asm__(
"addl %1, %0 ;\n"
"adcl $0xffff, %0 ;\n"
: "=r" (sum)
: "r" ((__force u32)sum << 16),
"0" ((__force u32)sum & 0xffff0000)
);
return (__force __sum16)(~(__force u32)sum >> 16);
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
__asm__(
"addl %1, %0 ;\n"
"adcl %2, %0 ;\n"
"adcl %3, %0 ;\n"
"adcl $0, %0 ;\n"
: "=r" (sum)
: "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold (csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__(
"addl 0(%1), %0 ;\n"
"adcl 4(%1), %0 ;\n"
"adcl 8(%1), %0 ;\n"
"adcl 12(%1), %0 ;\n"
"adcl 0(%2), %0 ;\n"
"adcl 4(%2), %0 ;\n"
"adcl 8(%2), %0 ;\n"
"adcl 12(%2), %0 ;\n"
"adcl %3, %0 ;\n"
"adcl %4, %0 ;\n"
"adcl $0, %0 ;\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
return csum_fold(sum);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
int len, __wsum sum, int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len)) {
if (copy_to_user(dst, src, len)) {
*err_ptr = -EFAULT;
return (__force __wsum)-1;
}
return csum_partial(src, len, sum);
}
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
}
#endif
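
A portable sketch (not part of this header) of what csum_fold() above computes, with one worked value: folding 0x0001f003 gives 0xf004, and the returned complement is 0x0ffb.

/* illustrative C equivalent of the addl/adcl pair in csum_fold() */
static inline unsigned short csum_fold_ref(unsigned int sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold high half into low */
        sum = (sum & 0xffff) + (sum >> 16);     /* absorb a possible carry */
        return (unsigned short) ~sum;
}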

View File

@@ -0,0 +1,29 @@
/*
* Copyright (C) 2004 Fujitsu Siemens Computers GmbH
* Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
* Licensed under the GPL
*/
#ifndef __FAULTINFO_I386_H
#define __FAULTINFO_I386_H
/* this structure contains the full arch-specific faultinfo
* from the traps.
* On i386, ptrace_faultinfo unfortunately doesn't provide
* all the info, since trap_no is missing.
* All common elements are defined at the same position in
* both structures, thus making it easy to copy the
* contents without knowledge about the structure elements.
*/
struct faultinfo {
int error_code; /* in ptrace_faultinfo misleadingly called is_write */
unsigned long cr2; /* in ptrace_faultinfo called addr */
int trap_no; /* missing in ptrace_faultinfo */
};
#define FAULT_WRITE(fi) ((fi).error_code & 2)
#define FAULT_ADDRESS(fi) ((fi).cr2)
#define PTRACE_FULL_FAULTINFO 0
#endif

View File

@@ -0,0 +1,34 @@
#ifndef __ASM_HOST_LDT_I386_H
#define __ASM_HOST_LDT_I386_H
#include <asm/ldt.h>
/*
* macros stolen from include/asm-i386/desc.h
*/
#define LDT_entry_a(info) \
((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
#define LDT_entry_b(info) \
(((info)->base_addr & 0xff000000) | \
(((info)->base_addr & 0x00ff0000) >> 16) | \
((info)->limit & 0xf0000) | \
(((info)->read_exec_only ^ 1) << 9) | \
((info)->contents << 10) | \
(((info)->seg_not_present ^ 1) << 15) | \
((info)->seg_32bit << 22) | \
((info)->limit_in_pages << 23) | \
((info)->useable << 20) | \
0x7000)
#define LDT_empty(info) (\
(info)->base_addr == 0 && \
(info)->limit == 0 && \
(info)->contents == 0 && \
(info)->read_exec_only == 1 && \
(info)->seg_32bit == 0 && \
(info)->limit_in_pages == 0 && \
(info)->seg_not_present == 1 && \
(info)->useable == 0 )
#endif
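
A worked example (illustrative, not part of this header) of the packing these macros perform, for a flat, DPL-3, writable 32-bit data segment:

/* base_addr 0, limit 0xfffff with limit_in_pages and seg_32bit set;
 * the remaining struct user_desc fields default to zero here */
static const struct user_desc flat_data_example = {
        .base_addr      = 0,
        .limit          = 0xfffff,
        .seg_32bit      = 1,
        .limit_in_pages = 1,
};
/* LDT_entry_a(&flat_data_example) == 0x0000ffff
 * LDT_entry_b(&flat_data_example) == 0x00cff200   (G=1, D/B=1, P=1, DPL=3,
 *                                                  writable data, base 0) */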

View File

@@ -0,0 +1,21 @@
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/crypto.h>
#include <asm/mman.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define STR(x) #x
#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : )
#define BLANK() asm volatile("\n->" : : )
#define OFFSET(sym, str, mem) \
DEFINE(sym, offsetof(struct str, mem));
void foo(void)
{
#include <common-offsets.h>
}

View File

@@ -0,0 +1,170 @@
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __SYSDEP_I386_PTRACE_H
#define __SYSDEP_I386_PTRACE_H
#include "user_constants.h"
#include "sysdep/faultinfo.h"
#define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long))
#define MAX_REG_OFFSET (UM_FRAME_SIZE)
static inline void update_debugregs(int seq) {}
/* syscall emulation path in ptrace */
#ifndef PTRACE_SYSEMU
#define PTRACE_SYSEMU 31
#endif
void set_using_sysemu(int value);
int get_using_sysemu(void);
extern int sysemu_supported;
#include "skas_ptregs.h"
#define REGS_IP(r) ((r)[HOST_IP])
#define REGS_SP(r) ((r)[HOST_SP])
#define REGS_EFLAGS(r) ((r)[HOST_EFLAGS])
#define REGS_EAX(r) ((r)[HOST_EAX])
#define REGS_EBX(r) ((r)[HOST_EBX])
#define REGS_ECX(r) ((r)[HOST_ECX])
#define REGS_EDX(r) ((r)[HOST_EDX])
#define REGS_ESI(r) ((r)[HOST_ESI])
#define REGS_EDI(r) ((r)[HOST_EDI])
#define REGS_EBP(r) ((r)[HOST_EBP])
#define REGS_CS(r) ((r)[HOST_CS])
#define REGS_SS(r) ((r)[HOST_SS])
#define REGS_DS(r) ((r)[HOST_DS])
#define REGS_ES(r) ((r)[HOST_ES])
#define REGS_FS(r) ((r)[HOST_FS])
#define REGS_GS(r) ((r)[HOST_GS])
#define REGS_SET_SYSCALL_RETURN(r, res) REGS_EAX(r) = (res)
#define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r))
#ifndef PTRACE_SYSEMU_SINGLESTEP
#define PTRACE_SYSEMU_SINGLESTEP 32
#endif
struct uml_pt_regs {
unsigned long gp[MAX_REG_NR];
struct faultinfo faultinfo;
long syscall;
int is_user;
};
#define EMPTY_UML_PT_REGS { }
#define UPT_IP(r) REGS_IP((r)->gp)
#define UPT_SP(r) REGS_SP((r)->gp)
#define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp)
#define UPT_EAX(r) REGS_EAX((r)->gp)
#define UPT_EBX(r) REGS_EBX((r)->gp)
#define UPT_ECX(r) REGS_ECX((r)->gp)
#define UPT_EDX(r) REGS_EDX((r)->gp)
#define UPT_ESI(r) REGS_ESI((r)->gp)
#define UPT_EDI(r) REGS_EDI((r)->gp)
#define UPT_EBP(r) REGS_EBP((r)->gp)
#define UPT_ORIG_EAX(r) ((r)->syscall)
#define UPT_CS(r) REGS_CS((r)->gp)
#define UPT_SS(r) REGS_SS((r)->gp)
#define UPT_DS(r) REGS_DS((r)->gp)
#define UPT_ES(r) REGS_ES((r)->gp)
#define UPT_FS(r) REGS_FS((r)->gp)
#define UPT_GS(r) REGS_GS((r)->gp)
#define UPT_SYSCALL_ARG1(r) UPT_EBX(r)
#define UPT_SYSCALL_ARG2(r) UPT_ECX(r)
#define UPT_SYSCALL_ARG3(r) UPT_EDX(r)
#define UPT_SYSCALL_ARG4(r) UPT_ESI(r)
#define UPT_SYSCALL_ARG5(r) UPT_EDI(r)
#define UPT_SYSCALL_ARG6(r) UPT_EBP(r)
extern int user_context(unsigned long sp);
#define UPT_IS_USER(r) ((r)->is_user)
struct syscall_args {
unsigned long args[6];
};
#define SYSCALL_ARGS(r) ((struct syscall_args) \
{ .args = { UPT_SYSCALL_ARG1(r), \
UPT_SYSCALL_ARG2(r), \
UPT_SYSCALL_ARG3(r), \
UPT_SYSCALL_ARG4(r), \
UPT_SYSCALL_ARG5(r), \
UPT_SYSCALL_ARG6(r) } } )
#define UPT_REG(regs, reg) \
({ unsigned long val; \
switch(reg){ \
case EIP: val = UPT_IP(regs); break; \
case UESP: val = UPT_SP(regs); break; \
case EAX: val = UPT_EAX(regs); break; \
case EBX: val = UPT_EBX(regs); break; \
case ECX: val = UPT_ECX(regs); break; \
case EDX: val = UPT_EDX(regs); break; \
case ESI: val = UPT_ESI(regs); break; \
case EDI: val = UPT_EDI(regs); break; \
case EBP: val = UPT_EBP(regs); break; \
case ORIG_EAX: val = UPT_ORIG_EAX(regs); break; \
case CS: val = UPT_CS(regs); break; \
case SS: val = UPT_SS(regs); break; \
case DS: val = UPT_DS(regs); break; \
case ES: val = UPT_ES(regs); break; \
case FS: val = UPT_FS(regs); break; \
case GS: val = UPT_GS(regs); break; \
case EFL: val = UPT_EFLAGS(regs); break; \
default : \
panic("Bad register in UPT_REG : %d\n", reg); \
val = -1; \
} \
val; \
})
#define UPT_SET(regs, reg, val) \
do { \
switch(reg){ \
case EIP: UPT_IP(regs) = val; break; \
case UESP: UPT_SP(regs) = val; break; \
case EAX: UPT_EAX(regs) = val; break; \
case EBX: UPT_EBX(regs) = val; break; \
case ECX: UPT_ECX(regs) = val; break; \
case EDX: UPT_EDX(regs) = val; break; \
case ESI: UPT_ESI(regs) = val; break; \
case EDI: UPT_EDI(regs) = val; break; \
case EBP: UPT_EBP(regs) = val; break; \
case ORIG_EAX: UPT_ORIG_EAX(regs) = val; break; \
case CS: UPT_CS(regs) = val; break; \
case SS: UPT_SS(regs) = val; break; \
case DS: UPT_DS(regs) = val; break; \
case ES: UPT_ES(regs) = val; break; \
case FS: UPT_FS(regs) = val; break; \
case GS: UPT_GS(regs) = val; break; \
case EFL: UPT_EFLAGS(regs) = val; break; \
default : \
panic("Bad register in UPT_SET : %d\n", reg); \
break; \
} \
} while (0)
#define UPT_SET_SYSCALL_RETURN(r, res) \
REGS_SET_SYSCALL_RETURN((r)->regs, (res))
#define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
#define UPT_ORIG_SYSCALL(r) UPT_EAX(r)
#define UPT_SYSCALL_NR(r) UPT_ORIG_EAX(r)
#define UPT_SYSCALL_RET(r) UPT_EAX(r)
#define UPT_FAULTINFO(r) (&(r)->faultinfo)
extern void arch_init_registers(int pid);
#endif

View File

@@ -0,0 +1,50 @@
/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#ifndef __SYSDEP_I386_PTRACE_USER_H__
#define __SYSDEP_I386_PTRACE_USER_H__
#include <sys/ptrace.h>
#include <linux/ptrace.h>
#include <asm/ptrace.h>
#include "user_constants.h"
#define PT_OFFSET(r) ((r) * sizeof(long))
#define PT_SYSCALL_NR(regs) ((regs)[ORIG_EAX])
#define PT_SYSCALL_NR_OFFSET PT_OFFSET(ORIG_EAX)
#define PT_SYSCALL_ARG1_OFFSET PT_OFFSET(EBX)
#define PT_SYSCALL_ARG2_OFFSET PT_OFFSET(ECX)
#define PT_SYSCALL_ARG3_OFFSET PT_OFFSET(EDX)
#define PT_SYSCALL_ARG4_OFFSET PT_OFFSET(ESI)
#define PT_SYSCALL_ARG5_OFFSET PT_OFFSET(EDI)
#define PT_SYSCALL_ARG6_OFFSET PT_OFFSET(EBP)
#define PT_SYSCALL_RET_OFFSET PT_OFFSET(EAX)
#define REGS_SYSCALL_NR EAX /* This is used before a system call */
#define REGS_SYSCALL_ARG1 EBX
#define REGS_SYSCALL_ARG2 ECX
#define REGS_SYSCALL_ARG3 EDX
#define REGS_SYSCALL_ARG4 ESI
#define REGS_SYSCALL_ARG5 EDI
#define REGS_SYSCALL_ARG6 EBP
#define REGS_IP_INDEX EIP
#define REGS_SP_INDEX UESP
#define PT_IP_OFFSET PT_OFFSET(EIP)
#define PT_IP(regs) ((regs)[EIP])
#define PT_SP_OFFSET PT_OFFSET(UESP)
#define PT_SP(regs) ((regs)[UESP])
#define FP_SIZE ((HOST_FPX_SIZE > HOST_FP_SIZE) ? HOST_FPX_SIZE : HOST_FP_SIZE)
#ifndef FRAME_SIZE
#define FRAME_SIZE (17)
#endif
#endif

View File

@@ -0,0 +1,44 @@
#ifndef __SYSDEP_I386_SC_H
#define __SYSDEP_I386_SC_H
#include <user_constants.h>
#define SC_OFFSET(sc, field) \
*((unsigned long *) &(((char *) (sc))[HOST_##field]))
#define SC_FP_OFFSET(sc, field) \
*((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field]))
#define SC_FP_OFFSET_PTR(sc, field, type) \
((type *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field]))
#define SC_IP(sc) SC_OFFSET(sc, SC_IP)
#define SC_SP(sc) SC_OFFSET(sc, SC_SP)
#define SC_FS(sc) SC_OFFSET(sc, SC_FS)
#define SC_GS(sc) SC_OFFSET(sc, SC_GS)
#define SC_DS(sc) SC_OFFSET(sc, SC_DS)
#define SC_ES(sc) SC_OFFSET(sc, SC_ES)
#define SC_SS(sc) SC_OFFSET(sc, SC_SS)
#define SC_CS(sc) SC_OFFSET(sc, SC_CS)
#define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS)
#define SC_EAX(sc) SC_OFFSET(sc, SC_EAX)
#define SC_EBX(sc) SC_OFFSET(sc, SC_EBX)
#define SC_ECX(sc) SC_OFFSET(sc, SC_ECX)
#define SC_EDX(sc) SC_OFFSET(sc, SC_EDX)
#define SC_EDI(sc) SC_OFFSET(sc, SC_EDI)
#define SC_ESI(sc) SC_OFFSET(sc, SC_ESI)
#define SC_EBP(sc) SC_OFFSET(sc, SC_EBP)
#define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO)
#define SC_ERR(sc) SC_OFFSET(sc, SC_ERR)
#define SC_CR2(sc) SC_OFFSET(sc, SC_CR2)
#define SC_FPSTATE(sc) SC_OFFSET(sc, SC_FPSTATE)
#define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK)
#define SC_FP_CW(sc) SC_FP_OFFSET(sc, SC_FP_CW)
#define SC_FP_SW(sc) SC_FP_OFFSET(sc, SC_FP_SW)
#define SC_FP_TAG(sc) SC_FP_OFFSET(sc, SC_FP_TAG)
#define SC_FP_IPOFF(sc) SC_FP_OFFSET(sc, SC_FP_IPOFF)
#define SC_FP_CSSEL(sc) SC_FP_OFFSET(sc, SC_FP_CSSEL)
#define SC_FP_DATAOFF(sc) SC_FP_OFFSET(sc, SC_FP_DATAOFF)
#define SC_FP_DATASEL(sc) SC_FP_OFFSET(sc, SC_FP_DATASEL)
#define SC_FP_ST(sc) SC_FP_OFFSET_PTR(sc, SC_FP_ST, struct _fpstate)
#define SC_FXSR_ENV(sc) SC_FP_OFFSET_PTR(sc, SC_FXSR_ENV, void)
#endif

View File

@@ -0,0 +1,26 @@
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __SYS_SIGCONTEXT_I386_H
#define __SYS_SIGCONTEXT_I386_H
#include "sysdep/sc.h"
#define IP_RESTART_SYSCALL(ip) ((ip) -= 2)
#define GET_FAULTINFO_FROM_SC(fi, sc) \
{ \
(fi).cr2 = SC_CR2(sc); \
(fi).error_code = SC_ERR(sc); \
(fi).trap_no = SC_TRAPNO(sc); \
}
/* This is Page Fault */
#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
/* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */
#define SEGV_MAYBE_FIXABLE(fi) ((fi)->trap_no == 0 && ptrace_faultinfo)
#endif

View File

@@ -0,0 +1,22 @@
/*
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#ifndef __SYSDEP_I386_SKAS_PTRACE_H
#define __SYSDEP_I386_SKAS_PTRACE_H
struct ptrace_faultinfo {
int is_write;
unsigned long addr;
};
struct ptrace_ldt {
int func;
void *ptr;
unsigned long bytecount;
};
#define PTRACE_LDT 54
#endif

View File

@@ -0,0 +1,101 @@
/*
* Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H
#include <sys/mman.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include "as-layout.h"
#include "stub-data.h"
#include "kern_constants.h"
extern void stub_segv_handler(int sig);
extern void stub_clone_handler(void);
#define STUB_SYSCALL_RET EAX
#define STUB_MMAP_NR __NR_mmap2
#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
static inline long stub_syscall0(long syscall)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall));
return ret;
}
static inline long stub_syscall1(long syscall, long arg1)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1));
return ret;
}
static inline long stub_syscall2(long syscall, long arg1, long arg2)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
"c" (arg2));
return ret;
}
static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
"c" (arg2), "d" (arg3));
return ret;
}
static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
long arg4)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
"c" (arg2), "d" (arg3), "S" (arg4));
return ret;
}
static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
long arg4, long arg5)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
"c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5));
return ret;
}
static inline void trap_myself(void)
{
__asm("int3");
}
static inline void remap_stack(int fd, unsigned long offset)
{
__asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;"
"movl %7, %%ebx ; movl %%eax, (%%ebx)"
: : "g" (STUB_MMAP_NR), "b" (STUB_DATA),
"c" (UM_KERN_PAGE_SIZE),
"d" (PROT_READ | PROT_WRITE),
"S" (MAP_FIXED | MAP_SHARED), "D" (fd),
"a" (offset),
"i" (&((struct stub_data *) STUB_DATA)->err)
: "memory");
}
#endif

View File

@@ -0,0 +1,22 @@
/*
* Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include "asm/unistd.h"
#include "sysdep/ptrace.h"
typedef long syscall_handler_t(struct pt_regs);
/* Not declared on x86, incompatible declarations on x86_64, so these have
* to go here rather than in sys_call_table.c
*/
extern syscall_handler_t sys_rt_sigaction;
extern syscall_handler_t old_mmap_i386;
extern syscall_handler_t *sys_call_table[];
#define EXECUTE_SYSCALL(syscall, regs) \
((long (*)(struct syscall_args)) \
(*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))

View File

@@ -0,0 +1,132 @@
#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_
#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif
extern unsigned long arch_align_stack(unsigned long sp);
void default_idle(void);
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#ifdef CONFIG_X86_32
/*
* Some non-Intel clones support out of order store. wmb() ceases to be a
* nop for these.
*/
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb() rmb()
#else
# define smp_rmb() barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb() wmb()
#else
# define smp_wmb() barrier()
#endif
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
*
 * (Could use a three-way alternative() for this if there was one.)
*/
static inline void rdtsc_barrier(void)
{
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
#endif

View File

@@ -0,0 +1,32 @@
#ifndef _SYSDEP_TLS_H
#define _SYSDEP_TLS_H
# ifndef __KERNEL__
/* Change the name to avoid conflicts with the original one from <asm/ldt.h>,
 * which may be named user_desc (but in 2.4, and in headers matching its API,
 * it was named modify_ldt_ldt_s). */
typedef struct um_dup_user_desc {
unsigned int entry_number;
unsigned int base_addr;
unsigned int limit;
unsigned int seg_32bit:1;
unsigned int contents:2;
unsigned int read_exec_only:1;
unsigned int limit_in_pages:1;
unsigned int seg_not_present:1;
unsigned int useable:1;
} user_desc_t;
# else /* __KERNEL__ */
# include <ldt.h>
typedef struct user_desc user_desc_t;
# endif /* __KERNEL__ */
#define GDT_ENTRY_TLS_MIN_I386 6
#define GDT_ENTRY_TLS_MIN_X86_64 12
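/*
 * These are the first GDT indexes reserved for TLS on 32-bit and 64-bit
 * hosts respectively; tls.c compares the value detected on the host against
 * them to report whether UML is running on an i386 or an x86_64 host.
 */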
#endif /* _SYSDEP_TLS_H */

View File

@@ -0,0 +1,14 @@
/*
* Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
#ifndef __VM_FLAGS_I386_H
#define __VM_FLAGS_I386_H
#define VM_DATA_DEFAULT_FLAGS \
(VM_READ | VM_WRITE | \
((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif

View File

@@ -0,0 +1,508 @@
/*
* Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/ptrace.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include "frame_kern.h"
#include "skas.h"
void copy_sc(struct uml_pt_regs *regs, void *from)
{
struct sigcontext *sc = from;
REGS_GS(regs->gp) = sc->gs;
REGS_FS(regs->gp) = sc->fs;
REGS_ES(regs->gp) = sc->es;
REGS_DS(regs->gp) = sc->ds;
REGS_EDI(regs->gp) = sc->di;
REGS_ESI(regs->gp) = sc->si;
REGS_EBP(regs->gp) = sc->bp;
REGS_SP(regs->gp) = sc->sp;
REGS_EBX(regs->gp) = sc->bx;
REGS_EDX(regs->gp) = sc->dx;
REGS_ECX(regs->gp) = sc->cx;
REGS_EAX(regs->gp) = sc->ax;
REGS_IP(regs->gp) = sc->ip;
REGS_CS(regs->gp) = sc->cs;
REGS_EFLAGS(regs->gp) = sc->flags;
REGS_SS(regs->gp) = sc->ss;
}
/*
* FPU tag word conversions.
*/
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
unsigned int tmp; /* to avoid 16 bit prefixes in the code */
/* Transform each pair of bits into 01 (valid) or 00 (empty) */
tmp = ~twd;
tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
/* and move the valid bits to the lower byte. */
tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
return tmp;
}
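/*
 * Example: with only st(0) in use (i387 tag 00) and the other registers
 * empty (tag 11), twd == 0xfffc.  After ~twd only the low bit pair is set,
 * the 0x5555 mask leaves 0x0001, and the packing steps keep it as 0x01:
 * only bit 0 set, i.e. only st(0) marked non-empty in FXSR format.
 */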
static inline unsigned long twd_fxsr_to_i387(struct user_fxsr_struct *fxsave)
{
struct _fpxreg *st = NULL;
unsigned long twd = (unsigned long) fxsave->twd;
unsigned long tag;
unsigned long ret = 0xffff0000;
int i;
#define FPREG_ADDR(f, n) ((char *)&(f)->st_space + (n) * 16);
for (i = 0; i < 8; i++) {
if (twd & 0x1) {
st = (struct _fpxreg *) FPREG_ADDR(fxsave, i);
switch (st->exponent & 0x7fff) {
case 0x7fff:
tag = 2; /* Special */
break;
case 0x0000:
if ( !st->significand[0] &&
!st->significand[1] &&
!st->significand[2] &&
!st->significand[3] ) {
tag = 1; /* Zero */
} else {
tag = 2; /* Special */
}
break;
default:
if (st->significand[3] & 0x8000) {
tag = 0; /* Valid */
} else {
tag = 2; /* Special */
}
break;
}
} else {
tag = 3; /* Empty */
}
ret |= (tag << (2 * i));
twd = twd >> 1;
}
return ret;
}
static int convert_fxsr_to_user(struct _fpstate __user *buf,
struct user_fxsr_struct *fxsave)
{
unsigned long env[7];
struct _fpreg __user *to;
struct _fpxreg *from;
int i;
env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
env[2] = twd_fxsr_to_i387(fxsave);
env[3] = fxsave->fip;
env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
env[5] = fxsave->foo;
env[6] = fxsave->fos;
if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
return 1;
to = &buf->_st[0];
from = (struct _fpxreg *) &fxsave->st_space[0];
for (i = 0; i < 8; i++, to++, from++) {
unsigned long __user *t = (unsigned long __user *)to;
unsigned long *f = (unsigned long *)from;
if (__put_user(*f, t) ||
__put_user(*(f + 1), t + 1) ||
__put_user(from->exponent, &to->exponent))
return 1;
}
return 0;
}
static int convert_fxsr_from_user(struct user_fxsr_struct *fxsave,
struct _fpstate __user *buf)
{
unsigned long env[7];
struct _fpxreg *to;
struct _fpreg __user *from;
int i;
if (copy_from_user( env, buf, 7 * sizeof(long)))
return 1;
fxsave->cwd = (unsigned short)(env[0] & 0xffff);
fxsave->swd = (unsigned short)(env[1] & 0xffff);
fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
fxsave->fip = env[3];
fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
fxsave->fcs = (env[4] & 0xffff);
fxsave->foo = env[5];
fxsave->fos = env[6];
to = (struct _fpxreg *) &fxsave->st_space[0];
from = &buf->_st[0];
for (i = 0; i < 8; i++, to++, from++) {
unsigned long *t = (unsigned long *)to;
unsigned long __user *f = (unsigned long __user *)from;
if (__get_user(*t, f) ||
__get_user(*(t + 1), f + 1) ||
__get_user(to->exponent, &from->exponent))
return 1;
}
return 0;
}
extern int have_fpx_regs;
static int copy_sc_from_user(struct pt_regs *regs,
struct sigcontext __user *from)
{
struct sigcontext sc;
int err, pid;
err = copy_from_user(&sc, from, sizeof(sc));
if (err)
return err;
pid = userspace_pid[current_thread_info()->cpu];
copy_sc(&regs->regs, &sc);
if (have_fpx_regs) {
struct user_fxsr_struct fpx;
err = copy_from_user(&fpx,
&((struct _fpstate __user *)sc.fpstate)->_fxsr_env[0],
sizeof(struct user_fxsr_struct));
if (err)
return 1;
err = convert_fxsr_from_user(&fpx, sc.fpstate);
if (err)
return 1;
err = restore_fpx_registers(pid, (unsigned long *) &fpx);
if (err < 0) {
printk(KERN_ERR "copy_sc_from_user - "
"restore_fpx_registers failed, errno = %d\n",
-err);
return 1;
}
}
else {
struct user_i387_struct fp;
err = copy_from_user(&fp, sc.fpstate,
sizeof(struct user_i387_struct));
if (err)
return 1;
err = restore_fp_registers(pid, (unsigned long *) &fp);
if (err < 0) {
printk(KERN_ERR "copy_sc_from_user - "
"restore_fp_registers failed, errno = %d\n",
-err);
return 1;
}
}
return 0;
}
static int copy_sc_to_user(struct sigcontext __user *to,
struct _fpstate __user *to_fp, struct pt_regs *regs,
unsigned long sp)
{
struct sigcontext sc;
struct faultinfo * fi = &current->thread.arch.faultinfo;
int err, pid;
sc.gs = REGS_GS(regs->regs.gp);
sc.fs = REGS_FS(regs->regs.gp);
sc.es = REGS_ES(regs->regs.gp);
sc.ds = REGS_DS(regs->regs.gp);
sc.di = REGS_EDI(regs->regs.gp);
sc.si = REGS_ESI(regs->regs.gp);
sc.bp = REGS_EBP(regs->regs.gp);
sc.sp = sp;
sc.bx = REGS_EBX(regs->regs.gp);
sc.dx = REGS_EDX(regs->regs.gp);
sc.cx = REGS_ECX(regs->regs.gp);
sc.ax = REGS_EAX(regs->regs.gp);
sc.ip = REGS_IP(regs->regs.gp);
sc.cs = REGS_CS(regs->regs.gp);
sc.flags = REGS_EFLAGS(regs->regs.gp);
sc.sp_at_signal = regs->regs.gp[UESP];
sc.ss = regs->regs.gp[SS];
sc.cr2 = fi->cr2;
sc.err = fi->error_code;
sc.trapno = fi->trap_no;
to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1));
sc.fpstate = to_fp;
pid = userspace_pid[current_thread_info()->cpu];
if (have_fpx_regs) {
struct user_fxsr_struct fpx;
err = save_fpx_registers(pid, (unsigned long *) &fpx);
if (err < 0){
printk(KERN_ERR "copy_sc_to_user - save_fpx_registers "
"failed, errno = %d\n", err);
return 1;
}
err = convert_fxsr_to_user(to_fp, &fpx);
if (err)
return 1;
err |= __put_user(fpx.swd, &to_fp->status);
err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic);
if (err)
return 1;
if (copy_to_user(&to_fp->_fxsr_env[0], &fpx,
sizeof(struct user_fxsr_struct)))
return 1;
}
else {
struct user_i387_struct fp;
err = save_fp_registers(pid, (unsigned long *) &fp);
if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
return 1;
}
return copy_to_user(to, &sc, sizeof(sc));
}
static int copy_ucontext_to_user(struct ucontext __user *uc,
struct _fpstate __user *fp, sigset_t *set,
unsigned long sp)
{
int err = 0;
err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp);
err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags);
err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size);
err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, sp);
err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set));
return err;
}
struct sigframe
{
char __user *pretcode;
int sig;
struct sigcontext sc;
struct _fpstate fpstate;
unsigned long extramask[_NSIG_WORDS-1];
char retcode[8];
};
struct rt_sigframe
{
char __user *pretcode;
int sig;
struct siginfo __user *pinfo;
void __user *puc;
struct siginfo info;
struct ucontext uc;
struct _fpstate fpstate;
char retcode[8];
};
int setup_signal_stack_sc(unsigned long stack_top, int sig,
struct k_sigaction *ka, struct pt_regs *regs,
sigset_t *mask)
{
struct sigframe __user *frame;
void __user *restorer;
unsigned long save_sp = PT_REGS_SP(regs);
int err = 0;
/* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */
stack_top = ((stack_top + 4) & -16UL) - 4;
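/*
 * E.g. a stack_top of 0xbf000000 becomes 0xbefffffc here, which satisfies
 * ((sp + 4) & 15) == 0.
 */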
frame = (struct sigframe __user *) stack_top - 1;
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return 1;
restorer = frame->retcode;
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
/* Update SP now because the page fault handler refuses to extend
* the stack if the faulting address is too far below the current
 * SP, which the frame now certainly is. If there's an error, the original
* value is restored on the way out.
* When writing the sigcontext to the stack, we have to write the
* original value, so that's passed to copy_sc_to_user, which does
* the right thing with it.
*/
PT_REGS_SP(regs) = (unsigned long) frame;
err |= __put_user(restorer, &frame->pretcode);
err |= __put_user(sig, &frame->sig);
err |= copy_sc_to_user(&frame->sc, NULL, regs, save_sp);
err |= __put_user(mask->sig[0], &frame->sc.oldmask);
if (_NSIG_WORDS > 1)
err |= __copy_to_user(&frame->extramask, &mask->sig[1],
sizeof(frame->extramask));
/*
* This is popl %eax ; movl $,%eax ; int $0x80
*
* WE DO NOT USE IT ANY MORE! It's only left here for historical
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
if (err)
goto err;
PT_REGS_SP(regs) = (unsigned long) frame;
PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
PT_REGS_EAX(regs) = (unsigned long) sig;
PT_REGS_EDX(regs) = (unsigned long) 0;
PT_REGS_ECX(regs) = (unsigned long) 0;
if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
ptrace_notify(SIGTRAP);
return 0;
err:
PT_REGS_SP(regs) = save_sp;
return err;
}
int setup_signal_stack_si(unsigned long stack_top, int sig,
struct k_sigaction *ka, struct pt_regs *regs,
siginfo_t *info, sigset_t *mask)
{
struct rt_sigframe __user *frame;
void __user *restorer;
unsigned long save_sp = PT_REGS_SP(regs);
int err = 0;
stack_top &= -8UL;
frame = (struct rt_sigframe __user *) stack_top - 1;
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return 1;
restorer = frame->retcode;
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
/* See comment above about why this is here */
PT_REGS_SP(regs) = (unsigned long) frame;
err |= __put_user(restorer, &frame->pretcode);
err |= __put_user(sig, &frame->sig);
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, info);
err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask,
save_sp);
/*
* This is movl $,%eax ; int $0x80
*
* WE DO NOT USE IT ANY MORE! It's only left here for historical
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
if (err)
goto err;
PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
PT_REGS_EAX(regs) = (unsigned long) sig;
PT_REGS_EDX(regs) = (unsigned long) &frame->info;
PT_REGS_ECX(regs) = (unsigned long) &frame->uc;
if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
ptrace_notify(SIGTRAP);
return 0;
err:
PT_REGS_SP(regs) = save_sp;
return err;
}
long sys_sigreturn(struct pt_regs regs)
{
unsigned long sp = PT_REGS_SP(&current->thread.regs);
struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
sigset_t set;
struct sigcontext __user *sc = &frame->sc;
unsigned long __user *oldmask = &sc->oldmask;
unsigned long __user *extramask = frame->extramask;
int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
copy_from_user(&set.sig[1], extramask, sig_size))
goto segfault;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (copy_sc_from_user(&current->thread.regs, sc))
goto segfault;
/* Avoid ERESTART handling */
PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
return PT_REGS_SYSCALL_RET(&current->thread.regs);
segfault:
force_sig(SIGSEGV, current);
return 0;
}
long sys_rt_sigreturn(struct pt_regs regs)
{
unsigned long sp = PT_REGS_SP(&current->thread.regs);
struct rt_sigframe __user *frame =
(struct rt_sigframe __user *) (sp - 4);
sigset_t set;
struct ucontext __user *uc = &frame->uc;
int sig_size = _NSIG_WORDS * sizeof(unsigned long);
if (copy_from_user(&set, &uc->uc_sigmask, sig_size))
goto segfault;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
goto segfault;
/* Avoid ERESTART handling */
PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
return PT_REGS_SYSCALL_RET(&current->thread.regs);
segfault:
force_sig(SIGSEGV, current);
return 0;
}

View File

@@ -0,0 +1,51 @@
#include "as-layout.h"
.globl syscall_stub
.section .__syscall_stub, "ax"
.globl batch_syscall_stub
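/*
 * Rough layout assumed by this stub: STUB_DATA+0 receives the last return
 * value, STUB_DATA+4 ends up as 0 on success or as a pointer to the failing
 * record, and the records start at STUB_DATA+8.  Each record holds a length
 * word, any additional data, the syscall number, the six arguments popped
 * into %ebx, %ecx, %edx, %esi, %edi and %ebp, and the expected return value.
 */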
batch_syscall_stub:
/* load pointer to first operation */
mov $(STUB_DATA+8), %esp
again:
/* load length of additional data */
mov 0x0(%esp), %eax
/* if(length == 0) : end of list */
/* write possible 0 to header */
mov %eax, STUB_DATA+4
cmpl $0, %eax
jz done
/* save current pointer */
mov %esp, STUB_DATA+4
/* skip additional data */
add %eax, %esp
/* load syscall-# */
pop %eax
/* load syscall params */
pop %ebx
pop %ecx
pop %edx
pop %esi
pop %edi
pop %ebp
/* execute syscall */
int $0x80
/* check return value */
pop %ebx
cmp %ebx, %eax
je again
done:
/* save return value */
mov %eax, STUB_DATA
/* stop */
int3

View File

@@ -0,0 +1,17 @@
/*
* Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include "sysdep/stub.h"
#include "sysdep/sigcontext.h"
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig)
{
struct sigcontext *sc = (struct sigcontext *) (&sig + 1);
GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA), sc);
trap_myself();
}

View File

@@ -0,0 +1,28 @@
#include <linux/linkage.h>
/* Steal i386 syscall table for our purposes, but with some slight changes. */
#define sys_iopl sys_ni_syscall
#define sys_ioperm sys_ni_syscall
#define sys_vm86old sys_ni_syscall
#define sys_vm86 sys_ni_syscall
#define old_mmap old_mmap_i386
#define ptregs_fork sys_fork
#define ptregs_execve sys_execve
#define ptregs_iopl sys_iopl
#define ptregs_vm86old sys_vm86old
#define ptregs_sigreturn sys_sigreturn
#define ptregs_clone sys_clone
#define ptregs_vm86 sys_vm86
#define ptregs_rt_sigreturn sys_rt_sigreturn
#define ptregs_sigaltstack sys_sigaltstack
#define ptregs_vfork sys_vfork
.section .rodata,"a"
#include "../../x86/kernel/syscall_table_32.S"
ENTRY(syscall_table_size)
.long .-sys_call_table

View File

@@ -0,0 +1,203 @@
/*
* Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
#include "linux/sched.h"
#include "linux/shm.h"
#include "linux/ipc.h"
#include "linux/syscalls.h"
#include "asm/mman.h"
#include "asm/uaccess.h"
#include "asm/unistd.h"
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux/i386 didn't use to be able to handle more than
* 4 system call parameters, so these system calls used a memory
 * block for parameter passing.
*/
struct mmap_arg_struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
};
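/*
 * A legacy caller fills in such a block and passes its address as the single
 * argument of the old mmap call, e.g. roughly:
 *
 *	struct mmap_arg_struct a = { addr, len, PROT_READ, MAP_PRIVATE, fd, 0 };
 *	syscall(__NR_mmap, &a);
 */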
extern int old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long offset);
long old_mmap_i386(struct mmap_arg_struct __user *arg)
{
struct mmap_arg_struct a;
int err = -EFAULT;
if (copy_from_user(&a, arg, sizeof(a)))
goto out;
err = old_mmap(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
out:
return err;
}
struct sel_arg_struct {
unsigned long n;
fd_set __user *inp;
fd_set __user *outp;
fd_set __user *exp;
struct timeval __user *tvp;
};
long old_select(struct sel_arg_struct __user *arg)
{
struct sel_arg_struct a;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
/* sys_select() does the appropriate kernel locking */
return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
/*
* The prototype on i386 is:
*
* int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
*
* and the "newtls" arg. on i386 is read by copy_thread directly from the
* register saved on the stack.
*/
long sys_clone(unsigned long clone_flags, unsigned long newsp,
int __user *parent_tid, void *newtls, int __user *child_tid)
{
long ret;
if (!newsp)
newsp = UPT_SP(&current->thread.regs.regs);
current->thread.forking = 1;
ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
child_tid);
current->thread.forking = 0;
return ret;
}
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
*
* This is really horribly ugly.
*/
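/*
 * For example, a semop(semid, sops, nsops) call from i386 userspace is
 * typically issued as sys_ipc(SEMOP, semid, nsops, 0, sops, 0), which the
 * switch below forwards to sys_semtimedop().
 */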
long sys_ipc (uint call, int first, int second,
int third, void __user *ptr, long fifth)
{
int version, ret;
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
switch (call) {
case SEMOP:
return sys_semtimedop(first, (struct sembuf __user *) ptr,
second, NULL);
case SEMTIMEDOP:
return sys_semtimedop(first, (struct sembuf __user *) ptr,
second,
(const struct timespec __user *) fifth);
case SEMGET:
return sys_semget (first, second, third);
case SEMCTL: {
union semun fourth;
if (!ptr)
return -EINVAL;
if (get_user(fourth.__pad, (void __user * __user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
case MSGSND:
return sys_msgsnd (first, (struct msgbuf *) ptr,
second, third);
case MSGRCV:
switch (version) {
case 0: {
struct ipc_kludge tmp;
if (!ptr)
return -EINVAL;
if (copy_from_user(&tmp,
(struct ipc_kludge *) ptr,
sizeof (tmp)))
return -EFAULT;
return sys_msgrcv (first, tmp.msgp, second,
tmp.msgtyp, third);
}
default:
panic("msgrcv with version != 0");
return sys_msgrcv (first,
(struct msgbuf *) ptr,
second, fifth, third);
}
case MSGGET:
return sys_msgget ((key_t) first, second);
case MSGCTL:
return sys_msgctl (first, second, (struct msqid_ds *) ptr);
case SHMAT:
switch (version) {
default: {
ulong raddr;
ret = do_shmat (first, (char *) ptr, second, &raddr);
if (ret)
return ret;
return put_user (raddr, (ulong *) third);
}
case 1: /* iBCS2 emulator entry point */
if (!segment_eq(get_fs(), get_ds()))
return -EINVAL;
return do_shmat (first, (char *) ptr, second, (ulong *) third);
}
case SHMDT:
return sys_shmdt ((char *)ptr);
case SHMGET:
return sys_shmget (first, second, third);
case SHMCTL:
return sys_shmctl (first, second,
(struct shmid_ds *) ptr);
default:
return -ENOSYS;
}
}
long sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
if (act) {
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
return -EFAULT;
__get_user(new_ka.sa.sa_flags, &act->sa_flags);
__get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
return -EFAULT;
__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
}

View File

@@ -0,0 +1,101 @@
/*
* Copyright (C) 2001 - 2003 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
#include "linux/kernel.h"
#include "linux/smp.h"
#include "linux/sched.h"
#include "linux/kallsyms.h"
#include "asm/ptrace.h"
#include "sysrq.h"
/* This is declared by <linux/sched.h> */
void show_regs(struct pt_regs *regs)
{
printk("\n");
printk("EIP: %04lx:[<%08lx>] CPU: %d %s",
0xffff & PT_REGS_CS(regs), PT_REGS_IP(regs),
smp_processor_id(), print_tainted());
if (PT_REGS_CS(regs) & 3)
printk(" ESP: %04lx:%08lx", 0xffff & PT_REGS_SS(regs),
PT_REGS_SP(regs));
printk(" EFLAGS: %08lx\n %s\n", PT_REGS_EFLAGS(regs),
print_tainted());
printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
PT_REGS_EAX(regs), PT_REGS_EBX(regs),
PT_REGS_ECX(regs),
PT_REGS_EDX(regs));
printk("ESI: %08lx EDI: %08lx EBP: %08lx",
PT_REGS_ESI(regs), PT_REGS_EDI(regs),
PT_REGS_EBP(regs));
printk(" DS: %04lx ES: %04lx\n",
0xffff & PT_REGS_DS(regs),
0xffff & PT_REGS_ES(regs));
show_trace(NULL, (unsigned long *) &regs);
}
/* Copied from i386. */
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
return p > (void *)tinfo &&
p < (void *)tinfo + THREAD_SIZE - 3;
}
/* Adapted from i386 (we also print the address we read from). */
static inline unsigned long print_context_stack(struct thread_info *tinfo,
unsigned long *stack, unsigned long ebp)
{
unsigned long addr;
#ifdef CONFIG_FRAME_POINTER
while (valid_stack_ptr(tinfo, (void *)ebp)) {
addr = *(unsigned long *)(ebp + 4);
printk("%08lx: [<%08lx>]", ebp + 4, addr);
print_symbol(" %s", addr);
printk("\n");
ebp = *(unsigned long *)ebp;
}
#else
while (valid_stack_ptr(tinfo, stack)) {
addr = *stack;
if (__kernel_text_address(addr)) {
printk("%08lx: [<%08lx>]", (unsigned long) stack, addr);
print_symbol(" %s", addr);
printk("\n");
}
stack++;
}
#endif
return ebp;
}
void show_trace(struct task_struct* task, unsigned long * stack)
{
unsigned long ebp;
struct thread_info *context;
/* Turn this into BUG_ON if possible. */
if (!stack) {
stack = (unsigned long*) &stack;
printk("show_trace: got NULL stack, implicit assumption task == current");
WARN_ON(1);
}
if (!task)
task = current;
if (task != current) {
ebp = (unsigned long) KSTK_EBP(task);
} else {
asm ("movl %%ebp, %0" : "=r" (ebp) : );
}
context = (struct thread_info *)
((unsigned long)stack & (~(THREAD_SIZE - 1)));
print_context_stack(context, stack, ebp);
printk("\n");
}

View File

@@ -0,0 +1,396 @@
/*
* Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
* Licensed under the GPL
*/
#include "linux/percpu.h"
#include "linux/sched.h"
#include "asm/uaccess.h"
#include "os.h"
#include "skas.h"
#include "sysdep/tls.h"
/*
* If needed we can detect when it's uninitialized.
*
* These are initialized in an initcall and unchanged thereafter.
*/
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;
int do_set_thread_area(struct user_desc *info)
{
int ret;
u32 cpu;
cpu = get_cpu();
ret = os_set_thread_area(info, userspace_pid[cpu]);
put_cpu();
if (ret)
printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
"index = %d\n", ret, info->entry_number);
return ret;
}
int do_get_thread_area(struct user_desc *info)
{
int ret;
u32 cpu;
cpu = get_cpu();
ret = os_get_thread_area(info, userspace_pid[cpu]);
put_cpu();
if (ret)
printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
"index = %d\n", ret, info->entry_number);
return ret;
}
/*
* sys_get_thread_area: get a yet unused TLS descriptor index.
 * XXX: Consider leaving one free slot for glibc usage in the first place. This must
* be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
*
* Also, this must be tested when compiling in SKAS mode with dynamic linking
* and running against NPTL.
*/
static int get_free_idx(struct task_struct* task)
{
struct thread_struct *t = &task->thread;
int idx;
if (!t->arch.tls_array)
return GDT_ENTRY_TLS_MIN;
for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
if (!t->arch.tls_array[idx].present)
return idx + GDT_ENTRY_TLS_MIN;
return -ESRCH;
}
static inline void clear_user_desc(struct user_desc* info)
{
/* Postcondition: LDT_empty(info) returns true. */
memset(info, 0, sizeof(*info));
/*
 * Check LDT_empty or the i386 sys_get_thread_area code - this indeed yields
 * an empty user_desc.
*/
info->read_exec_only = 1;
info->seg_not_present = 1;
}
#define O_FORCE 1
static int load_TLS(int flags, struct task_struct *to)
{
int ret = 0;
int idx;
for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
struct uml_tls_struct* curr =
&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
/*
 * If a non-present entry hasn't been flushed yet, it gets cleared here and
 * flushed to the host, which will clear it there.
*/
if (!curr->present) {
if (!curr->flushed) {
clear_user_desc(&curr->tls);
curr->tls.entry_number = idx;
} else {
WARN_ON(!LDT_empty(&curr->tls));
continue;
}
}
if (!(flags & O_FORCE) && curr->flushed)
continue;
ret = do_set_thread_area(&curr->tls);
if (ret)
goto out;
curr->flushed = 1;
}
out:
return ret;
}
/*
 * Check whether we need to do a flush for the new process, i.e. whether there
 * are any present descriptors that haven't been flushed yet.
*/
static inline int needs_TLS_update(struct task_struct *task)
{
int i;
int ret = 0;
for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
struct uml_tls_struct* curr =
&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
/*
* Can't test curr->present, we may need to clear a descriptor
* which had a value.
*/
if (curr->flushed)
continue;
ret = 1;
break;
}
return ret;
}
/*
* On a newly forked process, the TLS descriptors haven't yet been flushed. So
* we mark them as such and the first switch_to will do the job.
*/
void clear_flushed_tls(struct task_struct *task)
{
int i;
for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
struct uml_tls_struct* curr =
&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
/*
 * It is still correct to do this: if the entry wasn't present on the host, it
 * will remain as flushed as it was.
*/
if (!curr->present)
continue;
curr->flushed = 0;
}
}
/*
 * Currently, in SKAS0 mode, multiple guest threads sharing the same ->mm have
 * a common host process, so this is needed in SKAS0 too.
*
* However, if each thread had a different host process (and this was discussed
* for SMP support) this won't be needed.
*
 * And this will not need to be used when (and if) we add support for the host
 * SKAS patch.
*/
int arch_switch_tls(struct task_struct *to)
{
if (!host_supports_tls)
return 0;
/*
* We have no need whatsoever to switch TLS for kernel threads; beyond
* that, that would also result in us calling os_set_thread_area with
* userspace_pid[cpu] == 0, which gives an error.
*/
if (likely(to->mm))
return load_TLS(O_FORCE, to);
return 0;
}
static int set_tls_entry(struct task_struct* task, struct user_desc *info,
int idx, int flushed)
{
struct thread_struct *t = &task->thread;
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;
return 0;
}
int arch_copy_tls(struct task_struct *new)
{
struct user_desc info;
int idx, ret = -EFAULT;
if (copy_from_user(&info,
(void __user *) UPT_ESI(&new->thread.regs.regs),
sizeof(info)))
goto out;
ret = -EINVAL;
if (LDT_empty(&info))
goto out;
idx = info.entry_number;
ret = set_tls_entry(new, &info, idx, 0);
out:
return ret;
}
/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
int idx)
{
struct thread_struct *t = &task->thread;
if (!t->arch.tls_array)
goto clear;
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
goto clear;
*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;
out:
/*
* Temporary debugging check, to make sure that things have been
* flushed. This could be triggered if load_TLS() failed.
*/
if (unlikely(task == current &&
!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
printk(KERN_ERR "get_tls_entry: task with pid %d got here "
"without flushed TLS.", current->pid);
}
return 0;
clear:
/*
 * When the TLS entry has not been set, the values returned to userspace from
 * the tls_array are 0 (because the GDT is cleared at boot, see
 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
*/
clear_user_desc(info);
info->entry_number = idx;
goto out;
}
int sys_set_thread_area(struct user_desc __user *user_desc)
{
struct user_desc info;
int idx, ret;
if (!host_supports_tls)
return -ENOSYS;
if (copy_from_user(&info, user_desc, sizeof(info)))
return -EFAULT;
idx = info.entry_number;
if (idx == -1) {
idx = get_free_idx(current);
if (idx < 0)
return idx;
info.entry_number = idx;
/* Tell the user which slot we chose for him. */
if (put_user(idx, &user_desc->entry_number))
return -EFAULT;
}
ret = do_set_thread_area(&info);
if (ret)
return ret;
return set_tls_entry(current, &info, idx, 1);
}
/*
* Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs from
 * i386. However, the only possible errors are caused by bugs.
*/
int ptrace_set_thread_area(struct task_struct *child, int idx,
struct user_desc __user *user_desc)
{
struct user_desc info;
if (!host_supports_tls)
return -EIO;
if (copy_from_user(&info, user_desc, sizeof(info)))
return -EFAULT;
return set_tls_entry(child, &info, idx, 0);
}
int sys_get_thread_area(struct user_desc __user *user_desc)
{
struct user_desc info;
int idx, ret;
if (!host_supports_tls)
return -ENOSYS;
if (get_user(idx, &user_desc->entry_number))
return -EFAULT;
ret = get_tls_entry(current, &info, idx);
if (ret < 0)
goto out;
if (copy_to_user(user_desc, &info, sizeof(info)))
ret = -EFAULT;
out:
return ret;
}
/*
* Perform get_thread_area on behalf of the traced child.
*/
int ptrace_get_thread_area(struct task_struct *child, int idx,
struct user_desc __user *user_desc)
{
struct user_desc info;
int ret;
if (!host_supports_tls)
return -EIO;
ret = get_tls_entry(child, &info, idx);
if (ret < 0)
goto out;
if (copy_to_user(user_desc, &info, sizeof(info)))
ret = -EFAULT;
out:
return ret;
}
/*
* This code is really i386-only, but it detects and logs x86_64 GDT indexes
* if a 32-bit UML is running on a 64-bit host.
*/
static int __init __setup_host_supports_tls(void)
{
check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
if (host_supports_tls) {
printk(KERN_INFO "Host TLS support detected\n");
printk(KERN_INFO "Detected host type: ");
switch (host_gdt_entry_tls_min) {
case GDT_ENTRY_TLS_MIN_I386:
printk(KERN_CONT "i386");
break;
case GDT_ENTRY_TLS_MIN_X86_64:
printk(KERN_CONT "x86_64");
break;
}
printk(KERN_CONT " (GDT indexes %d to %d)\n",
host_gdt_entry_tls_min,
host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
} else
printk(KERN_ERR " Host TLS support NOT detected! "
"TLS support inside UML will not work\n");
return 0;
}
__initcall(__setup_host_supports_tls);

View File

@@ -0,0 +1,53 @@
#include <stdio.h>
#include <stddef.h>
#include <signal.h>
#include <sys/poll.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <asm/ptrace.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define DEFINE_LONGS(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
#define OFFSET(sym, str, mem) \
DEFINE(sym, offsetof(struct str, mem));
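/*
 * Compiling this file to assembly turns each DEFINE() into a marker line
 * such as "->HOST_IP $12 EIP" in user-offsets.s, which the build machinery
 * then appears to turn into #define constants for the userspace side of UML
 * (the usual asm-offsets trick).
 */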
void foo(void)
{
OFFSET(HOST_SC_TRAPNO, sigcontext, trapno);
OFFSET(HOST_SC_ERR, sigcontext, err);
OFFSET(HOST_SC_CR2, sigcontext, cr2);
DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct));
DEFINE_LONGS(HOST_FPX_SIZE, sizeof(struct user_fpxregs_struct));
DEFINE(HOST_IP, EIP);
DEFINE(HOST_SP, UESP);
DEFINE(HOST_EFLAGS, EFL);
DEFINE(HOST_EAX, EAX);
DEFINE(HOST_EBX, EBX);
DEFINE(HOST_ECX, ECX);
DEFINE(HOST_EDX, EDX);
DEFINE(HOST_ESI, ESI);
DEFINE(HOST_EDI, EDI);
DEFINE(HOST_EBP, EBP);
DEFINE(HOST_CS, CS);
DEFINE(HOST_SS, SS);
DEFINE(HOST_DS, DS);
DEFINE(HOST_FS, FS);
DEFINE(HOST_ES, ES);
DEFINE(HOST_GS, GS);
DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
/* XXX Duplicated between i386 and x86_64 */
DEFINE(UM_POLLIN, POLLIN);
DEFINE(UM_POLLPRI, POLLPRI);
DEFINE(UM_POLLOUT, POLLOUT);
DEFINE(UM_PROT_READ, PROT_READ);
DEFINE(UM_PROT_WRITE, PROT_WRITE);
DEFINE(UM_PROT_EXEC, PROT_EXEC);
}