add idl4k kernel firmware version 1.13.0.105

commit e9070cdc77 (parent 5194d2792e)
Author: Jaroslav Kysela
Date:   2015-03-26 17:22:37 +01:00
31064 changed files with 12769984 additions and 0 deletions

kernel/arch/s390/mm/Makefile (new file, 8 lines)

@@ -0,0 +1,8 @@
#
# Makefile for the linux s390-specific parts of the memory manager.
#
obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
page-states.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o

kernel/arch/s390/mm/cmm.c (new file, 479 lines)

@@ -0,0 +1,479 @@
/*
* arch/s390/mm/cmm.c
*
* S390 version
* Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Collaborative memory management interface.
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/oom.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/diag.h>
static char *sender = "VMRMSVM";
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender,
"Guest name that may send SMSG messages (default VMRMSVM)");
#include "../../../drivers/s390/net/smsgiucv.h"
#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)
struct cmm_page_array {
struct cmm_page_array *next;
unsigned long index;
unsigned long pages[CMM_NR_PAGES];
};
static long cmm_pages;
static long cmm_timed_pages;
static volatile long cmm_pages_target;
static volatile long cmm_timed_pages_target;
static long cmm_timeout_pages;
static long cmm_timeout_seconds;
static struct cmm_page_array *cmm_page_list;
static struct cmm_page_array *cmm_timed_page_list;
static DEFINE_SPINLOCK(cmm_lock);
static struct task_struct *cmm_thread_ptr;
static wait_queue_head_t cmm_thread_wait;
static struct timer_list cmm_timer;
static void cmm_timer_fn(unsigned long);
static void cmm_set_timer(void);
static long
cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa, *npa;
unsigned long addr;
while (nr) {
addr = __get_free_page(GFP_NOIO);
if (!addr)
break;
spin_lock(&cmm_lock);
pa = *list;
if (!pa || pa->index >= CMM_NR_PAGES) {
/* Need a new page for the page list. */
spin_unlock(&cmm_lock);
npa = (struct cmm_page_array *)
__get_free_page(GFP_NOIO);
if (!npa) {
free_page(addr);
break;
}
spin_lock(&cmm_lock);
pa = *list;
if (!pa || pa->index >= CMM_NR_PAGES) {
npa->next = pa;
npa->index = 0;
pa = npa;
*list = pa;
} else
free_page((unsigned long) npa);
}
diag10(addr);
pa->pages[pa->index++] = addr;
(*counter)++;
spin_unlock(&cmm_lock);
nr--;
}
return nr;
}
static long
cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
unsigned long addr;
spin_lock(&cmm_lock);
pa = *list;
while (nr) {
if (!pa || pa->index <= 0)
break;
addr = pa->pages[--pa->index];
if (pa->index == 0) {
pa = pa->next;
free_page((unsigned long) *list);
*list = pa;
}
free_page(addr);
(*counter)--;
nr--;
}
spin_unlock(&cmm_lock);
return nr;
}
static int cmm_oom_notify(struct notifier_block *self,
unsigned long dummy, void *parm)
{
unsigned long *freed = parm;
long nr = 256;
nr = cmm_free_pages(nr, &cmm_timed_pages, &cmm_timed_page_list);
if (nr > 0)
nr = cmm_free_pages(nr, &cmm_pages, &cmm_page_list);
cmm_pages_target = cmm_pages;
cmm_timed_pages_target = cmm_timed_pages;
*freed += 256 - nr;
return NOTIFY_OK;
}
static struct notifier_block cmm_oom_nb = {
.notifier_call = cmm_oom_notify
};
static int
cmm_thread(void *dummy)
{
int rc;
while (1) {
rc = wait_event_interruptible(cmm_thread_wait,
(cmm_pages != cmm_pages_target ||
cmm_timed_pages != cmm_timed_pages_target ||
kthread_should_stop()));
if (kthread_should_stop() || rc == -ERESTARTSYS) {
cmm_pages_target = cmm_pages;
cmm_timed_pages_target = cmm_timed_pages;
break;
}
if (cmm_pages_target > cmm_pages) {
if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
cmm_pages_target = cmm_pages;
} else if (cmm_pages_target < cmm_pages) {
cmm_free_pages(1, &cmm_pages, &cmm_page_list);
}
if (cmm_timed_pages_target > cmm_timed_pages) {
if (cmm_alloc_pages(1, &cmm_timed_pages,
&cmm_timed_page_list))
cmm_timed_pages_target = cmm_timed_pages;
} else if (cmm_timed_pages_target < cmm_timed_pages) {
cmm_free_pages(1, &cmm_timed_pages,
&cmm_timed_page_list);
}
if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
cmm_set_timer();
}
return 0;
}
static void
cmm_kick_thread(void)
{
wake_up(&cmm_thread_wait);
}
static void
cmm_set_timer(void)
{
if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
if (timer_pending(&cmm_timer))
del_timer(&cmm_timer);
return;
}
if (timer_pending(&cmm_timer)) {
if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
return;
}
cmm_timer.function = cmm_timer_fn;
cmm_timer.data = 0;
cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
add_timer(&cmm_timer);
}
static void
cmm_timer_fn(unsigned long ignored)
{
long nr;
nr = cmm_timed_pages_target - cmm_timeout_pages;
if (nr < 0)
cmm_timed_pages_target = 0;
else
cmm_timed_pages_target = nr;
cmm_kick_thread();
cmm_set_timer();
}
void
cmm_set_pages(long nr)
{
cmm_pages_target = nr;
cmm_kick_thread();
}
long
cmm_get_pages(void)
{
return cmm_pages;
}
void
cmm_add_timed_pages(long nr)
{
cmm_timed_pages_target += nr;
cmm_kick_thread();
}
long
cmm_get_timed_pages(void)
{
return cmm_timed_pages;
}
void
cmm_set_timeout(long nr, long seconds)
{
cmm_timeout_pages = nr;
cmm_timeout_seconds = seconds;
cmm_set_timer();
}
static int
cmm_skip_blanks(char *cp, char **endp)
{
char *str;
for (str = cp; *str == ' ' || *str == '\t'; str++);
*endp = str;
return str != cp;
}
#ifdef CONFIG_CMM_PROC
static struct ctl_table cmm_table[];
static int
cmm_pages_handler(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[16], *p;
long nr;
int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
if (write) {
len = *lenp;
if (copy_from_user(buf, buffer,
len > sizeof(buf) ? sizeof(buf) : len))
return -EFAULT;
buf[sizeof(buf) - 1] = '\0';
cmm_skip_blanks(buf, &p);
nr = simple_strtoul(p, &p, 0);
if (ctl == &cmm_table[0])
cmm_set_pages(nr);
else
cmm_add_timed_pages(nr);
} else {
if (ctl == &cmm_table[0])
nr = cmm_get_pages();
else
nr = cmm_get_timed_pages();
len = sprintf(buf, "%ld\n", nr);
if (len > *lenp)
len = *lenp;
if (copy_to_user(buffer, buf, len))
return -EFAULT;
}
*lenp = len;
*ppos += len;
return 0;
}
static int
cmm_timeout_handler(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[64], *p;
long nr, seconds;
int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
if (write) {
len = *lenp;
if (copy_from_user(buf, buffer,
len > sizeof(buf) ? sizeof(buf) : len))
return -EFAULT;
buf[sizeof(buf) - 1] = '\0';
cmm_skip_blanks(buf, &p);
nr = simple_strtoul(p, &p, 0);
cmm_skip_blanks(p, &p);
seconds = simple_strtoul(p, &p, 0);
cmm_set_timeout(nr, seconds);
} else {
len = sprintf(buf, "%ld %ld\n",
cmm_timeout_pages, cmm_timeout_seconds);
if (len > *lenp)
len = *lenp;
if (copy_to_user(buffer, buf, len))
return -EFAULT;
}
*lenp = len;
*ppos += len;
return 0;
}
static struct ctl_table cmm_table[] = {
{
.procname = "cmm_pages",
.mode = 0644,
.proc_handler = &cmm_pages_handler,
},
{
.procname = "cmm_timed_pages",
.mode = 0644,
.proc_handler = &cmm_pages_handler,
},
{
.procname = "cmm_timeout",
.mode = 0644,
.proc_handler = &cmm_timeout_handler,
},
{ .ctl_name = 0 }
};
static struct ctl_table cmm_dir_table[] = {
{
.ctl_name = CTL_VM,
.procname = "vm",
.maxlen = 0,
.mode = 0555,
.child = cmm_table,
},
{ .ctl_name = 0 }
};
#endif
#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
static void
cmm_smsg_target(char *from, char *msg)
{
long nr, seconds;
if (strlen(sender) > 0 && strcmp(from, sender) != 0)
return;
if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
return;
if (strncmp(msg, "SHRINK", 6) == 0) {
if (!cmm_skip_blanks(msg + 6, &msg))
return;
nr = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_set_pages(nr);
} else if (strncmp(msg, "RELEASE", 7) == 0) {
if (!cmm_skip_blanks(msg + 7, &msg))
return;
nr = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_add_timed_pages(nr);
} else if (strncmp(msg, "REUSE", 5) == 0) {
if (!cmm_skip_blanks(msg + 5, &msg))
return;
nr = simple_strtoul(msg, &msg, 0);
if (!cmm_skip_blanks(msg, &msg))
return;
seconds = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_set_timeout(nr, seconds);
}
}
#endif
static struct ctl_table_header *cmm_sysctl_header;
static int
cmm_init (void)
{
int rc = -ENOMEM;
#ifdef CONFIG_CMM_PROC
cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
if (!cmm_sysctl_header)
goto out;
#endif
#ifdef CONFIG_CMM_IUCV
rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
if (rc < 0)
goto out_smsg;
#endif
rc = register_oom_notifier(&cmm_oom_nb);
if (rc < 0)
goto out_oom_notify;
init_waitqueue_head(&cmm_thread_wait);
init_timer(&cmm_timer);
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
if (!rc)
goto out;
/*
* kthread_create failed. undo all the stuff from above again.
*/
unregister_oom_notifier(&cmm_oom_nb);
out_oom_notify:
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
out_smsg:
#endif
#ifdef CONFIG_CMM_PROC
unregister_sysctl_table(cmm_sysctl_header);
#endif
out:
return rc;
}
static void
cmm_exit(void)
{
kthread_stop(cmm_thread_ptr);
unregister_oom_notifier(&cmm_oom_nb);
cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
#ifdef CONFIG_CMM_PROC
unregister_sysctl_table(cmm_sysctl_header);
#endif
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
}
module_init(cmm_init);
module_exit(cmm_exit);
EXPORT_SYMBOL(cmm_set_pages);
EXPORT_SYMBOL(cmm_get_pages);
EXPORT_SYMBOL(cmm_add_timed_pages);
EXPORT_SYMBOL(cmm_get_timed_pages);
EXPORT_SYMBOL(cmm_set_timeout);
MODULE_LICENSE("GPL");

kernel/arch/s390/mm/extmem.c (new file, 779 lines)

@@ -0,0 +1,779 @@
/*
* File...........: arch/s390/mm/extmem.c
* Author(s)......: Carsten Otte <cotte@de.ibm.com>
* Rob M van der Heij <rvdheij@nl.ibm.com>
* Steven Shultz <shultzss@us.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation 2002-2004
*/
#define KMSG_COMPONENT "extmem"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/ctype.h>
#include <linux/ioport.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ebcdic.h>
#include <asm/errno.h>
#include <asm/extmem.h>
#include <asm/cpcmd.h>
#include <asm/setup.h>
#define DCSS_LOADSHR 0x00
#define DCSS_LOADNSR 0x04
#define DCSS_PURGESEG 0x08
#define DCSS_FINDSEG 0x0c
#define DCSS_LOADNOLY 0x10
#define DCSS_SEGEXT 0x18
#define DCSS_LOADSHRX 0x20
#define DCSS_LOADNSRX 0x24
#define DCSS_FINDSEGX 0x2c
#define DCSS_SEGEXTX 0x38
#define DCSS_FINDSEGA 0x0c
struct qrange {
unsigned long start; /* last byte type */
unsigned long end; /* last byte reserved */
};
struct qout64 {
unsigned long segstart;
unsigned long segend;
int segcnt;
int segrcnt;
struct qrange range[6];
};
#ifdef CONFIG_64BIT
struct qrange_old {
unsigned int start; /* last byte type */
unsigned int end; /* last byte reserved */
};
/* output area format for the Diag x'64' old subcode x'18' */
struct qout64_old {
int segstart;
int segend;
int segcnt;
int segrcnt;
struct qrange_old range[6];
};
#endif
struct qin64 {
char qopcode;
char rsrv1[3];
char qrcode;
char rsrv2[3];
char qname[8];
unsigned int qoutptr;
short int qoutlen;
};
struct dcss_segment {
struct list_head list;
char dcss_name[8];
char res_name[15];
unsigned long start_addr;
unsigned long end;
atomic_t ref_count;
int do_nonshared;
unsigned int vm_segtype;
struct qrange range[6];
int segcnt;
struct resource *res;
};
static DEFINE_MUTEX(dcss_lock);
static LIST_HEAD(dcss_list);
static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
"EW/EN-MIXED" };
static int loadshr_scode, loadnsr_scode, findseg_scode;
static int segext_scode, purgeseg_scode;
static int scode_set;
/* set correct Diag x'64' subcodes. */
static int
dcss_set_subcodes(void)
{
#ifdef CONFIG_64BIT
char *name = kmalloc(8 * sizeof(char), GFP_DMA);
unsigned long rx, ry;
int rc;
if (name == NULL)
return -ENOMEM;
rx = (unsigned long) name;
ry = DCSS_FINDSEGX;
strcpy(name, "dummy");
asm volatile(
" diag %0,%1,0x64\n"
"0: ipm %2\n"
" srl %2,28\n"
" j 2f\n"
"1: la %2,3\n"
"2:\n"
EX_TABLE(0b, 1b)
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
kfree(name);
/* Diag x'64' new subcodes are supported, set to new subcodes */
if (rc != 3) {
loadshr_scode = DCSS_LOADSHRX;
loadnsr_scode = DCSS_LOADNSRX;
purgeseg_scode = DCSS_PURGESEG;
findseg_scode = DCSS_FINDSEGX;
segext_scode = DCSS_SEGEXTX;
return 0;
}
#endif
/* Diag x'64' new subcodes are not supported, set to old subcodes */
loadshr_scode = DCSS_LOADNOLY;
loadnsr_scode = DCSS_LOADNSR;
purgeseg_scode = DCSS_PURGESEG;
findseg_scode = DCSS_FINDSEG;
segext_scode = DCSS_SEGEXT;
return 0;
}
/*
* Create the 8-byte, EBCDIC VM segment name from
* an ASCII name.
*/
static void
dcss_mkname(char *name, char *dcss_name)
{
int i;
for (i = 0; i < 8; i++) {
if (name[i] == '\0')
break;
dcss_name[i] = toupper(name[i]);
};
for (; i < 8; i++)
dcss_name[i] = ' ';
ASCEBC(dcss_name, 8);
}
/*
* search all segments in dcss_list, and return the one
* named *name. If not found, return NULL.
*/
static struct dcss_segment *
segment_by_name (char *name)
{
char dcss_name[9];
struct list_head *l;
struct dcss_segment *tmp, *retval = NULL;
BUG_ON(!mutex_is_locked(&dcss_lock));
dcss_mkname (name, dcss_name);
list_for_each (l, &dcss_list) {
tmp = list_entry (l, struct dcss_segment, list);
if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) {
retval = tmp;
break;
}
}
return retval;
}
/*
* Perform a function on a dcss segment.
*/
static inline int
dcss_diag(int *func, void *parameter,
unsigned long *ret1, unsigned long *ret2)
{
unsigned long rx, ry;
int rc;
if (scode_set == 0) {
rc = dcss_set_subcodes();
if (rc < 0)
return rc;
scode_set = 1;
}
rx = (unsigned long) parameter;
ry = (unsigned long) *func;
#ifdef CONFIG_64BIT
/* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
if (*func > DCSS_SEGEXT)
asm volatile(
" diag %0,%1,0x64\n"
" ipm %2\n"
" srl %2,28\n"
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
/* 31-bit Diag x'64' old subcode, switch to 31-bit addressing mode */
else
asm volatile(
" sam31\n"
" diag %0,%1,0x64\n"
" sam64\n"
" ipm %2\n"
" srl %2,28\n"
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
#else
asm volatile(
" diag %0,%1,0x64\n"
" ipm %2\n"
" srl %2,28\n"
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
#endif
*ret1 = rx;
*ret2 = ry;
return rc;
}
static inline int
dcss_diag_translate_rc (int vm_rc) {
if (vm_rc == 44)
return -ENOENT;
return -EIO;
}
/* do a diag to get info about a segment.
* fills start_address, end and vm_segtype fields
*/
static int
query_segment_type (struct dcss_segment *seg)
{
struct qin64 *qin = kmalloc (sizeof(struct qin64), GFP_DMA);
struct qout64 *qout = kmalloc (sizeof(struct qout64), GFP_DMA);
int diag_cc, rc, i;
unsigned long dummy, vmrc;
if ((qin == NULL) || (qout == NULL)) {
rc = -ENOMEM;
goto out_free;
}
/* initialize diag input parameters */
qin->qopcode = DCSS_FINDSEGA;
qin->qoutptr = (unsigned long) qout;
qin->qoutlen = sizeof(struct qout64);
memcpy (qin->qname, seg->dcss_name, 8);
diag_cc = dcss_diag(&segext_scode, qin, &dummy, &vmrc);
if (diag_cc < 0) {
rc = diag_cc;
goto out_free;
}
if (diag_cc > 1) {
pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc);
rc = dcss_diag_translate_rc (vmrc);
goto out_free;
}
#ifdef CONFIG_64BIT
/* Only old format of output area of Diagnose x'64' is supported,
copy data for the new format. */
if (segext_scode == DCSS_SEGEXT) {
struct qout64_old *qout_old;
qout_old = kzalloc(sizeof(struct qout64_old), GFP_DMA);
if (qout_old == NULL) {
rc = -ENOMEM;
goto out_free;
}
memcpy(qout_old, qout, sizeof(struct qout64_old));
qout->segstart = (unsigned long) qout_old->segstart;
qout->segend = (unsigned long) qout_old->segend;
qout->segcnt = qout_old->segcnt;
qout->segrcnt = qout_old->segrcnt;
if (qout->segcnt > 6)
qout->segrcnt = 6;
for (i = 0; i < qout->segrcnt; i++) {
qout->range[i].start =
(unsigned long) qout_old->range[i].start;
qout->range[i].end =
(unsigned long) qout_old->range[i].end;
}
kfree(qout_old);
}
#endif
if (qout->segcnt > 6) {
rc = -ENOTSUPP;
goto out_free;
}
if (qout->segcnt == 1) {
seg->vm_segtype = qout->range[0].start & 0xff;
} else {
/* multi-part segment. only one type supported here:
- all parts are contiguous
- all parts are either EW or EN type
- maximum 6 parts allowed */
unsigned long start = qout->segstart >> PAGE_SHIFT;
for (i=0; i<qout->segcnt; i++) {
if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
((qout->range[i].start & 0xff) != SEG_TYPE_EN)) {
rc = -ENOTSUPP;
goto out_free;
}
if (start != qout->range[i].start >> PAGE_SHIFT) {
rc = -ENOTSUPP;
goto out_free;
}
start = (qout->range[i].end >> PAGE_SHIFT) + 1;
}
seg->vm_segtype = SEG_TYPE_EWEN;
}
/* analyze diag output and update seg */
seg->start_addr = qout->segstart;
seg->end = qout->segend;
memcpy (seg->range, qout->range, 6*sizeof(struct qrange));
seg->segcnt = qout->segcnt;
rc = 0;
out_free:
kfree(qin);
kfree(qout);
return rc;
}
/*
* get info about a segment
* possible return values:
* -ENOSYS : we are not running on VM
* -EIO : could not perform query diagnose
* -ENOENT : no such segment
* -ENOTSUPP: multi-part segment cannot be used with linux
* -ENOMEM : out of memory
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
*/
int
segment_type (char* name)
{
int rc;
struct dcss_segment seg;
if (!MACHINE_IS_VM)
return -ENOSYS;
dcss_mkname(name, seg.dcss_name);
rc = query_segment_type (&seg);
if (rc < 0)
return rc;
return seg.vm_segtype;
}
/*
* check if segment collides with other segments that are currently loaded
* returns 1 if this is the case, 0 if no collision was found
*/
static int
segment_overlaps_others (struct dcss_segment *seg)
{
struct list_head *l;
struct dcss_segment *tmp;
BUG_ON(!mutex_is_locked(&dcss_lock));
list_for_each(l, &dcss_list) {
tmp = list_entry(l, struct dcss_segment, list);
if ((tmp->start_addr >> 20) > (seg->end >> 20))
continue;
if ((tmp->end >> 20) < (seg->start_addr >> 20))
continue;
if (seg == tmp)
continue;
return 1;
}
return 0;
}
/*
* real segment loading function, called from segment_load
*/
static int
__segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end)
{
struct dcss_segment *seg = kmalloc(sizeof(struct dcss_segment),
GFP_DMA);
int rc, diag_cc;
unsigned long start_addr, end_addr, dummy;
if (seg == NULL) {
rc = -ENOMEM;
goto out;
}
dcss_mkname (name, seg->dcss_name);
rc = query_segment_type (seg);
if (rc < 0)
goto out_free;
if (loadshr_scode == DCSS_LOADSHRX) {
if (segment_overlaps_others(seg)) {
rc = -EBUSY;
goto out_free;
}
}
rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
if (rc)
goto out_free;
seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (seg->res == NULL) {
rc = -ENOMEM;
goto out_shared;
}
seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
seg->res->start = seg->start_addr;
seg->res->end = seg->end;
memcpy(&seg->res_name, seg->dcss_name, 8);
EBCASC(seg->res_name, 8);
seg->res_name[8] = '\0';
strncat(seg->res_name, " (DCSS)", 7);
seg->res->name = seg->res_name;
rc = seg->vm_segtype;
if (rc == SEG_TYPE_SC ||
((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared))
seg->res->flags |= IORESOURCE_READONLY;
if (request_resource(&iomem_resource, seg->res)) {
rc = -EBUSY;
kfree(seg->res);
goto out_shared;
}
if (do_nonshared)
diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
&start_addr, &end_addr);
else
diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
&start_addr, &end_addr);
if (diag_cc < 0) {
dcss_diag(&purgeseg_scode, seg->dcss_name,
&dummy, &dummy);
rc = diag_cc;
goto out_resource;
}
if (diag_cc > 1) {
pr_warning("Loading DCSS %s failed with rc=%ld\n", name,
end_addr);
rc = dcss_diag_translate_rc(end_addr);
dcss_diag(&purgeseg_scode, seg->dcss_name,
&dummy, &dummy);
goto out_resource;
}
seg->start_addr = start_addr;
seg->end = end_addr;
seg->do_nonshared = do_nonshared;
atomic_set(&seg->ref_count, 1);
list_add(&seg->list, &dcss_list);
*addr = seg->start_addr;
*end = seg->end;
if (do_nonshared)
pr_info("DCSS %s of range %p to %p and type %s loaded as "
"exclusive-writable\n", name, (void*) seg->start_addr,
(void*) seg->end, segtype_string[seg->vm_segtype]);
else {
pr_info("DCSS %s of range %p to %p and type %s loaded in "
"shared access mode\n", name, (void*) seg->start_addr,
(void*) seg->end, segtype_string[seg->vm_segtype]);
}
goto out;
out_resource:
release_resource(seg->res);
kfree(seg->res);
out_shared:
vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
out_free:
kfree(seg);
out:
return rc;
}
/*
* this function loads a DCSS segment
* name : name of the DCSS
* do_nonshared : 0 indicates that the dcss should be shared with other linux images
* 1 indicates that the dcss should be exclusive for this linux image
* addr : will be filled with start address of the segment
* end : will be filled with end address of the segment
* return values:
* -ENOSYS : we are not running on VM
* -EIO : could not perform query or load diagnose
* -ENOENT : no such segment
* -ENOTSUPP: multi-part segment cannot be used with linux
* -ENOSPC : segment cannot be used (overlaps with storage)
* -EBUSY : segment can temporarily not be used (overlaps with dcss)
* -ERANGE : segment cannot be used (exceeds kernel mapping range)
* -EPERM : segment is currently loaded with incompatible permissions
* -ENOMEM : out of memory
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
*/
int
segment_load (char *name, int do_nonshared, unsigned long *addr,
unsigned long *end)
{
struct dcss_segment *seg;
int rc;
if (!MACHINE_IS_VM)
return -ENOSYS;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL)
rc = __segment_load (name, do_nonshared, addr, end);
else {
if (do_nonshared == seg->do_nonshared) {
atomic_inc(&seg->ref_count);
*addr = seg->start_addr;
*end = seg->end;
rc = seg->vm_segtype;
} else {
*addr = *end = 0;
rc = -EPERM;
}
}
mutex_unlock(&dcss_lock);
return rc;
}
/*
* this function modifies the shared state of a DCSS segment.
* name : name of the DCSS
* do_nonshared : 0 indicates that the dcss should be shared with other linux images
* 1 indicates that the dcss should be exclusive for this linux image
* return values:
* -EIO : could not perform load diagnose (segment gone!)
* -ENOENT : no such segment (segment gone!)
* -EAGAIN : segment is in use by other exploiters, try later
* -EINVAL : no segment with the given name is currently loaded - name invalid
* -EBUSY : segment can temporarily not be used (overlaps with dcss)
* 0 : operation succeeded
*/
int
segment_modify_shared (char *name, int do_nonshared)
{
struct dcss_segment *seg;
unsigned long start_addr, end_addr, dummy;
int rc, diag_cc;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
rc = -EINVAL;
goto out_unlock;
}
if (do_nonshared == seg->do_nonshared) {
pr_info("DCSS %s is already in the requested access "
"mode\n", name);
rc = 0;
goto out_unlock;
}
if (atomic_read (&seg->ref_count) != 1) {
pr_warning("DCSS %s is in use and cannot be reloaded\n",
name);
rc = -EAGAIN;
goto out_unlock;
}
release_resource(seg->res);
if (do_nonshared)
seg->res->flags &= ~IORESOURCE_READONLY;
else
if (seg->vm_segtype == SEG_TYPE_SR ||
seg->vm_segtype == SEG_TYPE_ER)
seg->res->flags |= IORESOURCE_READONLY;
if (request_resource(&iomem_resource, seg->res)) {
pr_warning("DCSS %s overlaps with used memory resources "
"and cannot be reloaded\n", name);
rc = -EBUSY;
kfree(seg->res);
goto out_del_mem;
}
dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
if (do_nonshared)
diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
&start_addr, &end_addr);
else
diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
&start_addr, &end_addr);
if (diag_cc < 0) {
rc = diag_cc;
goto out_del_res;
}
if (diag_cc > 1) {
pr_warning("Reloading DCSS %s failed with rc=%ld\n", name,
end_addr);
rc = dcss_diag_translate_rc(end_addr);
goto out_del_res;
}
seg->start_addr = start_addr;
seg->end = end_addr;
seg->do_nonshared = do_nonshared;
rc = 0;
goto out_unlock;
out_del_res:
release_resource(seg->res);
kfree(seg->res);
out_del_mem:
vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
list_del(&seg->list);
dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
kfree(seg);
out_unlock:
mutex_unlock(&dcss_lock);
return rc;
}
/*
* Decrease the use count of a DCSS segment and remove
* it from the address space if nobody is using it
* any longer.
*/
void
segment_unload(char *name)
{
unsigned long dummy;
struct dcss_segment *seg;
if (!MACHINE_IS_VM)
return;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
pr_err("Unloading unknown DCSS %s failed\n", name);
goto out_unlock;
}
if (atomic_dec_return(&seg->ref_count) != 0)
goto out_unlock;
release_resource(seg->res);
kfree(seg->res);
vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
list_del(&seg->list);
dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
kfree(seg);
out_unlock:
mutex_unlock(&dcss_lock);
}
/*
* save segment content permanently
*/
void
segment_save(char *name)
{
struct dcss_segment *seg;
int startpfn = 0;
int endpfn = 0;
char cmd1[160];
char cmd2[80];
int i, response;
if (!MACHINE_IS_VM)
return;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
pr_err("Saving unknown DCSS %s failed\n", name);
goto out;
}
startpfn = seg->start_addr >> PAGE_SHIFT;
endpfn = (seg->end) >> PAGE_SHIFT;
sprintf(cmd1, "DEFSEG %s", name);
for (i=0; i<seg->segcnt; i++) {
sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",
seg->range[i].start >> PAGE_SHIFT,
seg->range[i].end >> PAGE_SHIFT,
segtype_string[seg->range[i].start & 0xff]);
}
sprintf(cmd2, "SAVESEG %s", name);
response = 0;
cpcmd(cmd1, NULL, 0, &response);
if (response) {
pr_err("Saving a DCSS failed with DEFSEG response code "
"%i\n", response);
goto out;
}
cpcmd(cmd2, NULL, 0, &response);
if (response) {
pr_err("Saving a DCSS failed with SAVESEG response code "
"%i\n", response);
goto out;
}
out:
mutex_unlock(&dcss_lock);
}
/*
* print appropriate error message for segment_load()/segment_type()
* return code
*/
void segment_warning(int rc, char *seg_name)
{
switch (rc) {
case -ENOENT:
pr_err("DCSS %s cannot be loaded or queried\n", seg_name);
break;
case -ENOSYS:
pr_err("DCSS %s cannot be loaded or queried without "
"z/VM\n", seg_name);
break;
case -EIO:
pr_err("Loading or querying DCSS %s resulted in a "
"hardware error\n", seg_name);
break;
case -ENOTSUPP:
pr_err("DCSS %s has multiple page ranges and cannot be "
"loaded or queried\n", seg_name);
break;
case -ENOSPC:
pr_err("DCSS %s overlaps with used storage and cannot "
"be loaded\n", seg_name);
break;
case -EBUSY:
pr_err("%s needs used memory resources and cannot be "
"loaded or queried\n", seg_name);
break;
case -EPERM:
pr_err("DCSS %s is already loaded in a different access "
"mode\n", seg_name);
break;
case -ENOMEM:
pr_err("There is not enough memory to load or query "
"DCSS %s\n", seg_name);
break;
case -ERANGE:
pr_err("DCSS %s exceeds the kernel mapping range (%lu) "
"and cannot be loaded\n", seg_name, VMEM_MAX_PHYS);
break;
default:
break;
}
}
EXPORT_SYMBOL(segment_load);
EXPORT_SYMBOL(segment_unload);
EXPORT_SYMBOL(segment_save);
EXPORT_SYMBOL(segment_type);
EXPORT_SYMBOL(segment_modify_shared);
EXPORT_SYMBOL(segment_warning);
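
The exported entry points above form the DCSS interface used by drivers. A hypothetical caller sketch, not part of this commit (the segment name "MYDCSS" is a placeholder), showing the documented calling convention: segment_load() returns the segment type (0..6) on success or a negative errno that segment_warning() can translate into a message.

#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/extmem.h>

static char *segname = "MYDCSS";		/* placeholder DCSS name */
static unsigned long seg_start, seg_end;

static int __init dcss_demo_init(void)
{
	int rc;

	/* do_nonshared = 0: map the segment shared with other guests */
	rc = segment_load(segname, 0, &seg_start, &seg_end);
	if (rc < 0) {
		segment_warning(rc, segname);
		return rc;
	}
	pr_info("DCSS %s at 0x%lx-0x%lx, segment type %d\n",
		segname, seg_start, seg_end, rc);
	return 0;
}

static void __exit dcss_demo_exit(void)
{
	segment_unload(segname);
}

module_init(dcss_demo_init);
module_exit(dcss_demo_exit);
MODULE_LICENSE("GPL");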

kernel/arch/s390/mm/fault.c (new file, 595 lines)

@@ -0,0 +1,595 @@
/*
* arch/s390/mm/fault.c
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Ulrich Weigand (uweigand@de.ibm.com)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/s390_ext.h>
#include <asm/mmu_context.h>
#include "../kernel/entry.h"
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __FIXUP_MASK 0x7fffffff
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __FIXUP_MASK ~0L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */
#ifdef CONFIG_SYSCTL
extern int sysctl_userprocess_debug;
#endif
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
preempt_enable();
}
return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
return 0;
}
#endif
/*
* Unlock any spinlocks which will prevent us from getting the
* message out.
*/
void bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
} else {
int loglevel_save = console_loglevel;
console_unblank();
oops_in_progress = 0;
/*
* OK, the message is on the console. Now we call printk()
* without oops_in_progress set so that printk will give klogd
* a poke. Hold onto your hats...
*/
console_loglevel = 15;
printk(" ");
console_loglevel = loglevel_save;
}
}
/*
* Returns the address space associated with the fault.
* Returns 0 for kernel space, 1 for user space and
* 2 for code execution in user space with noexec=on.
*/
static inline int check_space(struct task_struct *tsk)
{
/*
* The lowest two bits of S390_lowcore.trans_exc_code
* indicate which paging table was used.
*/
int desc = S390_lowcore.trans_exc_code & 3;
if (desc == 3) /* Home Segment Table Descriptor */
return switch_amode == 0;
if (desc == 2) /* Secondary Segment Table Descriptor */
return tsk->thread.mm_segment.ar4;
#ifdef CONFIG_S390_SWITCH_AMODE
if (unlikely(desc == 1)) { /* STD determined via access register */
/* %a0 always indicates primary space. */
if (S390_lowcore.exc_access_id != 0) {
save_access_regs(tsk->thread.acrs);
/*
* An alet of 0 indicates primary space.
* An alet of 1 indicates secondary space.
* Any other alet values generate an
* alen-translation exception.
*/
if (tsk->thread.acrs[S390_lowcore.exc_access_id])
return tsk->thread.mm_segment.ar4;
}
}
#endif
/* Primary Segment Table Descriptor */
return switch_amode << s390_noexec;
}
/*
* Send SIGSEGV to task. This is an external routine
* to keep the stack usage of do_page_fault small.
*/
static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
int si_code, unsigned long address)
{
struct siginfo si;
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
#if defined(CONFIG_SYSCTL)
if (sysctl_userprocess_debug)
#endif
{
printk("User process fault: interruption code 0x%lX\n",
error_code);
printk("failing address: %lX\n", address);
show_regs(regs);
}
#endif
si.si_signo = SIGSEGV;
si.si_code = si_code;
si.si_addr = (void __user *) address;
force_sig_info(SIGSEGV, &si, current);
}
static void do_no_context(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
const struct exception_table_entry *fixup;
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
if (fixup) {
regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
if (check_space(current) == 0)
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %p\n", (void *)address);
else
printk(KERN_ALERT "Unable to handle kernel paging request"
" at virtual user address %p\n", (void *)address);
die("Oops", regs, error_code);
do_exit(SIGKILL);
}
static void do_low_address(struct pt_regs *regs, unsigned long error_code)
{
/* Low-address protection hit in kernel mode means
NULL pointer write access in kernel mode. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
/* Low-address protection hit in user mode 'cannot happen'. */
die ("Low-address protection", regs, error_code);
do_exit(SIGKILL);
}
do_no_context(regs, error_code, 0);
}
static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
tsk->thread.prot_addr = address;
tsk->thread.trap_no = error_code;
force_sig(SIGBUS, tsk);
/* Kernel mode? Handle exceptions or die */
if (!(regs->psw.mask & PSW_MASK_PSTATE))
do_no_context(regs, error_code, address);
}
#ifdef CONFIG_S390_EXEC_PROTECT
static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
unsigned long address, unsigned long error_code)
{
u16 instruction;
int rc;
#ifdef CONFIG_COMPAT
int compat;
#endif
pagefault_disable();
rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
pagefault_enable();
if (rc)
return -EFAULT;
up_read(&mm->mmap_sem);
clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
#ifdef CONFIG_COMPAT
compat = is_compat_task();
if (compat && instruction == 0x0a77)
sys32_sigreturn();
else if (compat && instruction == 0x0aad)
sys32_rt_sigreturn();
else
#endif
if (instruction == 0x0a77)
sys_sigreturn();
else if (instruction == 0x0aad)
sys_rt_sigreturn();
else {
current->thread.prot_addr = address;
current->thread.trap_no = error_code;
do_sigsegv(regs, error_code, SEGV_MAPERR, address);
}
return 0;
}
#endif /* CONFIG_S390_EXEC_PROTECT */
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* error_code:
* 04 Protection -> Write-Protection (suppression)
* 10 Segment translation -> Not present (nullification)
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
static inline void
do_exception(struct pt_regs *regs, unsigned long error_code, int write)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned long address;
int space;
int si_code;
int fault;
if (notify_page_fault(regs, error_code))
return;
tsk = current;
mm = tsk->mm;
/* get the failing address and the affected space */
address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
space = check_space(tsk);
/*
* Verify that the fault happened in user space, that
* we are not in an interrupt and that there is a
* user context.
*/
if (unlikely(space == 0 || in_atomic() || !mm))
goto no_context;
/*
* When we get here, the fault happened in the current
* task's user address space, so we can switch on the
* interrupts again and then search the VMAs
*/
local_irq_enable();
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
down_read(&mm->mmap_sem);
si_code = SEGV_MAPERR;
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
#ifdef CONFIG_S390_EXEC_PROTECT
if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
if (!signal_return(mm, regs, address, error_code))
/*
* signal_return() has done an up_read(&mm->mmap_sem)
* if it returns 0.
*/
return;
#endif
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
si_code = SEGV_ACCERR;
if (!write) {
/* page not present, check vm flags */
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
} else {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
}
if (is_vm_hugetlb_page(vma))
address &= HPAGE_MASK;
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
up_read(&mm->mmap_sem);
pagefault_out_of_memory();
return;
} else if (fault & VM_FAULT_SIGBUS) {
do_sigbus(regs, error_code, address);
return;
}
BUG();
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
/*
* The instruction that caused the program check will
* be repeated. Don't signal single step via SIGTRAP.
*/
clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
/* User mode accesses just cause a SIGSEGV */
if (regs->psw.mask & PSW_MASK_PSTATE) {
tsk->thread.prot_addr = address;
tsk->thread.trap_no = error_code;
do_sigsegv(regs, error_code, si_code, address);
return;
}
no_context:
do_no_context(regs, error_code, address);
}
void __kprobes do_protection_exception(struct pt_regs *regs,
long error_code)
{
/* Protection exception is suppressing, decrement psw address. */
regs->psw.addr -= (error_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
do_low_address(regs, error_code);
return;
}
do_exception(regs, 4, 1);
}
void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
{
do_exception(regs, error_code & 0xff, 0);
}
#ifdef CONFIG_64BIT
void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
{
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned long address;
int space;
mm = current->mm;
address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
space = check_space(current);
if (unlikely(space == 0 || in_atomic() || !mm))
goto no_context;
local_irq_enable();
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
up_read(&mm->mmap_sem);
if (vma) {
update_mm(mm, current);
return;
}
/* User mode accesses just cause a SIGSEGV */
if (regs->psw.mask & PSW_MASK_PSTATE) {
current->thread.prot_addr = address;
current->thread.trap_no = error_code;
do_sigsegv(regs, error_code, SEGV_MAPERR, address);
return;
}
no_context:
do_no_context(regs, error_code, address);
}
#endif
#ifdef CONFIG_PFAULT
/*
* 'pfault' pseudo page faults routines.
*/
static ext_int_info_t ext_int_pfault;
static int pfault_disable = 0;
static int __init nopfault(char *str)
{
pfault_disable = 1;
return 1;
}
__setup("nopfault", nopfault);
typedef struct {
__u16 refdiagc;
__u16 reffcode;
__u16 refdwlen;
__u16 refversn;
__u64 refgaddr;
__u64 refselmk;
__u64 refcmpmk;
__u64 reserved;
} __attribute__ ((packed, aligned(8))) pfault_refbk_t;
int pfault_init(void)
{
pfault_refbk_t refbk =
{ 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
__PF_RES_FIELD };
int rc;
if (!MACHINE_IS_VM || pfault_disable)
return -1;
asm volatile(
" diag %1,%0,0x258\n"
"0: j 2f\n"
"1: la %0,8\n"
"2:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
__ctl_set_bit(0, 9);
return rc;
}
void pfault_fini(void)
{
pfault_refbk_t refbk =
{ 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };
if (!MACHINE_IS_VM || pfault_disable)
return;
__ctl_clear_bit(0,9);
asm volatile(
" diag %0,0,0x258\n"
"0:\n"
EX_TABLE(0b,0b)
: : "a" (&refbk), "m" (refbk) : "cc");
}
static void pfault_interrupt(__u16 error_code)
{
struct task_struct *tsk;
__u16 subcode;
/*
* Get the external interruption subcode & pfault
* initial/completion signal bit. VM stores this
* in the 'cpu address' field associated with the
* external interrupt.
*/
subcode = S390_lowcore.cpu_addr;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
/*
* Get the token (= address of the task structure of the affected task).
*/
tsk = *(struct task_struct **) __LC_PFAULT_INTPARM;
if (subcode & 0x0080) {
/* signal bit is set -> a page has been swapped in by VM */
if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
/* Initial interrupt was faster than the completion
* interrupt. pfault_wait is valid. Set pfault_wait
* back to zero and wake up the process. This can
* safely be done because the task is still sleeping
* and can't produce new pfaults. */
tsk->thread.pfault_wait = 0;
wake_up_process(tsk);
put_task_struct(tsk);
}
} else {
/* signal bit not set -> a real page is missing. */
get_task_struct(tsk);
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
/* Completion interrupt was faster than the initial
* interrupt (swapped in a -1 for pfault_wait). Set
* pfault_wait back to zero and exit. This can be
* done safely because tsk is running in kernel
* mode and can't produce new pfaults. */
tsk->thread.pfault_wait = 0;
set_task_state(tsk, TASK_RUNNING);
put_task_struct(tsk);
} else
set_tsk_need_resched(tsk);
}
}
void __init pfault_irq_init(void)
{
if (!MACHINE_IS_VM)
return;
/*
* Try to get pfault pseudo page faults going.
*/
if (register_early_external_interrupt(0x2603, pfault_interrupt,
&ext_int_pfault) != 0)
panic("Couldn't request external interrupt 0x2603");
if (pfault_init() == 0)
return;
/* Tough luck, no pfault. */
pfault_disable = 1;
unregister_early_external_interrupt(0x2603, pfault_interrupt,
&ext_int_pfault);
}
#endif
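
The error_code values listed in the comment above do_exception() come from the program-interruption code: do_protection_exception() passes 0x04 and do_dat_exception() passes the translation-exception codes. A small stand-alone sketch, illustrative only and not kernel code, that restates that table:

#include <stdio.h>

/* Low byte of the s390 program-check code, per the table above do_exception(). */
static const char *fault_reason(unsigned int code)
{
	switch (code & 0xff) {
	case 0x04: return "protection exception (write protection, suppressing)";
	case 0x10: return "segment translation, page not present (nullifying)";
	case 0x11: return "page translation, page not present (nullifying)";
	case 0x3b: return "region third translation, not present (nullifying)";
	default:   return "other";
	}
}

int main(void)
{
	unsigned int codes[] = { 0x04, 0x10, 0x11, 0x3b };
	unsigned int i;

	for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		printf("error_code 0x%02x: %s\n", codes[i], fault_reason(codes[i]));
	return 0;
}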

kernel/arch/s390/mm/hugetlbpage.c (new file, 140 lines)

@@ -0,0 +1,140 @@
/*
* IBM System z Huge TLB Page Support for Kernel.
*
* Copyright 2007 IBM Corp.
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#include <linux/mm.h>
#include <linux/hugetlb.h>
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *pteptr, pte_t pteval)
{
pmd_t *pmdp = (pmd_t *) pteptr;
pte_t shadow_pteval = pteval;
unsigned long mask;
if (!MACHINE_HAS_HPAGE) {
pteptr = (pte_t *) pte_page(pteval)[1].index;
mask = pte_val(pteval) &
(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
if (mm->context.noexec) {
pteptr += PTRS_PER_PTE;
pte_val(shadow_pteval) =
(_SEGMENT_ENTRY + __pa(pteptr)) | mask;
}
}
pmd_val(*pmdp) = pte_val(pteval);
if (mm->context.noexec) {
pmdp = get_shadow_table(pmdp);
pmd_val(*pmdp) = pte_val(shadow_pteval);
}
}
int arch_prepare_hugepage(struct page *page)
{
unsigned long addr = page_to_phys(page);
pte_t pte;
pte_t *ptep;
int i;
if (MACHINE_HAS_HPAGE)
return 0;
ptep = (pte_t *) pte_alloc_one(&init_mm, address);
if (!ptep)
return -ENOMEM;
pte = mk_pte(page, PAGE_RW);
for (i = 0; i < PTRS_PER_PTE; i++) {
set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
pte_val(pte) += PAGE_SIZE;
}
page[1].index = (unsigned long) ptep;
return 0;
}
void arch_release_hugepage(struct page *page)
{
pte_t *ptep;
if (MACHINE_HAS_HPAGE)
return;
ptep = (pte_t *) page[1].index;
if (!ptep)
return;
pte_free(&init_mm, ptep);
page[1].index = 0;
}
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp = NULL;
pgdp = pgd_offset(mm, addr);
pudp = pud_alloc(mm, pgdp, addr);
if (pudp)
pmdp = pmd_alloc(mm, pudp, addr);
return (pte_t *) pmdp;
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp = NULL;
pgdp = pgd_offset(mm, addr);
if (pgd_present(*pgdp)) {
pudp = pud_offset(pgdp, addr);
if (pud_present(*pudp))
pmdp = pmd_offset(pudp, addr);
}
return (pte_t *) pmdp;
}
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
return 0;
}
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write)
{
return ERR_PTR(-EINVAL);
}
int pmd_huge(pmd_t pmd)
{
if (!MACHINE_HAS_HPAGE)
return 0;
return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}
int pud_huge(pud_t pud)
{
return 0;
}
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmdp, int write)
{
struct page *page;
if (!MACHINE_HAS_HPAGE)
return NULL;
page = pmd_page(*pmdp);
if (page)
page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
return page;
}

kernel/arch/s390/mm/init.c (new file, 193 lines)

@@ -0,0 +1,193 @@
/*
* arch/s390/mm/init.c
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com)
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
EXPORT_SYMBOL(empty_zero_page);
/*
* paging_init() sets up the page tables
*/
void __init paging_init(void)
{
static const int ssm_mask = 0x04000000L;
unsigned long max_zone_pfns[MAX_NR_ZONES];
unsigned long pgd_type;
init_mm.pgd = swapper_pg_dir;
S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
/* A three level page table (4TB) is enough for the kernel space. */
S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
#else
S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
/* enable virtual mapping in kernel mode */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
__raw_local_irq_ssm(ssm_mask);
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
}
void __init mem_init(void)
{
unsigned long codesize, reservedpages, datasize, initsize;
max_mapnr = num_physpages = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
/* Setup guest page hinting */
cmma_init();
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
reservedpages = 0;
codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >>10,
initsize >> 10);
printk("Write protected kernel read-only data: %#lx - %#lx\n",
(unsigned long)&_stext,
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long address;
int i;
for (i = 0; i < numpages; i++) {
address = page_to_phys(page + i);
pgd = pgd_offset_k(address);
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
if (!enable) {
ptep_invalidate(&init_mm, address, pte);
continue;
}
*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
/* Flush cpu write queue. */
mb();
}
}
#endif
void free_initmem(void)
{
unsigned long addr;
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
printk ("Freeing unused kernel memory: %ldk freed\n",
((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
}
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
struct pglist_data *pgdat;
struct zone *zone;
int rc;
pgdat = NODE_DATA(nid);
zone = pgdat->node_zones + ZONE_MOVABLE;
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
if (rc)
vmem_remove_mapping(start, size);
return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

kernel/arch/s390/mm/maccess.c (new file, 61 lines)

@@ -0,0 +1,61 @@
/*
* Access kernel memory without faulting -- s390 specific implementation.
*
* Copyright IBM Corp. 2009
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
*/
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/system.h>
/*
* This function writes to kernel memory bypassing DAT and possible
* write protection. It copies one to four bytes from src to dst
* using the stura instruction.
* Returns the number of bytes copied or -EFAULT.
*/
static long probe_kernel_write_odd(void *dst, void *src, size_t size)
{
unsigned long count, aligned;
int offset, mask;
int rc = -EFAULT;
aligned = (unsigned long) dst & ~3UL;
offset = (unsigned long) dst & 3;
count = min_t(unsigned long, 4 - offset, size);
mask = (0xf << (4 - count)) & 0xf;
mask >>= offset;
asm volatile(
" bras 1,0f\n"
" icm 0,0,0(%3)\n"
"0: l 0,0(%1)\n"
" lra %1,0(%1)\n"
"1: ex %2,0(1)\n"
"2: stura 0,%1\n"
" la %0,0\n"
"3:\n"
EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
: "+d" (rc), "+a" (aligned)
: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
return rc ? rc : count;
}
long probe_kernel_write(void *dst, void *src, size_t size)
{
long copied = 0;
while (size) {
copied = probe_kernel_write_odd(dst, src, size);
if (copied < 0)
break;
dst += copied;
src += copied;
size -= copied;
}
return copied < 0 ? -EFAULT : 0;
}
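
probe_kernel_write() above is the s390 override of the helper declared in <linux/uaccess.h>: it copies into kernel memory via stura, bypassing DAT and write protection, and returns 0 on success or -EFAULT. A hypothetical caller sketch, not part of this file (assumes the symbol is reachable from the caller, e.g. built-in code or a kernel that exports it):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static int demo_flag;			/* placeholder destination in kernel data */

static int __init maccess_demo_init(void)
{
	int value = 1;
	long rc;

	/* copy sizeof(int) bytes into kernel memory without risking a fault */
	rc = probe_kernel_write(&demo_flag, &value, sizeof(value));
	pr_info("probe_kernel_write returned %ld, demo_flag = %d\n", rc, demo_flag);
	return rc ? -EFAULT : 0;
}

static void __exit maccess_demo_exit(void)
{
}

module_init(maccess_demo_init);
module_exit(maccess_demo_exit);
MODULE_LICENSE("GPL");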

kernel/arch/s390/mm/mmap.c (new file, 166 lines)

@@ -0,0 +1,166 @@
/*
* linux/arch/s390/mm/mmap.c
*
* flexible mmap layout support
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* Started by Ingo Molnar <mingo@elte.hu>
*/
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/compat.h>
/*
* Top of mmap area (just below the process stack).
*
* Leave at least a ~128 MB hole.
*/
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)
static inline unsigned long mmap_base(void)
{
unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
if (gap < MIN_GAP)
gap = MIN_GAP;
else if (gap > MAX_GAP)
gap = MAX_GAP;
return STACK_TOP - (gap & PAGE_MASK);
}
static inline int mmap_is_legacy(void)
{
#ifdef CONFIG_64BIT
/*
* Force standard allocation for 64 bit programs.
*/
if (!is_compat_task())
return 1;
#endif
return sysctl_legacy_va_layout ||
(current->personality & ADDR_COMPAT_LAYOUT) ||
current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;
}
#ifndef CONFIG_64BIT
/*
* This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use:
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
/*
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
*/
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE;
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
mm->mmap_base = mmap_base();
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
#else
int s390_mmap_check(unsigned long addr, unsigned long len)
{
if (!is_compat_task() &&
len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
return crst_table_upgrade(current->mm, 1UL << 53);
return 0;
}
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
unsigned long area;
int rc;
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
if (!(area & ~PAGE_MASK))
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
/* Upgrade the page table to 4 levels and retry. */
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
}
return area;
}
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
struct mm_struct *mm = current->mm;
unsigned long area;
int rc;
area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
if (!(area & ~PAGE_MASK))
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
/* Upgrade the page table to 4 levels and retry. */
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area_topdown(filp, addr, len,
pgoff, flags);
}
return area;
}
/*
* This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use:
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
/*
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
*/
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE;
mm->get_unmapped_area = s390_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
mm->mmap_base = mmap_base();
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
#endif
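
mmap_base() above places the top of the mmap area below the stack: the gap is the stack rlimit clamped between MIN_GAP (128 MB) and MAX_GAP (5/6 of STACK_TOP), rounded down to a page boundary. A stand-alone sketch of just that arithmetic, illustrative only (the STACK_TOP value is a placeholder, not the kernel's):

#include <stdio.h>

#define PAGE_MASK	(~0xfffUL)		/* 4 KB pages */
#define STACK_TOP	0x80000000UL		/* placeholder value */
#define MIN_GAP		(128UL * 1024 * 1024)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long stack_rlim_cur)
{
	unsigned long gap = stack_rlim_cur;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	return STACK_TOP - (gap & PAGE_MASK);
}

int main(void)
{
	/* the default 8 MB stack limit is below MIN_GAP, so the gap becomes 128 MB */
	printf("8 MB rlimit   -> mmap_base 0x%lx\n", mmap_base(8UL << 20));
	/* a very large limit is clamped to MAX_GAP (in the kernel, RLIM_INFINITY
	 * selects the legacy layout instead, see mmap_is_legacy() above) */
	printf("huge rlimit   -> mmap_base 0x%lx\n", mmap_base(~0UL));
	return 0;
}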

kernel/arch/s390/mm/page-states.c (new file, 113 lines)

@@ -0,0 +1,113 @@
/*
* Copyright IBM Corp. 2008
*
* Guest page hinting for unused pages.
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2
static int cmma_flag = 1;
static int __init cmma(char *str)
{
char *parm;
parm = strstrip(str);
if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
cmma_flag = 1;
return 1;
}
cmma_flag = 0;
if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
return 1;
return 0;
}
__setup("cmma=", cmma);
void __init cmma_init(void)
{
register unsigned long tmp asm("0") = 0;
register int rc asm("1") = -EOPNOTSUPP;
if (!cmma_flag)
return;
asm volatile(
" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+&d" (rc), "+&d" (tmp));
if (rc)
cmma_flag = 0;
}
static inline void set_page_unstable(struct page *page, int order)
{
int i, rc;
for (i = 0; i < (1 << order); i++)
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
: "=&d" (rc)
: "a" (page_to_phys(page + i)),
"i" (ESSA_SET_UNUSED));
}
void arch_free_page(struct page *page, int order)
{
if (!cmma_flag)
return;
set_page_unstable(page, order);
}
static inline void set_page_stable(struct page *page, int order)
{
int i, rc;
for (i = 0; i < (1 << order); i++)
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
: "=&d" (rc)
: "a" (page_to_phys(page + i)),
"i" (ESSA_SET_STABLE));
}
void arch_alloc_page(struct page *page, int order)
{
if (!cmma_flag)
return;
set_page_stable(page, order);
}
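/*
 * Walk the free lists of all populated zones and set every free page
 * either stable or unstable, depending on the argument.
 */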
void arch_set_page_states(int make_stable)
{
unsigned long flags, order, t;
struct list_head *l;
struct page *page;
struct zone *zone;
if (!cmma_flag)
return;
if (make_stable)
drain_local_pages(NULL);
for_each_populated_zone(zone) {
spin_lock_irqsave(&zone->lock, flags);
for_each_migratetype_order(order, t) {
list_for_each(l, &zone->free_area[order].free_list[t]) {
page = list_entry(l, struct page, lru);
if (make_stable)
set_page_stable(page, order);
else
set_page_unstable(page, order);
}
}
spin_unlock_irqrestore(&zone->lock, flags);
}
}

337
kernel/arch/s390/mm/pgtable.c Normal file
View File

@@ -0,0 +1,337 @@
/*
* Copyright IBM Corp. 2007,2009
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#ifndef CONFIG_64BIT
#define ALLOC_ORDER 1
#define TABLES_PER_PAGE 4
#define FRAG_MASK 15UL
#define SECOND_HALVES 10UL
void clear_table_pgstes(unsigned long *table)
{
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
memset(table + 256, 0, PAGE_SIZE/4);
clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
memset(table + 768, 0, PAGE_SIZE/4);
}
#else
#define ALLOC_ORDER 2
#define TABLES_PER_PAGE 2
#define FRAG_MASK 3UL
#define SECOND_HALVES 2UL
void clear_table_pgstes(unsigned long *table)
{
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
memset(table + 256, 0, PAGE_SIZE/2);
}
#endif
unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);
static int __init parse_vmalloc(char *arg)
{
if (!arg)
return -EINVAL;
VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
return 0;
}
early_param("vmalloc", parse_vmalloc);
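/*
 * Allocate a region/segment table for the given mm; when the no-exec
 * emulation is active a shadow table is allocated as well and its
 * address is stored in page->index.
 */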
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
if (!page)
return NULL;
page->index = 0;
if (noexec) {
struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
if (!shadow) {
__free_pages(page, ALLOC_ORDER);
return NULL;
}
page->index = page_to_phys(shadow);
}
spin_lock(&mm->context.list_lock);
list_add(&page->lru, &mm->context.crst_list);
spin_unlock(&mm->context.list_lock);
return (unsigned long *) page_to_phys(page);
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
unsigned long *shadow = get_shadow_table(table);
struct page *page = virt_to_page(table);
spin_lock(&mm->context.list_lock);
list_del(&page->lru);
spin_unlock(&mm->context.list_lock);
if (shadow)
free_pages((unsigned long) shadow, ALLOC_ORDER);
free_pages((unsigned long) table, ALLOC_ORDER);
}
#ifdef CONFIG_64BIT
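/*
 * Raise the address space limit by stacking additional region tables
 * on top of the current top-level table until "limit" is reached.
 */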
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
unsigned long *table, *pgd;
unsigned long entry;
BUG_ON(limit > (1UL << 53));
repeat:
table = crst_table_alloc(mm, mm->context.noexec);
if (!table)
return -ENOMEM;
spin_lock(&mm->page_table_lock);
if (mm->context.asce_limit < limit) {
pgd = (unsigned long *) mm->pgd;
if (mm->context.asce_limit <= (1UL << 31)) {
entry = _REGION3_ENTRY_EMPTY;
mm->context.asce_limit = 1UL << 42;
mm->context.asce_bits = _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS |
_ASCE_TYPE_REGION3;
} else {
entry = _REGION2_ENTRY_EMPTY;
mm->context.asce_limit = 1UL << 53;
mm->context.asce_bits = _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS |
_ASCE_TYPE_REGION2;
}
crst_table_init(table, entry);
pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
mm->pgd = (pgd_t *) table;
mm->task_size = mm->context.asce_limit;
table = NULL;
}
spin_unlock(&mm->page_table_lock);
if (table)
crst_table_free(mm, table);
if (mm->context.asce_limit < limit)
goto repeat;
update_mm(mm, current);
return 0;
}
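/*
 * Lower the address space limit again by removing top-level region
 * tables; the TLB is flushed before the tables are released.
 */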
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
pgd_t *pgd;
if (mm->context.asce_limit <= limit)
return;
__tlb_flush_mm(mm);
while (mm->context.asce_limit > limit) {
pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
case _REGION_ENTRY_TYPE_R2:
mm->context.asce_limit = 1UL << 42;
mm->context.asce_bits = _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS |
_ASCE_TYPE_REGION3;
break;
case _REGION_ENTRY_TYPE_R3:
mm->context.asce_limit = 1UL << 31;
mm->context.asce_bits = _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS |
_ASCE_TYPE_SEGMENT;
break;
default:
BUG();
}
mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
mm->task_size = mm->context.asce_limit;
crst_table_free(mm, (unsigned long *) pgd);
}
update_mm(mm, current);
}
#endif
/*
* page table entry allocation/free routines.
*/
unsigned long *page_table_alloc(struct mm_struct *mm)
{
struct page *page;
unsigned long *table;
unsigned long bits;
bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
spin_lock(&mm->context.list_lock);
page = NULL;
if (!list_empty(&mm->context.pgtable_list)) {
page = list_first_entry(&mm->context.pgtable_list,
struct page, lru);
if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
page = NULL;
}
if (!page) {
spin_unlock(&mm->context.list_lock);
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (!page)
return NULL;
pgtable_page_ctor(page);
page->flags &= ~FRAG_MASK;
table = (unsigned long *) page_to_phys(page);
if (mm->context.has_pgste)
clear_table_pgstes(table);
else
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
spin_lock(&mm->context.list_lock);
list_add(&page->lru, &mm->context.pgtable_list);
}
table = (unsigned long *) page_to_phys(page);
while (page->flags & bits) {
table += 256;
bits <<= 1;
}
page->flags |= bits;
if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
list_move_tail(&page->lru, &mm->context.pgtable_list);
spin_unlock(&mm->context.list_lock);
return table;
}
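/*
 * Release one page table fragment; the backing page is only freed once
 * none of its fragments are in use any more.
 */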
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
struct page *page;
unsigned long bits;
bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock(&mm->context.list_lock);
page->flags ^= bits;
if (page->flags & FRAG_MASK) {
/* Page now has some free pgtable fragments. */
list_move(&page->lru, &mm->context.pgtable_list);
page = NULL;
} else
/* All fragments of the 4K page have been freed. */
list_del(&page->lru);
spin_unlock(&mm->context.list_lock);
if (page) {
pgtable_page_dtor(page);
__free_page(page);
}
}
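/*
 * Tear down the no-exec emulation for this mm: free all shadow region
 * and segment tables and mark the second page table halves as unused.
 */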
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
struct page *page;
spin_lock(&mm->context.list_lock);
/* Free shadow region and segment tables. */
list_for_each_entry(page, &mm->context.crst_list, lru)
if (page->index) {
free_pages((unsigned long) page->index, ALLOC_ORDER);
page->index = 0;
}
/* "Free" second halves of page tables. */
list_for_each_entry(page, &mm->context.pgtable_list, lru)
page->flags &= ~SECOND_HALVES;
spin_unlock(&mm->context.list_lock);
mm->context.noexec = 0;
update_mm(mm, tsk);
}
/*
* Switch on pgstes for the current userspace process (needed for KVM).
*/
int s390_enable_sie(void)
{
struct task_struct *tsk = current;
struct mm_struct *mm, *old_mm;
/* Do we have switched amode? If not, we cannot do SIE. */
if (!switch_amode)
return -EINVAL;
/* Do we have pgstes? If yes, we are done. */
if (tsk->mm->context.has_pgste)
return 0;
/* Let's check whether we are allowed to replace the mm. */
task_lock(tsk);
if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
!hlist_empty(&tsk->mm->ioctx_list) ||
#endif
tsk->mm != tsk->active_mm) {
task_unlock(tsk);
return -EINVAL;
}
task_unlock(tsk);
/* Copy the mm and let dup_mm create the page tables with pgstes. */
tsk->mm->context.alloc_pgste = 1;
mm = dup_mm(tsk);
tsk->mm->context.alloc_pgste = 0;
if (!mm)
return -ENOMEM;
/* Now let's check again whether anything changed in the meantime. */
task_lock(tsk);
if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
!hlist_empty(&tsk->mm->ioctx_list) ||
#endif
tsk->mm != tsk->active_mm) {
mmput(mm);
task_unlock(tsk);
return -EINVAL;
}
/* OK, we are alone: no ptrace, no threads, etc. */
old_mm = tsk->mm;
tsk->mm = tsk->active_mm = mm;
preempt_disable();
update_mm(mm, tsk);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
preempt_enable();
task_unlock(tsk);
mmput(old_mm);
return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
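/*
 * Use the lra (load real address) instruction to check whether the page
 * is currently mapped; condition code 0 means the translation exists.
 */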
bool kernel_page_present(struct page *page)
{
unsigned long addr;
int cc;
addr = page_to_phys(page);
asm volatile(
" lra %1,0(%1)\n"
" ipm %0\n"
" srl %0,28"
: "=d" (cc), "+a" (addr) : : "cc");
return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */

385
kernel/arch/s390/mm/vmem.c Normal file
View File

@@ -0,0 +1,385 @@
/*
* arch/s390/mm/vmem.c
*
* Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
static DEFINE_MUTEX(vmem_mutex);
struct memory_segment {
struct list_head list;
unsigned long start;
unsigned long size;
};
static LIST_HEAD(mem_segs);
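/*
 * Allocate pages from the buddy allocator once the slab is available,
 * and from bootmem during early boot.
 */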
static void __ref *vmem_alloc_pages(unsigned int order)
{
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
static inline pud_t *vmem_pud_alloc(void)
{
pud_t *pud = NULL;
#ifdef CONFIG_64BIT
pud = vmem_alloc_pages(2);
if (!pud)
return NULL;
clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
return pud;
}
static inline pmd_t *vmem_pmd_alloc(void)
{
pmd_t *pmd = NULL;
#ifdef CONFIG_64BIT
pmd = vmem_alloc_pages(2);
if (!pmd)
return NULL;
clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
return pmd;
}
static pte_t __ref *vmem_pte_alloc(void)
{
pte_t *pte;
if (slab_is_available())
pte = (pte_t *) page_table_alloc(&init_mm);
else
pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
if (!pte)
return NULL;
clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
PTRS_PER_PTE * sizeof(pte_t));
return pte;
}
/*
* Add a physical memory range to the 1:1 mapping.
*/
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
unsigned long address;
pgd_t *pg_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pte_t pte;
int ret = -ENOMEM;
for (address = start; address < start + size; address += PAGE_SIZE) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
if (!pu_dir)
goto out;
pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
if (pud_none(*pu_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
pud_populate_kernel(&init_mm, pu_dir, pm_dir);
}
pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pm_dir = pmd_offset(pu_dir, address);
#ifdef __s390x__
if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
(address + HPAGE_SIZE <= start + size) &&
(address >= HPAGE_SIZE)) {
pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
pmd_val(*pm_dir) = pte_val(pte);
address += HPAGE_SIZE - PAGE_SIZE;
continue;
}
#endif
if (pmd_none(*pm_dir)) {
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
}
pt_dir = pte_offset_kernel(pm_dir, address);
*pt_dir = pte;
}
ret = 0;
out:
flush_tlb_kernel_range(start, start + size);
return ret;
}
/*
* Remove a physical memory range from the 1:1 mapping.
* Currently only invalidates page table entries.
*/
static void vmem_remove_range(unsigned long start, unsigned long size)
{
unsigned long address;
pgd_t *pg_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pte_t pte;
pte_val(pte) = _PAGE_TYPE_EMPTY;
for (address = start; address < start + size; address += PAGE_SIZE) {
pg_dir = pgd_offset_k(address);
pu_dir = pud_offset(pg_dir, address);
if (pud_none(*pu_dir))
continue;
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir))
continue;
if (pmd_huge(*pm_dir)) {
pmd_clear_kernel(pm_dir);
address += HPAGE_SIZE - PAGE_SIZE;
continue;
}
pt_dir = pte_offset_kernel(pm_dir, address);
*pt_dir = pte;
}
flush_tlb_kernel_range(start, start + size);
}
/*
* Add a backed mem_map array to the virtual mem_map array.
*/
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
unsigned long address, start_addr, end_addr;
pgd_t *pg_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pte_t pte;
int ret = -ENOMEM;
start_addr = (unsigned long) start;
end_addr = (unsigned long) (start + nr);
for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
if (!pu_dir)
goto out;
pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
if (pud_none(*pu_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
pud_populate_kernel(&init_mm, pu_dir, pm_dir);
}
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
}
pt_dir = pte_offset_kernel(pm_dir, address);
if (pte_none(*pt_dir)) {
unsigned long new_page;
new_page = __pa(vmem_alloc_pages(0));
if (!new_page)
goto out;
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
*pt_dir = pte;
}
}
memset(start, 0, nr * sizeof(struct page));
ret = 0;
out:
flush_tlb_kernel_range(start_addr, end_addr);
return ret;
}
/*
* Add memory segment to the segment list if it doesn't overlap with
* an already present segment.
*/
static int insert_memory_segment(struct memory_segment *seg)
{
struct memory_segment *tmp;
if (seg->start + seg->size > VMEM_MAX_PHYS ||
seg->start + seg->size < seg->start)
return -ERANGE;
list_for_each_entry(tmp, &mem_segs, list) {
if (seg->start >= tmp->start + tmp->size)
continue;
if (seg->start + seg->size <= tmp->start)
continue;
return -ENOSPC;
}
list_add(&seg->list, &mem_segs);
return 0;
}
/*
* Remove memory segment from the segment list.
*/
static void remove_memory_segment(struct memory_segment *seg)
{
list_del(&seg->list);
}
static void __remove_shared_memory(struct memory_segment *seg)
{
remove_memory_segment(seg);
vmem_remove_range(seg->start, seg->size);
}
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
struct memory_segment *seg;
int ret;
mutex_lock(&vmem_mutex);
ret = -ENOENT;
list_for_each_entry(seg, &mem_segs, list) {
if (seg->start == start && seg->size == size)
break;
}
if (seg->start != start || seg->size != size)
goto out;
ret = 0;
__remove_shared_memory(seg);
kfree(seg);
out:
mutex_unlock(&vmem_mutex);
return ret;
}
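/*
 * Register a new memory segment and add it to the identity mapping;
 * on failure the segment is removed again and freed.
 */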
int vmem_add_mapping(unsigned long start, unsigned long size)
{
struct memory_segment *seg;
int ret;
mutex_lock(&vmem_mutex);
ret = -ENOMEM;
seg = kzalloc(sizeof(*seg), GFP_KERNEL);
if (!seg)
goto out;
seg->start = start;
seg->size = size;
ret = insert_memory_segment(seg);
if (ret)
goto out_free;
ret = vmem_add_mem(start, size, 0);
if (ret)
goto out_remove;
goto out;
out_remove:
__remove_shared_memory(seg);
out_free:
kfree(seg);
out:
mutex_unlock(&vmem_mutex);
return ret;
}
/*
* Map the whole physical memory into virtual memory (identity mapping).
* Enough space is reserved in the vmalloc area for vmemmap so that
* additional memory segments can be hot-plugged later.
*/
void __init vmem_map_init(void)
{
unsigned long ro_start, ro_end;
unsigned long start, end;
int i;
spin_lock_init(&init_mm.context.list_lock);
INIT_LIST_HEAD(&init_mm.context.crst_list);
INIT_LIST_HEAD(&init_mm.context.pgtable_list);
init_mm.context.noexec = 0;
ro_start = ((unsigned long)&_stext) & PAGE_MASK;
ro_end = PFN_ALIGN((unsigned long)&_eshared);
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
start = memory_chunk[i].addr;
end = memory_chunk[i].addr + memory_chunk[i].size;
if (start >= ro_end || end <= ro_start)
vmem_add_mem(start, end - start, 0);
else if (start >= ro_start && end <= ro_end)
vmem_add_mem(start, end - start, 1);
else if (start >= ro_start) {
vmem_add_mem(start, ro_end - start, 1);
vmem_add_mem(ro_end, end - ro_end, 0);
} else if (end < ro_end) {
vmem_add_mem(start, ro_start - start, 0);
vmem_add_mem(ro_start, end - ro_start, 1);
} else {
vmem_add_mem(start, ro_start - start, 0);
vmem_add_mem(ro_start, ro_end - ro_start, 1);
vmem_add_mem(ro_end, end - ro_end, 0);
}
}
}
/*
* Convert memory chunk array to a memory segment list so there is a single
* list that contains both r/w memory and shared memory segments.
*/
static int __init vmem_convert_memory_chunk(void)
{
struct memory_segment *seg;
int i;
mutex_lock(&vmem_mutex);
for (i = 0; i < MEMORY_CHUNKS; i++) {
if (!memory_chunk[i].size)
continue;
seg = kzalloc(sizeof(*seg), GFP_KERNEL);
if (!seg)
panic("Out of memory...\n");
seg->start = memory_chunk[i].addr;
seg->size = memory_chunk[i].size;
insert_memory_segment(seg);
}
mutex_unlock(&vmem_mutex);
return 0;
}
core_initcall(vmem_convert_memory_chunk);