add idl4k kernel firmware version 1.13.0.105

Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions


@@ -0,0 +1,8 @@
#
# Makefile for the S/390 specific device drivers
#
obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
drivers-y += drivers/s390/built-in.o


@@ -0,0 +1,64 @@
comment "S/390 block device drivers"
depends on S390 && BLOCK
config BLK_DEV_XPRAM
tristate "XPRAM disk support"
depends on S390 && BLOCK
help
Select this option if you want to use your expanded storage on S/390
or zSeries as a disk. This is useful as a _fast_ swap device if you
want to access more than 2G of memory when running in 31 bit mode.
This option is also available as a module which will be called
xpram. If unsure, say "N".
config DCSSBLK
tristate "DCSSBLK support"
depends on S390 && BLOCK
help
Support for dcss block device
config DASD
tristate "Support for DASD devices"
depends on CCW && BLOCK
select IOSCHED_DEADLINE
help
Enable this option if you want to access DASDs directly utilizing
S/390's channel subsystem commands. This is necessary for running
natively on a single image or an LPAR.
config DASD_PROFILE
bool "Profiling support for dasd devices"
depends on DASD
help
Enable this option if you want to see profiling information
in /proc/dasd/statistics.
config DASD_ECKD
tristate "Support for ECKD Disks"
depends on DASD
help
ECKD devices are the most commonly used devices. You should enable
this option unless you are very sure that you have no ECKD device.
config DASD_FBA
tristate "Support for FBA Disks"
depends on DASD
help
Select this option to be able to access FBA devices. It is safe to
say "Y".
config DASD_DIAG
tristate "Support for DIAG access to Disks"
depends on DASD
help
Select this option if you want to use the Diagnose250 command to access
disks under VM. If you are not running under VM or are unsure what it is,
say "N".
config DASD_EER
bool "Extended error reporting (EER)"
depends on DASD
help
This driver provides a character device interface to the
DASD extended error reporting. This is only needed if you want to
use applications written for the EER facility.


@@ -0,0 +1,19 @@
#
# S/390 block devices
#
dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_alias.o
dasd_fba_mod-objs := dasd_fba.o
dasd_diag_mod-objs := dasd_diag.o
dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
dasd_genhd.o dasd_erp.o
ifdef CONFIG_DASD_EER
dasd_mod-objs += dasd_eer.o
endif
obj-$(CONFIG_DASD) += dasd_mod.o
obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,910 @@
/*
* PAV alias management for the DASD ECKD discipline
*
* Copyright IBM Corporation, 2007
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/list.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
/*
* General concept of alias management:
* - PAV and DASD alias management is specific to the eckd discipline.
* - A device is connected to an lcu as long as the device exists.
* dasd_alias_make_device_known_to_lcu will be called when the
* device is checked by the eckd discipline and
* dasd_alias_disconnect_device_from_lcu will be called
* before the device is deleted.
* - The dasd_alias_add_device / dasd_alias_remove_device
* functions mark the point when a device is 'ready for service'.
* - A summary unit check is a rare occasion, but it is mandatory to
* support it. It requires some complex recovery actions before the
* devices can be used again (see dasd_alias_handle_summary_unit_check).
* - dasd_alias_get_start_dev will find an alias device that can be used
* instead of the base device and does some (very simple) load balancing.
* This is the function that gets called for each I/O, so when improving
* something, this function should get faster or better, the rest has just
* to be correct.
*/
static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
static struct alias_root aliastree = {
.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
static struct alias_server *_find_server(struct dasd_uid *uid)
{
struct alias_server *pos;
list_for_each_entry(pos, &aliastree.serverlist, server) {
if (!strncmp(pos->uid.vendor, uid->vendor,
sizeof(uid->vendor))
&& !strncmp(pos->uid.serial, uid->serial,
sizeof(uid->serial)))
return pos;
}
return NULL;
}
static struct alias_lcu *_find_lcu(struct alias_server *server,
struct dasd_uid *uid)
{
struct alias_lcu *pos;
list_for_each_entry(pos, &server->lculist, lcu) {
if (pos->uid.ssid == uid->ssid)
return pos;
}
return NULL;
}
static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
struct dasd_uid *uid)
{
struct alias_pav_group *pos;
__u8 search_unit_addr;
/* for hyper pav there is only one group */
if (lcu->pav == HYPER_PAV) {
if (list_empty(&lcu->grouplist))
return NULL;
else
return list_first_entry(&lcu->grouplist,
struct alias_pav_group, group);
}
/* for base pav we have to find the group that matches the base */
if (uid->type == UA_BASE_DEVICE)
search_unit_addr = uid->real_unit_addr;
else
search_unit_addr = uid->base_unit_addr;
list_for_each_entry(pos, &lcu->grouplist, group) {
if (pos->uid.base_unit_addr == search_unit_addr &&
!strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
return pos;
}
return NULL;
}
static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
struct alias_server *server;
server = kzalloc(sizeof(*server), GFP_KERNEL);
if (!server)
return ERR_PTR(-ENOMEM);
memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
INIT_LIST_HEAD(&server->server);
INIT_LIST_HEAD(&server->lculist);
return server;
}
static void _free_server(struct alias_server *server)
{
kfree(server);
}
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
struct alias_lcu *lcu;
lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
if (!lcu)
return ERR_PTR(-ENOMEM);
lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
if (!lcu->uac)
goto out_err1;
lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
if (!lcu->rsu_cqr)
goto out_err2;
lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
GFP_KERNEL | GFP_DMA);
if (!lcu->rsu_cqr->cpaddr)
goto out_err3;
lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
if (!lcu->rsu_cqr->data)
goto out_err4;
memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
lcu->uid.ssid = uid->ssid;
lcu->pav = NO_PAV;
lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
INIT_LIST_HEAD(&lcu->lcu);
INIT_LIST_HEAD(&lcu->inactive_devices);
INIT_LIST_HEAD(&lcu->active_devices);
INIT_LIST_HEAD(&lcu->grouplist);
INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
spin_lock_init(&lcu->lock);
return lcu;
out_err4:
kfree(lcu->rsu_cqr->cpaddr);
out_err3:
kfree(lcu->rsu_cqr);
out_err2:
kfree(lcu->uac);
out_err1:
kfree(lcu);
return ERR_PTR(-ENOMEM);
}
static void _free_lcu(struct alias_lcu *lcu)
{
kfree(lcu->rsu_cqr->data);
kfree(lcu->rsu_cqr->cpaddr);
kfree(lcu->rsu_cqr);
kfree(lcu->uac);
kfree(lcu);
}
/*
* This is the function that will allocate all the server and lcu data,
* so this function must be called first for a new device.
* If the return value is 1, the lcu was already known before, if it
* is 0, this is a new lcu.
* Negative return code indicates that something went wrong (e.g. -ENOMEM)
*/
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
struct dasd_eckd_private *private;
unsigned long flags;
struct alias_server *server, *newserver;
struct alias_lcu *lcu, *newlcu;
int is_lcu_known;
struct dasd_uid *uid;
private = (struct dasd_eckd_private *) device->private;
uid = &private->uid;
spin_lock_irqsave(&aliastree.lock, flags);
is_lcu_known = 1;
server = _find_server(uid);
if (!server) {
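/* the allocation may sleep (GFP_KERNEL), so drop the lock first and
 * re-check for a concurrent insert once the lock is taken again */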
spin_unlock_irqrestore(&aliastree.lock, flags);
newserver = _allocate_server(uid);
if (IS_ERR(newserver))
return PTR_ERR(newserver);
spin_lock_irqsave(&aliastree.lock, flags);
server = _find_server(uid);
if (!server) {
list_add(&newserver->server, &aliastree.serverlist);
server = newserver;
is_lcu_known = 0;
} else {
/* someone was faster */
_free_server(newserver);
}
}
lcu = _find_lcu(server, uid);
if (!lcu) {
spin_unlock_irqrestore(&aliastree.lock, flags);
newlcu = _allocate_lcu(uid);
if (IS_ERR(newlcu))
return PTR_ERR(newlcu);
spin_lock_irqsave(&aliastree.lock, flags);
lcu = _find_lcu(server, uid);
if (!lcu) {
list_add(&newlcu->lcu, &server->lculist);
lcu = newlcu;
is_lcu_known = 0;
} else {
/* someone was faster */
_free_lcu(newlcu);
}
}
spin_lock(&lcu->lock);
list_add(&device->alias_list, &lcu->inactive_devices);
private->lcu = lcu;
spin_unlock(&lcu->lock);
spin_unlock_irqrestore(&aliastree.lock, flags);
return is_lcu_known;
}
/*
* This function removes a device from the scope of alias management.
* The complicated part is to make sure that it is not in use by
* any of the workers. If necessary cancel the work.
*/
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
struct dasd_eckd_private *private;
unsigned long flags;
struct alias_lcu *lcu;
struct alias_server *server;
int was_pending;
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
spin_lock_irqsave(&lcu->lock, flags);
list_del_init(&device->alias_list);
/* make sure that the workers don't use this device */
if (device == lcu->suc_data.device) {
spin_unlock_irqrestore(&lcu->lock, flags);
cancel_work_sync(&lcu->suc_data.worker);
spin_lock_irqsave(&lcu->lock, flags);
if (device == lcu->suc_data.device)
lcu->suc_data.device = NULL;
}
was_pending = 0;
if (device == lcu->ruac_data.device) {
spin_unlock_irqrestore(&lcu->lock, flags);
was_pending = 1;
cancel_delayed_work_sync(&lcu->ruac_data.dwork);
spin_lock_irqsave(&lcu->lock, flags);
if (device == lcu->ruac_data.device)
lcu->ruac_data.device = NULL;
}
private->lcu = NULL;
spin_unlock_irqrestore(&lcu->lock, flags);
spin_lock_irqsave(&aliastree.lock, flags);
spin_lock(&lcu->lock);
if (list_empty(&lcu->grouplist) &&
list_empty(&lcu->active_devices) &&
list_empty(&lcu->inactive_devices)) {
list_del(&lcu->lcu);
spin_unlock(&lcu->lock);
_free_lcu(lcu);
lcu = NULL;
} else {
if (was_pending)
_schedule_lcu_update(lcu, NULL);
spin_unlock(&lcu->lock);
}
server = _find_server(&private->uid);
if (server && list_empty(&server->lculist)) {
list_del(&server->server);
_free_server(server);
}
spin_unlock_irqrestore(&aliastree.lock, flags);
}
/*
* This function assumes that the unit address configuration stored
* in the lcu is up to date and will update the device uid before
* adding it to a pav group.
*/
static int _add_device_to_lcu(struct alias_lcu *lcu,
struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct alias_pav_group *group;
struct dasd_uid *uid;
private = (struct dasd_eckd_private *) device->private;
uid = &private->uid;
uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
dasd_set_uid(device->cdev, &private->uid);
/* if we have no PAV anyway, we don't need to bother with PAV groups */
if (lcu->pav == NO_PAV) {
list_move(&device->alias_list, &lcu->active_devices);
return 0;
}
group = _find_group(lcu, uid);
if (!group) {
group = kzalloc(sizeof(*group), GFP_ATOMIC);
if (!group)
return -ENOMEM;
memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
group->uid.ssid = uid->ssid;
if (uid->type == UA_BASE_DEVICE)
group->uid.base_unit_addr = uid->real_unit_addr;
else
group->uid.base_unit_addr = uid->base_unit_addr;
memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
INIT_LIST_HEAD(&group->group);
INIT_LIST_HEAD(&group->baselist);
INIT_LIST_HEAD(&group->aliaslist);
list_add(&group->group, &lcu->grouplist);
}
if (uid->type == UA_BASE_DEVICE)
list_move(&device->alias_list, &group->baselist);
else
list_move(&device->alias_list, &group->aliaslist);
private->pavgroup = group;
return 0;
}
static void _remove_device_from_lcu(struct alias_lcu *lcu,
struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct alias_pav_group *group;
private = (struct dasd_eckd_private *) device->private;
list_move(&device->alias_list, &lcu->inactive_devices);
group = private->pavgroup;
if (!group)
return;
private->pavgroup = NULL;
if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
list_del(&group->group);
kfree(group);
return;
}
if (group->next == device)
group->next = NULL;
}
static int read_unit_address_configuration(struct dasd_device *device,
struct alias_lcu *lcu)
{
struct dasd_psf_prssd_data *prssdp;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
unsigned long flags;
cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data)),
device);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->retries = 10;
cqr->expires = 20 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = 0x0e; /* Read unit address configuration */
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->cda = (__u32)(addr_t) prssdp;
/* Read Subsystem Data - unit address configuration */
memset(lcu->uac, 0, sizeof(*(lcu->uac)));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*(lcu->uac));
ccw->cda = (__u32)(addr_t) lcu->uac;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
/* need to unset flag here to detect race with summary unit check */
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags &= ~NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
do {
rc = dasd_sleep_on(cqr);
} while (rc && (cqr->retries > 0));
if (rc) {
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
}
dasd_kfree_request(cqr, cqr->memdev);
return rc;
}
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
unsigned long flags;
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_device *device, *tempdev;
int i, rc;
struct dasd_eckd_private *private;
spin_lock_irqsave(&lcu->lock, flags);
list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
alias_list) {
list_move(&device->alias_list, &lcu->active_devices);
private = (struct dasd_eckd_private *) device->private;
private->pavgroup = NULL;
}
list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
alias_list) {
list_move(&device->alias_list, &lcu->active_devices);
private = (struct dasd_eckd_private *) device->private;
private->pavgroup = NULL;
}
list_del(&pavgroup->group);
kfree(pavgroup);
}
spin_unlock_irqrestore(&lcu->lock, flags);
rc = read_unit_address_configuration(refdev, lcu);
if (rc)
return rc;
spin_lock_irqsave(&lcu->lock, flags);
lcu->pav = NO_PAV;
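/* scan the fresh unit address configuration to determine which PAV
 * mode (if any) the LCU is running in */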
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
case UA_BASE_PAV_ALIAS:
lcu->pav = BASE_PAV;
break;
case UA_HYPER_PAV_ALIAS:
lcu->pav = HYPER_PAV;
break;
}
if (lcu->pav != NO_PAV)
break;
}
list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
alias_list) {
_add_device_to_lcu(lcu, device);
}
spin_unlock_irqrestore(&lcu->lock, flags);
return 0;
}
static void lcu_update_work(struct work_struct *work)
{
struct alias_lcu *lcu;
struct read_uac_work_data *ruac_data;
struct dasd_device *device;
unsigned long flags;
int rc;
ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
device = ruac_data->device;
rc = _lcu_update(device, lcu);
/*
* Need to check flags again, as there could have been another
* prepare_update or a new device added while we were still
* processing the data
*/
spin_lock_irqsave(&lcu->lock, flags);
if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
" alias data in lcu (rc = %d), retry later", rc);
schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
} else {
lcu->ruac_data.device = NULL;
lcu->flags &= ~UPDATE_PENDING;
}
spin_unlock_irqrestore(&lcu->lock, flags);
}
static int _schedule_lcu_update(struct alias_lcu *lcu,
struct dasd_device *device)
{
struct dasd_device *usedev = NULL;
struct alias_pav_group *group;
lcu->flags |= NEED_UAC_UPDATE;
if (lcu->ruac_data.device) {
/* already scheduled or running */
return 0;
}
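/* pick a device to carry out the update: prefer the given device,
 * then any device in the first PAV group, then any active device */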
if (device && !list_empty(&device->alias_list))
usedev = device;
if (!usedev && !list_empty(&lcu->grouplist)) {
group = list_first_entry(&lcu->grouplist,
struct alias_pav_group, group);
if (!list_empty(&group->baselist))
usedev = list_first_entry(&group->baselist,
struct dasd_device,
alias_list);
else if (!list_empty(&group->aliaslist))
usedev = list_first_entry(&group->aliaslist,
struct dasd_device,
alias_list);
}
if (!usedev && !list_empty(&lcu->active_devices)) {
usedev = list_first_entry(&lcu->active_devices,
struct dasd_device, alias_list);
}
/*
* if we haven't found a proper device yet, give up for now, the next
* device that will be set active will trigger an lcu update
*/
if (!usedev)
return -EINVAL;
lcu->ruac_data.device = usedev;
schedule_delayed_work(&lcu->ruac_data.dwork, 0);
return 0;
}
int dasd_alias_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct alias_lcu *lcu;
unsigned long flags;
int rc;
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
rc = 0;
spin_lock_irqsave(&lcu->lock, flags);
if (!(lcu->flags & UPDATE_PENDING)) {
rc = _add_device_to_lcu(lcu, device);
if (rc)
lcu->flags |= UPDATE_PENDING;
}
if (lcu->flags & UPDATE_PENDING) {
list_move(&device->alias_list, &lcu->active_devices);
_schedule_lcu_update(lcu, device);
}
spin_unlock_irqrestore(&lcu->lock, flags);
return rc;
}
int dasd_alias_remove_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct alias_lcu *lcu;
unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
spin_lock_irqsave(&lcu->lock, flags);
_remove_device_from_lcu(lcu, device);
spin_unlock_irqrestore(&lcu->lock, flags);
return 0;
}
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
struct dasd_device *alias_device;
struct alias_pav_group *group;
struct alias_lcu *lcu;
struct dasd_eckd_private *private, *alias_priv;
unsigned long flags;
private = (struct dasd_eckd_private *) base_device->private;
group = private->pavgroup;
lcu = private->lcu;
if (!group || !lcu)
return NULL;
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
return NULL;
spin_lock_irqsave(&lcu->lock, flags);
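/* simple round-robin: use group->next (or the first alias in the
 * group) and advance group->next for the next request */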
alias_device = group->next;
if (!alias_device) {
if (list_empty(&group->aliaslist)) {
spin_unlock_irqrestore(&lcu->lock, flags);
return NULL;
} else {
alias_device = list_first_entry(&group->aliaslist,
struct dasd_device,
alias_list);
}
}
if (list_is_last(&alias_device->alias_list, &group->aliaslist))
group->next = list_first_entry(&group->aliaslist,
struct dasd_device, alias_list);
else
group->next = list_first_entry(&alias_device->alias_list,
struct dasd_device, alias_list);
spin_unlock_irqrestore(&lcu->lock, flags);
alias_priv = (struct dasd_eckd_private *) alias_device->private;
if ((alias_priv->count < private->count) && !alias_device->stopped)
return alias_device;
else
return NULL;
}
/*
* Summary unit check handling depends on the way alias devices
* are handled so it is done here rather than in dasd_eckd.c
*/
static int reset_summary_unit_check(struct alias_lcu *lcu,
struct dasd_device *device,
char reason)
{
struct dasd_ccw_req *cqr;
int rc = 0;
struct ccw1 *ccw;
cqr = lcu->rsu_cqr;
strncpy((char *) &cqr->magic, "ECKD", 4);
ASCEBC((char *) &cqr->magic, 4);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
ccw->flags = 0;
ccw->count = 16;
ccw->cda = (__u32)(addr_t) cqr->data;
((char *)cqr->data)[0] = reason;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->retries = 255; /* set retry counter to enable basic ERP */
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->expires = 5 * HZ;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
return rc;
}
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
struct dasd_device *device;
struct dasd_eckd_private *private;
/* active and inactive list can contain alias as well as base devices */
list_for_each_entry(device, &lcu->active_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
if (private->uid.type != UA_BASE_DEVICE)
continue;
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
if (private->uid.type != UA_BASE_DEVICE)
continue;
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(device, &pavgroup->baselist, alias_list) {
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
}
}
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
struct dasd_device *device, *temp;
struct dasd_eckd_private *private;
int rc;
unsigned long flags;
LIST_HEAD(active);
/*
* Problem here ist that dasd_flush_device_queue may wait
* for termination of a request to complete. We can't keep
* the lcu lock during that time, so we must assume that
* the lists may have changed.
* Idea: first gather all active alias devices in a separate list,
* then flush the first element of this list unlocked, and afterwards
* check if it is still on the list before moving it to the
* active_devices list.
*/
spin_lock_irqsave(&lcu->lock, flags);
list_for_each_entry_safe(device, temp, &lcu->active_devices,
alias_list) {
private = (struct dasd_eckd_private *) device->private;
if (private->uid.type == UA_BASE_DEVICE)
continue;
list_move(&device->alias_list, &active);
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_splice_init(&pavgroup->aliaslist, &active);
}
while (!list_empty(&active)) {
device = list_first_entry(&active, struct dasd_device,
alias_list);
spin_unlock_irqrestore(&lcu->lock, flags);
rc = dasd_flush_device_queue(device);
spin_lock_irqsave(&lcu->lock, flags);
/*
* only move device around if it wasn't moved away while we
* were waiting for the flush
*/
if (device == list_first_entry(&active,
struct dasd_device, alias_list))
list_move(&device->alias_list, &lcu->active_devices);
}
spin_unlock_irqrestore(&lcu->lock, flags);
}
static void __stop_device_on_lcu(struct dasd_device *device,
struct dasd_device *pos)
{
/* If pos == device then device is already locked! */
if (pos == device) {
pos->stopped |= DASD_STOPPED_SU;
return;
}
spin_lock(get_ccwdev_lock(pos->cdev));
pos->stopped |= DASD_STOPPED_SU;
spin_unlock(get_ccwdev_lock(pos->cdev));
}
/*
* This function is called in interrupt context, so the
* cdev lock for device is already locked!
*/
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
struct dasd_device *device)
{
struct alias_pav_group *pavgroup;
struct dasd_device *pos;
list_for_each_entry(pos, &lcu->active_devices, alias_list)
__stop_device_on_lcu(device, pos);
list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
__stop_device_on_lcu(device, pos);
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(pos, &pavgroup->baselist, alias_list)
__stop_device_on_lcu(device, pos);
list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
__stop_device_on_lcu(device, pos);
}
}
static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
struct dasd_device *device;
unsigned long flags;
list_for_each_entry(device, &lcu->active_devices, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
device->stopped &= ~DASD_STOPPED_SU;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
device->stopped &= ~DASD_STOPPED_SU;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(device, &pavgroup->baselist, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
device->stopped &= ~DASD_STOPPED_SU;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
}
list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
device->stopped &= ~DASD_STOPPED_SU;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
}
}
}
static void summary_unit_check_handling_work(struct work_struct *work)
{
struct alias_lcu *lcu;
struct summary_unit_check_work_data *suc_data;
unsigned long flags;
struct dasd_device *device;
suc_data = container_of(work, struct summary_unit_check_work_data,
worker);
lcu = container_of(suc_data, struct alias_lcu, suc_data);
device = suc_data->device;
/* 1. flush alias devices */
flush_all_alias_devices_on_lcu(lcu);
/* 2. reset summary unit check */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
reset_summary_unit_check(lcu, device, suc_data->reason);
spin_lock_irqsave(&lcu->lock, flags);
_unstop_all_devices_on_lcu(lcu);
_restart_all_base_devices_on_lcu(lcu);
/* 3. read new alias configuration */
_schedule_lcu_update(lcu, device);
lcu->suc_data.device = NULL;
spin_unlock_irqrestore(&lcu->lock, flags);
}
/*
* note: this will be called from int handler context (cdev locked)
*/
void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
struct irb *irb)
{
struct alias_lcu *lcu;
char reason;
struct dasd_eckd_private *private;
char *sense;
private = (struct dasd_eckd_private *) device->private;
sense = dasd_get_sense(irb);
if (sense) {
reason = sense[8];
DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
"eckd handle summary unit check: reason", reason);
} else {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"eckd handle summary unit check:"
" no reason code available");
return;
}
lcu = private->lcu;
if (!lcu) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device not ready to handle summary"
" unit check (no lcu structure)");
return;
}
spin_lock(&lcu->lock);
_stop_all_devices_on_lcu(lcu, device);
/* prepare for lcu_update */
private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
/* If this device is about to be removed just return and wait for
* the next interrupt on a different device
*/
if (list_empty(&device->alias_list)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device is in offline processing,"
" don't do summary unit check handling");
spin_unlock(&lcu->lock);
return;
}
if (lcu->suc_data.device) {
/* already scheduled or running */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"previous instance of summary unit check worker"
" still pending");
spin_unlock(&lcu->lock);
return;
}
lcu->suc_data.reason = reason;
lcu->suc_data.device = device;
spin_unlock(&lcu->lock);
schedule_work(&lcu->suc_data.worker);
}

File diff suppressed because it is too large Load Diff


@@ -0,0 +1,659 @@
/*
* File...........: linux/drivers/s390/block/dasd_diag.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.c
* ...............: by Hartmunt Penner <hpenner@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
*/
#define KMSG_COMPONENT "dasd-diag"
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <asm/dasd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/s390_ext.h>
#include <asm/todclk.h>
#include <asm/vtoc.h>
#include <asm/diag.h>
#include "dasd_int.h"
#include "dasd_diag.h"
#define PRINTK_HEADER "dasd(diag):"
MODULE_LICENSE("GPL");
/* The maximum number of blocks per request (max_blocks) is dependent on the
* amount of storage that is available in the static I/O buffer for each
* device. Currently each device gets 2 pages. We want to fit two requests
* into the available memory so that we can immediately start the next if one
* finishes. */
#define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
sizeof(struct dasd_diag_req)) / \
sizeof(struct dasd_diag_bio)) / 2)
#define DIAG_MAX_RETRIES 32
#define DIAG_TIMEOUT (50 * HZ)
static struct dasd_discipline dasd_diag_discipline;
struct dasd_diag_private {
struct dasd_diag_characteristics rdc_data;
struct dasd_diag_rw_io iob;
struct dasd_diag_init_io iib;
blocknum_t pt_block;
struct ccw_dev_id dev_id;
};
struct dasd_diag_req {
unsigned int block_count;
struct dasd_diag_bio bio[0];
};
static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
/* Perform DIAG250 call with block I/O parameter list iob (input and output)
* and function code cmd.
* In case of an exception return 3. Otherwise return result of bitwise OR of
* resulting condition code and DIAG return code. */
static inline int dia250(void *iob, int cmd)
{
register unsigned long reg2 asm ("2") = (unsigned long) iob;
typedef union {
struct dasd_diag_init_io init_io;
struct dasd_diag_rw_io rw_io;
} addr_type;
int rc;
rc = 3;
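/* preset rc for the exception case; the EX_TABLE fixup resumes at
 * label 1 and leaves rc at 3 */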
asm volatile(
" diag 2,%2,0x250\n"
"0: ipm %0\n"
" srl %0,28\n"
" or %0,3\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc), "=m" (*(addr_type *) iob)
: "d" (cmd), "d" (reg2), "m" (*(addr_type *) iob)
: "3", "cc");
return rc;
}
/* Initialize block I/O to DIAG device using the specified blocksize and
* block offset. On success, return zero and set end_block to contain the
* number of blocks on the device minus the specified offset. Return non-zero
* otherwise. */
static inline int
mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
blocknum_t offset, blocknum_t *end_block)
{
struct dasd_diag_private *private;
struct dasd_diag_init_io *iib;
int rc;
private = (struct dasd_diag_private *) device->private;
iib = &private->iib;
memset(iib, 0, sizeof (struct dasd_diag_init_io));
iib->dev_nr = private->dev_id.devno;
iib->block_size = blocksize;
iib->offset = offset;
iib->flaga = DASD_DIAG_FLAGA_DEFAULT;
rc = dia250(iib, INIT_BIO);
if ((rc & 3) == 0 && end_block)
*end_block = iib->end_block;
return rc;
}
/* Remove block I/O environment for device. Return zero on success, non-zero
* otherwise. */
static inline int
mdsk_term_io(struct dasd_device * device)
{
struct dasd_diag_private *private;
struct dasd_diag_init_io *iib;
int rc;
private = (struct dasd_diag_private *) device->private;
iib = &private->iib;
memset(iib, 0, sizeof (struct dasd_diag_init_io));
iib->dev_nr = private->dev_id.devno;
rc = dia250(iib, TERM_BIO);
return rc;
}
/* Error recovery for failed DIAG requests - try to reestablish the DIAG
* environment. */
static void
dasd_diag_erp(struct dasd_device *device)
{
int rc;
mdsk_term_io(device);
rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
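/* rc 4 from INIT_BIO: the environment was set up, but the device is
 * only accessible read-only */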
if (rc == 4) {
if (!(device->features & DASD_FEATURE_READONLY)) {
dev_warn(&device->cdev->dev,
"The access mode of a DIAG device changed"
" to read-only");
device->features |= DASD_FEATURE_READONLY;
}
rc = 0;
}
if (rc)
dev_warn(&device->cdev->dev, "DIAG ERP failed with "
"rc=%d\n", rc);
}
/* Start a given request at the device. Return zero on success, non-zero
* otherwise. */
static int
dasd_start_diag(struct dasd_ccw_req * cqr)
{
struct dasd_device *device;
struct dasd_diag_private *private;
struct dasd_diag_req *dreq;
int rc;
device = cqr->startdev;
if (cqr->retries < 0) {
DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
"- no retry left)", cqr);
cqr->status = DASD_CQR_ERROR;
return -EIO;
}
private = (struct dasd_diag_private *) device->private;
dreq = (struct dasd_diag_req *) cqr->data;
private->iob.dev_nr = private->dev_id.devno;
private->iob.key = 0;
private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
private->iob.block_count = dreq->block_count;
private->iob.interrupt_params = (addr_t) cqr;
private->iob.bio_list = dreq->bio;
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
cqr->startclk = get_clock();
cqr->starttime = jiffies;
cqr->retries--;
rc = dia250(&private->iob, RW_BIO);
switch (rc) {
case 0: /* Synchronous I/O finished successfully */
cqr->stopclk = get_clock();
cqr->status = DASD_CQR_SUCCESS;
/* Indicate to calling function that only a dasd_schedule_bh()
and no timer is needed */
rc = -EACCES;
break;
case 8: /* Asynchronous I/O was started */
cqr->status = DASD_CQR_IN_IO;
rc = 0;
break;
default: /* Error condition */
cqr->status = DASD_CQR_QUEUED;
DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
dasd_diag_erp(device);
rc = -EIO;
break;
}
cqr->intrc = rc;
return rc;
}
/* Terminate given request at the device. */
static int
dasd_diag_term_IO(struct dasd_ccw_req * cqr)
{
struct dasd_device *device;
device = cqr->startdev;
mdsk_term_io(device);
mdsk_init_io(device, device->block->bp_block, 0, NULL);
cqr->status = DASD_CQR_CLEAR_PENDING;
cqr->stopclk = get_clock();
dasd_schedule_device_bh(device);
return 0;
}
/* Handle external interruption. */
static void
dasd_ext_handler(__u16 code)
{
struct dasd_ccw_req *cqr, *next;
struct dasd_device *device;
unsigned long long expires;
unsigned long flags;
u8 int_code, status;
addr_t ip;
int rc;
int_code = *((u8 *) DASD_DIAG_LC_INT_CODE);
status = *((u8 *) DASD_DIAG_LC_INT_STATUS);
switch (int_code) {
case DASD_DIAG_CODE_31BIT:
ip = (addr_t) *((u32 *) DASD_DIAG_LC_INT_PARM_31BIT);
break;
case DASD_DIAG_CODE_64BIT:
ip = (addr_t) *((u64 *) DASD_DIAG_LC_INT_PARM_64BIT);
break;
default:
return;
}
if (!ip) { /* no intparm: unsolicited interrupt */
DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
"interrupt");
return;
}
cqr = (struct dasd_ccw_req *) ip;
device = (struct dasd_device *) cqr->startdev;
if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
DBF_DEV_EVENT(DBF_WARNING, device,
" magic number of dasd_ccw_req 0x%08X doesn't"
" match discipline 0x%08X",
cqr->magic, *(int *) (&device->discipline->name));
return;
}
/* get irq lock to modify request queue */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
/* Check for a pending clear operation */
if (cqr->status == DASD_CQR_CLEAR_PENDING) {
cqr->status = DASD_CQR_CLEARED;
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return;
}
cqr->stopclk = get_clock();
expires = 0;
if (status == 0) {
cqr->status = DASD_CQR_SUCCESS;
/* Start first request on queue if possible -> fast_io. */
if (!list_empty(&device->ccw_queue)) {
next = list_entry(device->ccw_queue.next,
struct dasd_ccw_req, devlist);
if (next->status == DASD_CQR_QUEUED) {
rc = dasd_start_diag(next);
if (rc == 0)
expires = next->expires;
}
}
} else {
cqr->status = DASD_CQR_QUEUED;
DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
"request %p was %d (%d retries left)", cqr, status,
cqr->retries);
dasd_diag_erp(device);
}
if (expires != 0)
dasd_device_set_timer(device, expires);
else
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/* Check whether device can be controlled by DIAG discipline. Return zero on
* success, non-zero otherwise. */
static int
dasd_diag_check_device(struct dasd_device *device)
{
struct dasd_block *block;
struct dasd_diag_private *private;
struct dasd_diag_characteristics *rdc_data;
struct dasd_diag_bio bio;
struct vtoc_cms_label *label;
blocknum_t end_block;
unsigned int sb, bsize;
int rc;
private = (struct dasd_diag_private *) device->private;
if (private == NULL) {
private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
if (private == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Allocating memory for private DASD data "
"failed\n");
return -ENOMEM;
}
ccw_device_get_id(device->cdev, &private->dev_id);
device->private = (void *) private;
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"could not allocate dasd block structure");
device->private = NULL;
kfree(private);
return PTR_ERR(block);
}
device->block = block;
block->base = device;
/* Read Device Characteristics */
rdc_data = (void *) &(private->rdc_data);
rdc_data->dev_nr = private->dev_id.devno;
rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
rc = diag210((struct diag210 *) rdc_data);
if (rc) {
DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
"information (rc=%d)", rc);
rc = -EOPNOTSUPP;
goto out;
}
/* Figure out position of label block */
switch (private->rdc_data.vdev_class) {
case DEV_CLASS_FBA:
private->pt_block = 1;
break;
case DEV_CLASS_ECKD:
private->pt_block = 2;
break;
default:
dev_warn(&device->cdev->dev, "Device type %d is not supported "
"in DIAG mode\n", private->rdc_data.vdev_class);
rc = -EOPNOTSUPP;
goto out;
}
DBF_DEV_EVENT(DBF_INFO, device,
"%04X: %04X on real %04X/%02X",
rdc_data->dev_nr,
rdc_data->vdev_type,
rdc_data->rdev_type, rdc_data->rdev_model);
/* terminate all outstanding operations */
mdsk_term_io(device);
/* figure out blocksize of device */
label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
if (label == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"No memory to allocate initialization request");
rc = -ENOMEM;
goto out;
}
rc = 0;
end_block = 0;
/* try all sizes - needed for ECKD devices */
for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
mdsk_init_io(device, bsize, 0, &end_block);
memset(&bio, 0, sizeof (struct dasd_diag_bio));
bio.type = MDSK_READ_REQ;
bio.block_number = private->pt_block + 1;
bio.buffer = label;
memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
private->iob.dev_nr = rdc_data->dev_nr;
private->iob.key = 0;
private->iob.flags = 0; /* do synchronous io */
private->iob.block_count = 1;
private->iob.interrupt_params = 0;
private->iob.bio_list = &bio;
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
rc = dia250(&private->iob, RW_BIO);
if (rc == 3) {
dev_warn(&device->cdev->dev,
"A 64-bit DIAG call failed\n");
rc = -EOPNOTSUPP;
goto out_label;
}
mdsk_term_io(device);
if (rc == 0)
break;
}
if (bsize > PAGE_SIZE) {
dev_warn(&device->cdev->dev, "Accessing the DASD failed because"
" of an incorrect format (rc=%d)\n", rc);
rc = -EIO;
goto out_label;
}
/* check for label block */
if (memcmp(label->label_id, DASD_DIAG_CMS1,
sizeof(DASD_DIAG_CMS1)) == 0) {
/* get formatted blocksize from label block */
bsize = (unsigned int) label->block_size;
block->blocks = (unsigned long) label->block_count;
} else
block->blocks = end_block;
block->bp_block = bsize;
block->s2b_shift = 0; /* bits to shift 512 to get a block */
for (sb = 512; sb < bsize; sb = sb << 1)
block->s2b_shift++;
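/* e.g. a 4096 byte block size yields s2b_shift 3 (4096 == 512 << 3) */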
rc = mdsk_init_io(device, block->bp_block, 0, NULL);
if (rc && (rc != 4)) {
dev_warn(&device->cdev->dev, "DIAG initialization "
"failed with rc=%d\n", rc);
rc = -EIO;
} else {
if (rc == 4)
device->features |= DASD_FEATURE_READONLY;
dev_info(&device->cdev->dev,
"New DASD with %ld byte/block, total size %ld KB%s\n",
(unsigned long) block->bp_block,
(unsigned long) (block->blocks <<
block->s2b_shift) >> 1,
(rc == 4) ? ", read-only device" : "");
rc = 0;
}
out_label:
free_page((long) label);
out:
if (rc) {
device->block = NULL;
dasd_free_block(block);
device->private = NULL;
kfree(private);
}
return rc;
}
/* Fill in virtual disk geometry for device. Return zero on success, non-zero
* otherwise. */
static int
dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
if (dasd_check_blocksize(block->bp_block) != 0)
return -EINVAL;
geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
geo->heads = 16;
geo->sectors = 128 >> block->s2b_shift;
return 0;
}
static dasd_erp_fn_t
dasd_diag_erp_action(struct dasd_ccw_req * cqr)
{
return dasd_default_erp_action;
}
static dasd_erp_fn_t
dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
{
return dasd_default_erp_postaction;
}
/* Create DASD request from block device request. Return pointer to new
* request on success, ERR_PTR otherwise. */
static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
struct dasd_block *block,
struct request *req)
{
struct dasd_ccw_req *cqr;
struct dasd_diag_req *dreq;
struct dasd_diag_bio *dbio;
struct req_iterator iter;
struct bio_vec *bv;
char *dst;
unsigned int count, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char rw_cmd;
if (rq_data_dir(req) == READ)
rw_cmd = MDSK_READ_REQ;
else if (rq_data_dir(req) == WRITE)
rw_cmd = MDSK_WRITE_REQ;
else
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
first_rec = blk_rq_pos(req) >> block->s2b_shift;
last_rec =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
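/* blk_rq_pos() and blk_rq_sectors() count 512-byte sectors; shifting
 * by s2b_shift converts sector numbers to device block numbers */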
/* Check struct bio and count the number of blocks for the request. */
count = 0;
rq_for_each_segment(bv, req, iter) {
if (bv->bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv->bv_len >> (block->s2b_shift + 9);
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
/* Build the request */
datasize = sizeof(struct dasd_diag_req) +
count*sizeof(struct dasd_diag_bio);
cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
if (IS_ERR(cqr))
return cqr;
dreq = (struct dasd_diag_req *) cqr->data;
dreq->block_count = count;
dbio = dreq->bio;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
memset(dbio, 0, sizeof (struct dasd_diag_bio));
dbio->type = rw_cmd;
dbio->block_number = recid + 1;
dbio->buffer = dst;
dbio++;
dst += blksize;
recid++;
}
}
cqr->retries = DIAG_MAX_RETRIES;
cqr->buildclk = get_clock();
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = memdev;
cqr->memdev = memdev;
cqr->block = block;
cqr->expires = DIAG_TIMEOUT;
cqr->status = DASD_CQR_FILLED;
return cqr;
}
/* Release DASD request. Return non-zero if request was successful, zero
* otherwise. */
static int
dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
int status;
status = cqr->status == DASD_CQR_DONE;
dasd_sfree_request(cqr, cqr->memdev);
return status;
}
static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
{
cqr->status = DASD_CQR_FILLED;
}
/* Fill in IOCTL data for device. */
static int
dasd_diag_fill_info(struct dasd_device * device,
struct dasd_information2_t * info)
{
struct dasd_diag_private *private;
private = (struct dasd_diag_private *) device->private;
info->label_block = (unsigned int) private->pt_block;
info->FBA_layout = 1;
info->format = DASD_FORMAT_LDL;
info->characteristics_size = sizeof (struct dasd_diag_characteristics);
memcpy(info->characteristics,
&((struct dasd_diag_private *) device->private)->rdc_data,
sizeof (struct dasd_diag_characteristics));
info->confdata_size = 0;
return 0;
}
static void
dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *stat)
{
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"dump sense not available for DIAG data");
}
static struct dasd_discipline dasd_diag_discipline = {
.owner = THIS_MODULE,
.name = "DIAG",
.ebcname = "DIAG",
.max_blocks = DIAG_MAX_BLOCKS,
.check_device = dasd_diag_check_device,
.fill_geometry = dasd_diag_fill_geometry,
.start_IO = dasd_start_diag,
.term_IO = dasd_diag_term_IO,
.handle_terminated_request = dasd_diag_handle_terminated_request,
.erp_action = dasd_diag_erp_action,
.erp_postaction = dasd_diag_erp_postaction,
.build_cp = dasd_diag_build_cp,
.free_cp = dasd_diag_free_cp,
.dump_sense = dasd_diag_dump_sense,
.fill_info = dasd_diag_fill_info,
};
static int __init
dasd_diag_init(void)
{
if (!MACHINE_IS_VM) {
pr_info("Discipline %s cannot be used without z/VM\n",
dasd_diag_discipline.name);
return -ENODEV;
}
ASCEBC(dasd_diag_discipline.ebcname, 4);
ctl_set_bit(0, 9);
register_external_interrupt(0x2603, dasd_ext_handler);
dasd_diag_discipline_pointer = &dasd_diag_discipline;
return 0;
}
static void __exit
dasd_diag_cleanup(void)
{
unregister_external_interrupt(0x2603, dasd_ext_handler);
ctl_clear_bit(0, 9);
dasd_diag_discipline_pointer = NULL;
}
module_init(dasd_diag_init);
module_exit(dasd_diag_cleanup);


@@ -0,0 +1,127 @@
/*
* File...........: linux/drivers/s390/block/dasd_diag.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.h
* ...............: by Hartmunt Penner <hpenner@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
*/
#define MDSK_WRITE_REQ 0x01
#define MDSK_READ_REQ 0x02
#define INIT_BIO 0x00
#define RW_BIO 0x01
#define TERM_BIO 0x02
#define DEV_CLASS_FBA 0x01
#define DEV_CLASS_ECKD 0x04
#define DASD_DIAG_LC_INT_CODE 132
#define DASD_DIAG_LC_INT_STATUS 133
#define DASD_DIAG_LC_INT_PARM_31BIT 128
#define DASD_DIAG_LC_INT_PARM_64BIT 4536
#define DASD_DIAG_CODE_31BIT 0x03
#define DASD_DIAG_CODE_64BIT 0x07
#define DASD_DIAG_RWFLAG_ASYNC 0x02
#define DASD_DIAG_RWFLAG_NOCACHE 0x01
#define DASD_DIAG_FLAGA_FORMAT_64BIT 0x80
struct dasd_diag_characteristics {
u16 dev_nr;
u16 rdc_len;
u8 vdev_class;
u8 vdev_type;
u8 vdev_status;
u8 vdev_flags;
u8 rdev_class;
u8 rdev_type;
u8 rdev_model;
u8 rdev_features;
} __attribute__ ((packed, aligned(4)));
#ifdef CONFIG_64BIT
#define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT
typedef u64 blocknum_t;
typedef s64 sblocknum_t;
struct dasd_diag_bio {
u8 type;
u8 status;
u8 spare1[2];
u32 alet;
blocknum_t block_number;
void *buffer;
} __attribute__ ((packed, aligned(8)));
struct dasd_diag_init_io {
u16 dev_nr;
u8 flaga;
u8 spare1[21];
u32 block_size;
u8 spare2[4];
blocknum_t offset;
sblocknum_t start_block;
blocknum_t end_block;
u8 spare3[8];
} __attribute__ ((packed, aligned(8)));
struct dasd_diag_rw_io {
u16 dev_nr;
u8 flaga;
u8 spare1[21];
u8 key;
u8 flags;
u8 spare2[2];
u32 block_count;
u32 alet;
u8 spare3[4];
u64 interrupt_params;
struct dasd_diag_bio *bio_list;
u8 spare4[8];
} __attribute__ ((packed, aligned(8)));
#else /* CONFIG_64BIT */
#define DASD_DIAG_FLAGA_DEFAULT 0x0
typedef u32 blocknum_t;
typedef s32 sblocknum_t;
struct dasd_diag_bio {
u8 type;
u8 status;
u16 spare1;
blocknum_t block_number;
u32 alet;
void *buffer;
} __attribute__ ((packed, aligned(8)));
struct dasd_diag_init_io {
u16 dev_nr;
u8 flaga;
u8 spare1[21];
u32 block_size;
blocknum_t offset;
sblocknum_t start_block;
blocknum_t end_block;
u8 spare2[24];
} __attribute__ ((packed, aligned(8)));
struct dasd_diag_rw_io {
u16 dev_nr;
u8 flaga;
u8 spare1[21];
u8 key;
u8 flags;
u8 spare2[2];
u32 block_count;
u32 alet;
struct dasd_diag_bio *bio_list;
u32 interrupt_params;
u8 spare3[20];
} __attribute__ ((packed, aligned(8)));
#endif /* CONFIG_64BIT */

File diff suppressed because it is too large


@@ -0,0 +1,464 @@
/*
* File...........: linux/drivers/s390/block/dasd_eckd.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
*/
#ifndef DASD_ECKD_H
#define DASD_ECKD_H
/*****************************************************************************
* SECTION: CCW Definitions
****************************************************************************/
#define DASD_ECKD_CCW_WRITE 0x05
#define DASD_ECKD_CCW_READ 0x06
#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
#define DASD_ECKD_CCW_READ_HOME_ADDRESS 0x0a
#define DASD_ECKD_CCW_WRITE_KD 0x0d
#define DASD_ECKD_CCW_READ_KD 0x0e
#define DASD_ECKD_CCW_ERASE 0x11
#define DASD_ECKD_CCW_READ_COUNT 0x12
#define DASD_ECKD_CCW_SLCK 0x14
#define DASD_ECKD_CCW_WRITE_RECORD_ZERO 0x15
#define DASD_ECKD_CCW_READ_RECORD_ZERO 0x16
#define DASD_ECKD_CCW_WRITE_CKD 0x1d
#define DASD_ECKD_CCW_READ_CKD 0x1e
#define DASD_ECKD_CCW_PSF 0x27
#define DASD_ECKD_CCW_RSSD 0x3e
#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
#define DASD_ECKD_CCW_SNSS 0x54
#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
#define DASD_ECKD_CCW_WRITE_MT 0x85
#define DASD_ECKD_CCW_READ_MT 0x86
#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
#define DASD_ECKD_CCW_READ_KD_MT 0x8e
#define DASD_ECKD_CCW_RELEASE 0x94
#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5
#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6
#define DASD_ECKD_CCW_RESERVE 0xB4
#define DASD_ECKD_CCW_PFX 0xE7
#define DASD_ECKD_CCW_PFX_READ 0xEA
#define DASD_ECKD_CCW_RSCK 0xF9
/*
* Perform Subsystem Function / Sub-Orders
*/
#define PSF_ORDER_PRSSD 0x18
#define PSF_ORDER_SSC 0x1D
/*
* Size that is reported for large volumes in the old 16-bit no_cyl field
*/
#define LV_COMPAT_CYL 0xFFFE
/*****************************************************************************
* SECTION: Type Definitions
****************************************************************************/
struct eckd_count {
__u16 cyl;
__u16 head;
__u8 record;
__u8 kl;
__u16 dl;
} __attribute__ ((packed));
struct ch_t {
__u16 cyl;
__u16 head;
} __attribute__ ((packed));
struct chs_t {
__u16 cyl;
__u16 head;
__u32 sector;
} __attribute__ ((packed));
struct chr_t {
__u16 cyl;
__u16 head;
__u8 record;
} __attribute__ ((packed));
struct geom_t {
__u16 cyl;
__u16 head;
__u32 sector;
} __attribute__ ((packed));
struct eckd_home {
__u8 skip_control[14];
__u16 cell_number;
__u8 physical_addr[3];
__u8 flag;
struct ch_t track_addr;
__u8 reserved;
__u8 key_length;
__u8 reserved2[2];
} __attribute__ ((packed));
struct DE_eckd_data {
struct {
unsigned char perm:2; /* Permissions on this extent */
unsigned char reserved:1;
unsigned char seek:2; /* Seek control */
unsigned char auth:2; /* Access authorization */
unsigned char pci:1; /* PCI Fetch mode */
} __attribute__ ((packed)) mask;
struct {
unsigned char mode:2; /* Architecture mode */
unsigned char ckd:1; /* CKD Conversion */
unsigned char operation:3; /* Operation mode */
unsigned char cfw:1; /* Cache fast write */
unsigned char dfw:1; /* DASD fast write */
} __attribute__ ((packed)) attributes;
__u16 blk_size; /* Blocksize */
__u16 fast_write_id;
__u8 ga_additional; /* Global Attributes Additional */
__u8 ga_extended; /* Global Attributes Extended */
struct ch_t beg_ext;
struct ch_t end_ext;
unsigned long long ep_sys_time; /* Ext Parameter - System Time Stamp */
__u8 ep_format; /* Extended Parameter format byte */
__u8 ep_prio; /* Extended Parameter priority I/O byte */
__u8 ep_reserved1; /* Extended Parameter Reserved */
__u8 ep_rec_per_track; /* Number of records on a track */
__u8 ep_reserved[4]; /* Extended Parameter Reserved */
} __attribute__ ((packed));
struct LO_eckd_data {
struct {
unsigned char orientation:2;
unsigned char operation:6;
} __attribute__ ((packed)) operation;
struct {
unsigned char last_bytes_used:1;
unsigned char reserved:6;
unsigned char read_count_suffix:1;
} __attribute__ ((packed)) auxiliary;
__u8 unused;
__u8 count;
struct ch_t seek_addr;
struct chr_t search_arg;
__u8 sector;
__u16 length;
} __attribute__ ((packed));
struct LRE_eckd_data {
struct {
unsigned char orientation:2;
unsigned char operation:6;
} __attribute__ ((packed)) operation;
struct {
unsigned char length_valid:1;
unsigned char length_scope:1;
unsigned char imbedded_ccw_valid:1;
unsigned char check_bytes:2;
unsigned char imbedded_count_valid:1;
unsigned char reserved:1;
unsigned char read_count_suffix:1;
} __attribute__ ((packed)) auxiliary;
__u8 imbedded_ccw;
__u8 count;
struct ch_t seek_addr;
struct chr_t search_arg;
__u8 sector;
__u16 length;
__u8 imbedded_count;
__u8 extended_operation;
__u16 extended_parameter_length;
__u8 extended_parameter[0];
} __attribute__ ((packed));
/* Prefix data for format 0x00 and 0x01 */
struct PFX_eckd_data {
unsigned char format;
struct {
unsigned char define_extent:1;
unsigned char time_stamp:1;
unsigned char verify_base:1;
unsigned char hyper_pav:1;
unsigned char reserved:4;
} __attribute__ ((packed)) validity;
__u8 base_address;
__u8 aux;
__u8 base_lss;
__u8 reserved[7];
struct DE_eckd_data define_extent;
struct LRE_eckd_data locate_record;
} __attribute__ ((packed));
struct dasd_eckd_characteristics {
__u16 cu_type;
struct {
unsigned char support:2;
unsigned char async:1;
unsigned char reserved:1;
unsigned char cache_info:1;
unsigned char model:3;
} __attribute__ ((packed)) cu_model;
__u16 dev_type;
__u8 dev_model;
struct {
unsigned char mult_burst:1;
unsigned char RT_in_LR:1;
unsigned char reserved1:1;
unsigned char RD_IN_LR:1;
unsigned char reserved2:4;
unsigned char reserved3:8;
unsigned char defect_wr:1;
unsigned char XRC_supported:1;
unsigned char reserved4:1;
unsigned char striping:1;
unsigned char reserved5:4;
unsigned char cfw:1;
unsigned char reserved6:2;
unsigned char cache:1;
unsigned char dual_copy:1;
unsigned char dfw:1;
unsigned char reset_alleg:1;
unsigned char sense_down:1;
} __attribute__ ((packed)) facilities;
__u8 dev_class;
__u8 unit_type;
__u16 no_cyl;
__u16 trk_per_cyl;
__u8 sec_per_trk;
__u8 byte_per_track[3];
__u16 home_bytes;
__u8 formula;
union {
struct {
__u8 f1;
__u16 f2;
__u16 f3;
} __attribute__ ((packed)) f_0x01;
struct {
__u8 f1;
__u8 f2;
__u8 f3;
__u8 f4;
__u8 f5;
} __attribute__ ((packed)) f_0x02;
} __attribute__ ((packed)) factors;
__u16 first_alt_trk;
__u16 no_alt_trk;
__u16 first_dia_trk;
__u16 no_dia_trk;
__u16 first_sup_trk;
__u16 no_sup_trk;
__u8 MDR_ID;
__u8 OBR_ID;
__u8 director;
__u8 rd_trk_set;
__u16 max_rec_zero;
__u8 reserved1;
__u8 RWANY_in_LR;
__u8 factor6;
__u8 factor7;
__u8 factor8;
__u8 reserved2[3];
__u8 reserved3[6];
__u32 long_no_cyl;
} __attribute__ ((packed));
/* elements of the configuration data */
struct dasd_ned {
struct {
__u8 identifier:2;
__u8 token_id:1;
__u8 sno_valid:1;
__u8 subst_sno:1;
__u8 recNED:1;
__u8 emuNED:1;
__u8 reserved:1;
} __attribute__ ((packed)) flags;
__u8 descriptor;
__u8 dev_class;
__u8 reserved;
__u8 dev_type[6];
__u8 dev_model[3];
__u8 HDA_manufacturer[3];
__u8 HDA_location[2];
__u8 HDA_seqno[12];
__u8 ID;
__u8 unit_addr;
} __attribute__ ((packed));
struct dasd_sneq {
struct {
__u8 identifier:2;
__u8 reserved:6;
} __attribute__ ((packed)) flags;
__u8 res1;
__u16 format;
__u8 res2[4]; /* byte 4- 7 */
__u8 sua_flags; /* byte 8 */
__u8 base_unit_addr; /* byte 9 */
__u8 res3[22]; /* byte 10-31 */
} __attribute__ ((packed));
struct vd_sneq {
struct {
__u8 identifier:2;
__u8 reserved:6;
} __attribute__ ((packed)) flags;
__u8 res1;
__u16 format;
__u8 res2[4]; /* byte 4- 7 */
__u8 uit[16]; /* byte 8-23 */
__u8 res3[8]; /* byte 24-31 */
} __attribute__ ((packed));
struct dasd_gneq {
struct {
__u8 identifier:2;
__u8 reserved:6;
} __attribute__ ((packed)) flags;
__u8 reserved[7];
__u16 subsystemID;
__u8 reserved2[22];
} __attribute__ ((packed));
struct dasd_eckd_path {
__u8 opm;
__u8 ppm;
__u8 npm;
};
struct dasd_rssd_features {
char feature[256];
} __attribute__((packed));
/*
* Perform Subsystem Function - Prepare for Read Subsystem Data
*/
struct dasd_psf_prssd_data {
unsigned char order;
unsigned char flags;
unsigned char reserved[4];
unsigned char suborder;
unsigned char varies[5];
} __attribute__ ((packed));
/*
* Perform Subsystem Function - Set Subsystem Characteristics
*/
struct dasd_psf_ssc_data {
unsigned char order;
unsigned char flags;
unsigned char cu_type[4];
unsigned char suborder;
unsigned char reserved[59];
} __attribute__((packed));
/*
* some structures and definitions for alias handling
*/
struct dasd_unit_address_configuration {
struct {
char ua_type;
char base_ua;
} unit[256];
} __attribute__((packed));
#define MAX_DEVICES_PER_LCU 256
/* flags on the LCU */
#define NEED_UAC_UPDATE 0x01
#define UPDATE_PENDING 0x02
enum pavtype {NO_PAV, BASE_PAV, HYPER_PAV};
struct alias_root {
struct list_head serverlist;
spinlock_t lock;
};
struct alias_server {
struct list_head server;
struct dasd_uid uid;
struct list_head lculist;
};
struct summary_unit_check_work_data {
char reason;
struct dasd_device *device;
struct work_struct worker;
};
struct read_uac_work_data {
struct dasd_device *device;
struct delayed_work dwork;
};
struct alias_lcu {
struct list_head lcu;
struct dasd_uid uid;
enum pavtype pav;
char flags;
spinlock_t lock;
struct list_head grouplist;
struct list_head active_devices;
struct list_head inactive_devices;
struct dasd_unit_address_configuration *uac;
struct summary_unit_check_work_data suc_data;
struct read_uac_work_data ruac_data;
struct dasd_ccw_req *rsu_cqr;
};
struct alias_pav_group {
struct list_head group;
struct dasd_uid uid;
struct alias_lcu *lcu;
struct list_head baselist;
struct list_head aliaslist;
struct dasd_device *next;
};
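/*
 * A rough sketch of how the alias structures above hang together (one
 * possible reading of the list_head members; the actual linkage is set
 * up by the alias management code):
 *
 *   alias_root
 *     serverlist -> alias_server (one per storage server uid)
 *       lculist -> alias_lcu (one per logical control unit)
 *         grouplist -> alias_pav_group (devices with a common base uid)
 *           baselist  -> dasd_device (base devices)
 *           aliaslist -> dasd_device (alias devices)
 */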
struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
u8 *conf_data;
int conf_len;
/* pointers to specific parts in the conf_data */
struct dasd_ned *ned;
struct dasd_sneq *sneq;
struct vd_sneq *vdsneq;
struct dasd_gneq *gneq;
struct dasd_eckd_path path_data;
struct eckd_count count_area[5];
int init_cqr_status;
int uses_cdl;
struct attrib_data_t attrib; /* e.g. cache operations */
struct dasd_rssd_features features;
u32 real_cyl;
/* alias management */
struct dasd_uid uid;
struct alias_pav_group *pavgroup;
struct alias_lcu *lcu;
int count;
};
int dasd_alias_make_device_known_to_lcu(struct dasd_device *);
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
int dasd_alias_add_device(struct dasd_device *);
int dasd_alias_remove_device(struct dasd_device *);
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
#endif /* DASD_ECKD_H */

View File

@@ -0,0 +1,712 @@
/*
* Character device driver for extended error reporting.
*
* Copyright (C) 2005 IBM Corporation
* extended error reporting for DASD ECKD devices
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>
#include <linux/err.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eer):"
/*
* SECTION: the internal buffer
*/
/*
* The internal buffer is meant to store opaque blobs of data, so it does
* not know of higher level concepts like triggers.
* It consists of a number of pages that are used as a ringbuffer. Each data
* blob is stored in a simple record that consists of an integer, which
* contains the size of the following data, and the data bytes themselves.
*
* To allow for multiple independent readers we create one internal buffer
* each time the device is opened and destroy the buffer when the file is
* closed again. The number of pages used for this buffer is determined by
* the module parameter eer_pages.
*
* One record can be written to a buffer by using the functions
* - dasd_eer_start_record (one time per record to write the size to the
* buffer and reserve the space for the data)
* - dasd_eer_write_buffer (one or more times per record to write the data)
* The data can be written in several steps but you will have to compute
* the total size up front for the invocation of dasd_eer_start_record.
* If the ringbuffer is full, dasd_eer_start_record will remove the required
* number of old records.
*
* A record is typically read in two steps, first read the integer that
* specifies the size of the following data, then read the data.
* Both can be done by
* - dasd_eer_read_buffer
*
* For all mentioned functions you need to get the bufferlock first and keep
* it until a complete record is written or read.
*
* All information necessary to keep track of an internal buffer is kept in
* a struct eerbuffer. The buffer specific to a file pointer is stored in
* the private_data field of that file. To be able to write data to all
* existing buffers, each buffer is also added to the bufferlist.
* If the user does not want to read a complete record in one go, we have to
* keep track of the rest of the record. residual stores the number of bytes
* that are still to be delivered. If the rest of the record is invalidated
* between two reads, residual is set to -1 so that the next read will fail.
* All entries in the eerbuffer structure are protected with the bufferlock.
* To avoid races between writing to a buffer on the one side and creating
* and destroying buffers on the other side, the bufferlock must also be used
* to protect the bufferlist.
*/
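/*
 * Illustrative record layout (an example only, assuming a 4-byte int):
 * a 12-byte blob stored via dasd_eer_start_record plus
 * dasd_eer_write_buffer occupies 16 consecutive ring bytes,
 *
 *   | 0x0000000c | 12 data bytes |
 *      size        payload
 *
 * wrapping around at buffersize if necessary.
 */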
static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);
struct eerbuffer {
struct list_head list;
char **buffer;
int buffersize;
int buffer_page_count;
int head;
int tail;
int residual;
};
static LIST_HEAD(bufferlist);
static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
/*
* How many free bytes are available on the buffer.
* Needs to be called with bufferlock held.
*/
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
if (eerb->head < eerb->tail)
return eerb->tail - eerb->head - 1;
return eerb->buffersize - eerb->head + eerb->tail - 1;
}
/*
* How many bytes of buffer space are used.
* Needs to be called with bufferlock held.
*/
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{
if (eerb->head >= eerb->tail)
return eerb->head - eerb->tail;
return eerb->buffersize - eerb->tail + eerb->head;
}
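/*
 * Note on the two helpers above: free + filled always equals
 * buffersize - 1, i.e. one byte is deliberately left unused so that
 * head == tail unambiguously means "empty". Worked example with
 * buffersize 16, tail 5, head 9: filled = 9 - 5 = 4 and
 * free = 16 - 9 + 5 - 1 = 11.
 */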
/*
* The dasd_eer_write_buffer function just copies count bytes of data
* to the buffer. Make sure to call dasd_eer_start_record first, to
* make sure that enough free space is available.
* Needs to be called with bufferlock held.
*/
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
char *data, int count)
{
unsigned long headindex,localhead;
unsigned long rest, len;
char *nextdata;
nextdata = data;
rest = count;
while (rest > 0) {
headindex = eerb->head / PAGE_SIZE;
localhead = eerb->head % PAGE_SIZE;
len = min(rest, PAGE_SIZE - localhead);
memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
nextdata += len;
rest -= len;
eerb->head += len;
if (eerb->head == eerb->buffersize)
eerb->head = 0; /* wrap around */
BUG_ON(eerb->head > eerb->buffersize);
}
}
/*
* Needs to be called with bufferlock held.
*/
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
{
unsigned long tailindex,localtail;
unsigned long rest, len, finalcount;
char *nextdata;
finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
nextdata = data;
rest = finalcount;
while (rest > 0) {
tailindex = eerb->tail / PAGE_SIZE;
localtail = eerb->tail % PAGE_SIZE;
len = min(rest, PAGE_SIZE - localtail);
memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
nextdata += len;
rest -= len;
eerb->tail += len;
if (eerb->tail == eerb->buffersize)
eerb->tail = 0; /* wrap around */
BUG_ON(eerb->tail > eerb->buffersize);
}
return finalcount;
}
/*
* Whenever you want to write a blob of data to the internal buffer you
* have to start by using this function first. It will write the number
* of bytes that will be written to the buffer. If necessary it will remove
* old records to make room for the new one.
* Needs to be called with bufferlock held.
*/
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
int tailcount;
if (count + sizeof(count) > eerb->buffersize)
return -ENOMEM;
while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
if (eerb->residual > 0) {
eerb->tail += eerb->residual;
if (eerb->tail >= eerb->buffersize)
eerb->tail -= eerb->buffersize;
eerb->residual = -1;
}
dasd_eer_read_buffer(eerb, (char *) &tailcount,
sizeof(tailcount));
eerb->tail += tailcount;
if (eerb->tail >= eerb->buffersize)
eerb->tail -= eerb->buffersize;
}
dasd_eer_write_buffer(eerb, (char*) &count, sizeof(count));
return 0;
}
/*
* Release pages that are not used anymore.
*/
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
int i;
for (i = 0; i < no_pages; i++)
free_page((unsigned long) buf[i]);
}
/*
* Allocate a new set of memory pages.
*/
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
int i;
for (i = 0; i < no_pages; i++) {
buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
if (!buf[i]) {
dasd_eer_free_buffer_pages(buf, i);
return -ENOMEM;
}
}
return 0;
}
/*
* SECTION: The extended error reporting functionality
*/
/*
* When a DASD device driver wants to report an error, it calls the
* function dasd_eer_write and gives the respective trigger ID as
* parameter. Currently there are four kinds of triggers:
*
* DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems
* DASD_EER_PPRCSUSPEND: PPRC was suspended
* DASD_EER_NOPATH: There is no path to the device left.
* DASD_EER_STATECHANGE: The state of the device has changed.
*
* For the first three triggers all required information can be supplied by
* the caller. For these triggers a record is written by the function
* dasd_eer_write_standard_trigger.
*
* The DASD_EER_STATECHANGE trigger is special since a sense subsystem
* status ccw needs to be executed to gather the necessary sense data first.
* The dasd_eer_snss function will queue the SNSS request and the request
* callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
* trigger.
*
* To avoid memory allocations at runtime, the necessary memory is allocated
* when the extended error reporting is enabled for a device (by
* dasd_eer_probe). There is one sense subsystem status request for each
* eer enabled DASD device. The presence of the cqr in device->eer_cqr
* indicates that eer is enabled for the device. The use of the snss request
* is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
* that the cqr is currently in use, dasd_eer_snss cannot start a second
* request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
* the SNSS request will check the bit and call dasd_eer_snss again.
*/
#define SNSS_DATA_SIZE 44
#define DASD_EER_BUSID_SIZE 10
struct dasd_eer_header {
__u32 total_size;
__u32 trigger;
__u64 tv_sec;
__u64 tv_usec;
char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));
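/*
 * A minimal, hypothetical user-space sketch of a consumer for the
 * records written below. The device node path /dev/dasd_eer and the
 * assumption that one read returns a whole record are illustrative
 * only; dasd_eer_read below may deliver a record in several pieces
 * (see its residual handling). Guarded with #if 0 since it is an
 * example, not part of the driver.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8192];
	ssize_t n;
	int fd;

	fd = open("/dev/dasd_eer", O_RDONLY);	/* assumed device node */
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		/* each record starts with a struct dasd_eer_header */
		struct dasd_eer_header *hdr = (struct dasd_eer_header *) buf;

		printf("trigger %u from %.10s, %u bytes total\n",
		       (unsigned int) hdr->trigger, hdr->busid,
		       (unsigned int) hdr->total_size);
	}
	close(fd);
	return 0;
}
#endif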
/*
* The following function can be used for those triggers that have
* all necessary data available when the function is called.
* If the parameter cqr is not NULL, the chain of requests will be searched
* for valid sense data, and all valid sense data sets will be added to
* the triggers data.
*/
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
struct dasd_ccw_req *cqr,
int trigger)
{
struct dasd_ccw_req *temp_cqr;
int data_size;
struct timeval tv;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
char *sense;
/* go through cqr chain and count the valid sense data sets */
data_size = 0;
for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
if (dasd_get_sense(&temp_cqr->irb))
data_size += 32;
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = trigger;
do_gettimeofday(&tv);
header.tv_sec = tv.tv_sec;
header.tv_usec = tv.tv_usec;
strncpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
list_for_each_entry(eerb, &bufferlist, list) {
dasd_eer_start_record(eerb, header.total_size);
dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
sense = dasd_get_sense(&temp_cqr->irb);
if (sense)
dasd_eer_write_buffer(eerb, sense, 32);
}
dasd_eer_write_buffer(eerb, "EOR", 4);
}
spin_unlock_irqrestore(&bufferlock, flags);
wake_up_interruptible(&dasd_eer_read_wait_queue);
}
/*
* This function writes a DASD_EER_STATECHANGE trigger.
*/
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
struct dasd_ccw_req *cqr,
int trigger)
{
int data_size;
int snss_rc;
struct timeval tv;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
if (snss_rc)
data_size = 0;
else
data_size = SNSS_DATA_SIZE;
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = DASD_EER_STATECHANGE;
do_gettimeofday(&tv);
header.tv_sec = tv.tv_sec;
header.tv_usec = tv.tv_usec;
strncpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
list_for_each_entry(eerb, &bufferlist, list) {
dasd_eer_start_record(eerb, header.total_size);
dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
if (!snss_rc)
dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
dasd_eer_write_buffer(eerb, "EOR", 4);
}
spin_unlock_irqrestore(&bufferlock, flags);
wake_up_interruptible(&dasd_eer_read_wait_queue);
}
/*
* This function is called for all triggers. It calls the appropriate
* function that writes the actual trigger records.
*/
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
unsigned int id)
{
if (!device->eer_cqr)
return;
switch (id) {
case DASD_EER_FATALERROR:
case DASD_EER_PPRCSUSPEND:
dasd_eer_write_standard_trigger(device, cqr, id);
break;
case DASD_EER_NOPATH:
dasd_eer_write_standard_trigger(device, NULL, id);
break;
case DASD_EER_STATECHANGE:
dasd_eer_write_snss_trigger(device, cqr, id);
break;
default: /* unknown trigger, so we write it without any sense data */
dasd_eer_write_standard_trigger(device, NULL, id);
break;
}
}
EXPORT_SYMBOL(dasd_eer_write);
/*
* Start a sense subsystem status request.
* Needs to be called with the device held.
*/
void dasd_eer_snss(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
cqr = device->eer_cqr;
if (!cqr) /* Device not eer enabled. */
return;
if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
/* Sense subsystem status request in use. */
set_bit(DASD_FLAG_EER_SNSS, &device->flags);
return;
}
/* cdev is already locked, can't use dasd_add_request_head */
clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
cqr->status = DASD_CQR_QUEUED;
list_add(&cqr->devlist, &device->ccw_queue);
dasd_schedule_device_bh(device);
}
/*
* Callback function for use with sense subsystem status request.
*/
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
struct dasd_device *device = cqr->startdev;
unsigned long flags;
dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (device->eer_cqr == cqr) {
clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
/* Another SNSS has been requested in the meantime. */
dasd_eer_snss(device);
cqr = NULL;
}
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (cqr)
/*
* Extended error recovery has been switched off while
* the SNSS request was running. It could even have
* been switched off and on again in which case there
* is a new ccw in device->eer_cqr. Free the "old"
* snss request now.
*/
dasd_kfree_request(cqr, device);
}
/*
* Enable error reporting on a given device.
*/
int dasd_eer_enable(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
unsigned long flags;
struct ccw1 *ccw;
if (device->eer_cqr)
return 0;
if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
return -EPERM; /* FIXME: -EMEDIUMTYPE ? */
cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
SNSS_DATA_SIZE, device);
if (IS_ERR(cqr))
return -ENOMEM;
cqr->startdev = device;
cqr->retries = 255;
cqr->expires = 10 * HZ;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_SNSS;
ccw->count = SNSS_DATA_SIZE;
ccw->flags = 0;
ccw->cda = (__u32)(addr_t) cqr->data;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
cqr->callback = dasd_eer_snss_cb;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (!device->eer_cqr) {
device->eer_cqr = cqr;
cqr = NULL;
}
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (cqr)
dasd_kfree_request(cqr, device);
return 0;
}
/*
* Disable error reporting on a given device.
*/
void dasd_eer_disable(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
unsigned long flags;
int in_use;
if (!device->eer_cqr)
return;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
cqr = device->eer_cqr;
device->eer_cqr = NULL;
clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (cqr && !in_use)
dasd_kfree_request(cqr, device);
}
/*
* SECTION: the device operations
*/
/*
* On the one side we need a lock to access our internal buffer, on the
* other side a copy_to_user can sleep. So we need to copy the data we have
* to transfer in a readbuffer, which is protected by the readbuffer_mutex.
*/
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);
static int dasd_eer_open(struct inode *inp, struct file *filp)
{
struct eerbuffer *eerb;
unsigned long flags;
eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
if (!eerb)
return -ENOMEM;
lock_kernel();
eerb->buffer_page_count = eer_pages;
if (eerb->buffer_page_count < 1 ||
eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
kfree(eerb);
DBF_EVENT(DBF_WARNING, "can't open device since module "
"parameter eer_pages is smaller than 1 or"
" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
unlock_kernel();
return -EINVAL;
}
eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
GFP_KERNEL);
if (!eerb->buffer) {
kfree(eerb);
unlock_kernel();
return -ENOMEM;
}
if (dasd_eer_allocate_buffer_pages(eerb->buffer,
eerb->buffer_page_count)) {
kfree(eerb->buffer);
kfree(eerb);
unlock_kernel();
return -ENOMEM;
}
filp->private_data = eerb;
spin_lock_irqsave(&bufferlock, flags);
list_add(&eerb->list, &bufferlist);
spin_unlock_irqrestore(&bufferlock, flags);
unlock_kernel();
return nonseekable_open(inp,filp);
}
static int dasd_eer_close(struct inode *inp, struct file *filp)
{
struct eerbuffer *eerb;
unsigned long flags;
eerb = (struct eerbuffer *) filp->private_data;
spin_lock_irqsave(&bufferlock, flags);
list_del(&eerb->list);
spin_unlock_irqrestore(&bufferlock, flags);
dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
kfree(eerb->buffer);
kfree(eerb);
return 0;
}
static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
int tc,rc;
int tailcount,effective_count;
unsigned long flags;
struct eerbuffer *eerb;
eerb = (struct eerbuffer *) filp->private_data;
if (mutex_lock_interruptible(&readbuffer_mutex))
return -ERESTARTSYS;
spin_lock_irqsave(&bufferlock, flags);
if (eerb->residual < 0) { /* the remainder of this record */
/* has been deleted */
eerb->residual = 0;
spin_unlock_irqrestore(&bufferlock, flags);
mutex_unlock(&readbuffer_mutex);
return -EIO;
} else if (eerb->residual > 0) {
/* OK we still have a second half of a record to deliver */
effective_count = min(eerb->residual, (int) count);
eerb->residual -= effective_count;
} else {
tc = 0;
while (!tc) {
tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
sizeof(tailcount));
if (!tc) {
/* no data available */
spin_unlock_irqrestore(&bufferlock, flags);
mutex_unlock(&readbuffer_mutex);
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
rc = wait_event_interruptible(
dasd_eer_read_wait_queue,
eerb->head != eerb->tail);
if (rc)
return rc;
if (mutex_lock_interruptible(&readbuffer_mutex))
return -ERESTARTSYS;
spin_lock_irqsave(&bufferlock, flags);
}
}
WARN_ON(tc != sizeof(tailcount));
effective_count = min(tailcount,(int)count);
eerb->residual = tailcount - effective_count;
}
tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
WARN_ON(tc != effective_count);
spin_unlock_irqrestore(&bufferlock, flags);
if (copy_to_user(buf, readbuffer, effective_count)) {
mutex_unlock(&readbuffer_mutex);
return -EFAULT;
}
mutex_unlock(&readbuffer_mutex);
return effective_count;
}
static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
{
unsigned int mask;
unsigned long flags;
struct eerbuffer *eerb;
eerb = (struct eerbuffer *) filp->private_data;
poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
spin_lock_irqsave(&bufferlock, flags);
if (eerb->head != eerb->tail)
mask = POLLIN | POLLRDNORM ;
else
mask = 0;
spin_unlock_irqrestore(&bufferlock, flags);
return mask;
}
static const struct file_operations dasd_eer_fops = {
.open = &dasd_eer_open,
.release = &dasd_eer_close,
.read = &dasd_eer_read,
.poll = &dasd_eer_poll,
.owner = THIS_MODULE,
};
static struct miscdevice *dasd_eer_dev = NULL;
int __init dasd_eer_init(void)
{
int rc;
dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
if (!dasd_eer_dev)
return -ENOMEM;
dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
dasd_eer_dev->name = "dasd_eer";
dasd_eer_dev->fops = &dasd_eer_fops;
rc = misc_register(dasd_eer_dev);
if (rc) {
kfree(dasd_eer_dev);
dasd_eer_dev = NULL;
DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
"register misc device");
return rc;
}
return 0;
}
void dasd_eer_exit(void)
{
if (dasd_eer_dev) {
WARN_ON(misc_deregister(dasd_eer_dev) != 0);
kfree(dasd_eer_dev);
dasd_eer_dev = NULL;
}
}

View File

@@ -0,0 +1,184 @@
/*
* File...........: linux/drivers/s390/block/dasd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/ctype.h>
#include <linux/init.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_erp:"
#include "dasd_int.h"
struct dasd_ccw_req *
dasd_alloc_erp_request(char *magic, int cplength, int datasize,
struct dasd_device * device)
{
unsigned long flags;
struct dasd_ccw_req *cqr;
char *data;
int size;
/* Sanity checks */
BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
if (cplength > 0)
size += cplength * sizeof(struct ccw1);
if (datasize > 0)
size += datasize;
spin_lock_irqsave(&device->mem_lock, flags);
cqr = (struct dasd_ccw_req *)
dasd_alloc_chunk(&device->erp_chunks, size);
spin_unlock_irqrestore(&device->mem_lock, flags);
if (cqr == NULL)
return ERR_PTR(-ENOMEM);
memset(cqr, 0, sizeof(struct dasd_ccw_req));
INIT_LIST_HEAD(&cqr->devlist);
INIT_LIST_HEAD(&cqr->blocklist);
data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
cqr->cpaddr = NULL;
if (cplength > 0) {
cqr->cpaddr = (struct ccw1 *) data;
data += cplength*sizeof(struct ccw1);
memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
}
cqr->data = NULL;
if (datasize > 0) {
cqr->data = data;
memset(cqr->data, 0, datasize);
}
strncpy((char *) &cqr->magic, magic, 4);
ASCEBC((char *) &cqr->magic, 4);
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
return cqr;
}
void
dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device)
{
unsigned long flags;
spin_lock_irqsave(&device->mem_lock, flags);
dasd_free_chunk(&device->erp_chunks, cqr);
spin_unlock_irqrestore(&device->mem_lock, flags);
atomic_dec(&device->ref_count);
}
/*
* dasd_default_erp_action just retries the current cqr
*/
struct dasd_ccw_req *
dasd_default_erp_action(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
device = cqr->startdev;
/* just retry - there is nothing to save ... I got no sense data.... */
if (cqr->retries > 0) {
DBF_DEV_EVENT(DBF_DEBUG, device,
"default ERP called (%i retries left)",
cqr->retries);
cqr->lpm = LPM_ANYPATH;
cqr->status = DASD_CQR_FILLED;
} else {
pr_err("%s: default ERP has run out of retries and failed\n",
dev_name(&device->cdev->dev));
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_clock();
}
return cqr;
} /* end dasd_default_erp_action */
/*
* DESCRIPTION
* Frees all ERPs of the current ERP Chain and sets the status
* of the original CQR either to DASD_CQR_DONE if ERP was successful
* or to DASD_CQR_FAILED if ERP was NOT successful.
* NOTE: This function is only called if no discipline postaction
* is available
*
* PARAMETER
* erp current erp_head
*
* RETURN VALUES
* cqr pointer to the original CQR
*/
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
{
int success;
BUG_ON(cqr->refers == NULL || cqr->function == NULL);
success = cqr->status == DASD_CQR_DONE;
/* free all ERPs - but NOT the original cqr */
while (cqr->refers != NULL) {
struct dasd_ccw_req *refers;
refers = cqr->refers;
/* remove the request from the block queue */
list_del(&cqr->blocklist);
/* free the finished erp request */
dasd_free_erp_request(cqr, cqr->memdev);
cqr = refers;
}
/* set corresponding status to original cqr */
if (success)
cqr->status = DASD_CQR_DONE;
else {
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_clock();
}
return cqr;
} /* end default_erp_postaction */
void
dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
{
struct dasd_device *device;
device = cqr->startdev;
/* dump sense data */
if (device->discipline && device->discipline->dump_sense)
device->discipline->dump_sense(device, cqr, irb);
}
void
dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
{
struct dasd_device *device;
device = cqr->startdev;
/* dump sense data to s390 debug feature */
if (device->discipline && device->discipline->dump_sense_dbf)
device->discipline->dump_sense_dbf(device, irb, "log");
}
EXPORT_SYMBOL(dasd_log_sense_dbf);
EXPORT_SYMBOL(dasd_default_erp_action);
EXPORT_SYMBOL(dasd_default_erp_postaction);
EXPORT_SYMBOL(dasd_alloc_erp_request);
EXPORT_SYMBOL(dasd_free_erp_request);
EXPORT_SYMBOL(dasd_log_sense);

View File

@@ -0,0 +1,626 @@
/*
* File...........: linux/drivers/s390/block/dasd_fba.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2009
*/
#define KMSG_COMPONENT "dasd-fba"
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <asm/debug.h>
#include <linux/slab.h>
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/todclk.h>
#include <asm/ccwdev.h>
#include "dasd_int.h"
#include "dasd_fba.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(fba):"
#define DASD_FBA_CCW_WRITE 0x41
#define DASD_FBA_CCW_READ 0x42
#define DASD_FBA_CCW_LOCATE 0x43
#define DASD_FBA_CCW_DEFINE_EXTENT 0x63
MODULE_LICENSE("GPL");
static struct dasd_discipline dasd_fba_discipline;
struct dasd_fba_private {
struct dasd_fba_characteristics rdc_data;
};
static struct ccw_device_id dasd_fba_ids[] = {
{ CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), .driver_info = 0x1},
{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), .driver_info = 0x2},
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);
static struct ccw_driver dasd_fba_driver; /* see below */
static int
dasd_fba_probe(struct ccw_device *cdev)
{
return dasd_generic_probe(cdev, &dasd_fba_discipline);
}
static int
dasd_fba_set_online(struct ccw_device *cdev)
{
return dasd_generic_set_online(cdev, &dasd_fba_discipline);
}
static struct ccw_driver dasd_fba_driver = {
.name = "dasd-fba",
.owner = THIS_MODULE,
.ids = dasd_fba_ids,
.probe = dasd_fba_probe,
.remove = dasd_generic_remove,
.set_offline = dasd_generic_set_offline,
.set_online = dasd_fba_set_online,
.notify = dasd_generic_notify,
.freeze = dasd_generic_pm_freeze,
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
};
static void
define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
int blksize, int beg, int nr)
{
ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
ccw->cda = (__u32) __pa(data);
memset(data, 0, sizeof (struct DE_fba_data));
if (rw == WRITE)
(data->mask).perm = 0x0;
else if (rw == READ)
(data->mask).perm = 0x1;
else
data->mask.perm = 0x2;
data->blk_size = blksize;
data->ext_loc = beg;
data->ext_end = nr - 1;
}
static void
locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
int block_nr, int block_ct)
{
ccw->cmd_code = DASD_FBA_CCW_LOCATE;
ccw->flags = 0;
ccw->count = 8;
ccw->cda = (__u32) __pa(data);
memset(data, 0, sizeof (struct LO_fba_data));
if (rw == WRITE)
data->operation.cmd = 0x5;
else if (rw == READ)
data->operation.cmd = 0x6;
else
data->operation.cmd = 0x8;
data->blk_nr = block_nr;
data->blk_ct = block_ct;
}
static int
dasd_fba_check_characteristics(struct dasd_device *device)
{
struct dasd_block *block;
struct dasd_fba_private *private;
struct ccw_device *cdev = device->cdev;
int rc;
private = (struct dasd_fba_private *) device->private;
if (!private) {
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private) {
dev_warn(&device->cdev->dev,
"Allocating memory for private DASD "
"data failed\n");
return -ENOMEM;
}
device->private = (void *) private;
} else {
memset(private, 0, sizeof(*private));
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
"dasd block structure");
device->private = NULL;
kfree(private);
return PTR_ERR(block);
}
device->block = block;
block->base = device;
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
&private->rdc_data, 32);
if (rc) {
DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
"characteristics returned error %d", rc);
device->block = NULL;
dasd_free_block(block);
device->private = NULL;
kfree(private);
return rc;
}
dev_info(&device->cdev->dev,
"New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
"and %d B/blk\n",
cdev->id.dev_type,
cdev->id.dev_model,
cdev->id.cu_type,
cdev->id.cu_model,
((private->rdc_data.blk_bdsa *
(private->rdc_data.blk_size >> 9)) >> 11),
private->rdc_data.blk_size);
return 0;
}
static int dasd_fba_do_analysis(struct dasd_block *block)
{
struct dasd_fba_private *private;
int sb, rc;
private = (struct dasd_fba_private *) block->base->private;
rc = dasd_check_blocksize(private->rdc_data.blk_size);
if (rc) {
DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
private->rdc_data.blk_size);
return rc;
}
block->blocks = private->rdc_data.blk_bdsa;
block->bp_block = private->rdc_data.blk_size;
block->s2b_shift = 0; /* bits to shift 512 to get a block */
for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
block->s2b_shift++;
return 0;
}
static int dasd_fba_fill_geometry(struct dasd_block *block,
struct hd_geometry *geo)
{
if (dasd_check_blocksize(block->bp_block) != 0)
return -EINVAL;
geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
geo->heads = 16;
geo->sectors = 128 >> block->s2b_shift;
return 0;
}
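/*
 * Worked example for the geometry above (illustrative): with 4096-byte
 * blocks s2b_shift is 3, so sectors = 128 >> 3 = 16, heads = 16, and a
 * device with N blocks reports cylinders = (N << 3) >> 10 = N / 128.
 */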
static dasd_erp_fn_t
dasd_fba_erp_action(struct dasd_ccw_req * cqr)
{
return dasd_default_erp_action;
}
static dasd_erp_fn_t
dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
{
if (cqr->function == dasd_default_erp_action)
return dasd_default_erp_postaction;
DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p",
cqr->function);
return NULL;
}
static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
struct irb *irb)
{
char mask;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((irb->scsw.cmd.dstat & mask) == mask) {
dasd_generic_handle_state_change(device);
return;
}
/* check for unsolicited interrupts */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"unsolicited interrupt received");
device->discipline->dump_sense_dbf(device, irb, "unsolicited");
dasd_schedule_device_bh(device);
return;
}
static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
struct dasd_block *block,
struct request *req)
{
struct dasd_fba_private *private;
unsigned long *idaws;
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec *bv;
char *dst;
int count, cidaw, cplength, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char cmd;
private = (struct dasd_fba_private *) block->base->private;
if (rq_data_dir(req) == READ) {
cmd = DASD_FBA_CCW_READ;
} else if (rq_data_dir(req) == WRITE) {
cmd = DASD_FBA_CCW_WRITE;
} else
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
first_rec = blk_rq_pos(req) >> block->s2b_shift;
last_rec =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
rq_for_each_segment(bv, req, iter) {
if (bv->bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv->bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
cidaw += bv->bv_len / blksize;
#endif
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
/* 1x define extent + 1x locate record + number of blocks */
cplength = 2 + count;
/* 1x define extent + 1x locate record */
datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
cidaw * sizeof(unsigned long);
/*
* Find out number of additional locate record ccws if the device
* can't do data chaining.
*/
if (private->rdc_data.mode.bits.data_chain == 0) {
cplength += count - 1;
datasize += (count - 1)*sizeof(struct LO_fba_data);
}
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
/* First ccw is define extent. */
define_extent(ccw++, cqr->data, rq_data_dir(req),
block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
/* Build locate_record + read/write ccws. */
idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
LO_data = (struct LO_fba_data *) (idaws + cidaw);
/* Locate record for all blocks for smart devices. */
if (private->rdc_data.mode.bits.data_chain != 0) {
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
}
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
if (copy && rq_data_dir(req) == WRITE)
memcpy(copy + bv->bv_offset, dst, bv->bv_len);
if (copy)
dst = copy + bv->bv_offset;
}
for (off = 0; off < bv->bv_len; off += blksize) {
/* Locate record for stupid devices. */
if (private->rdc_data.mode.bits.data_chain == 0) {
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw, LO_data++,
rq_data_dir(req),
recid - first_rec, 1);
ccw->flags = CCW_FLAG_CC;
ccw++;
} else {
if (recid > first_rec)
ccw[-1].flags |= CCW_FLAG_DC;
else
ccw[-1].flags |= CCW_FLAG_CC;
}
ccw->cmd_code = cmd;
ccw->count = block->bp_block;
if (idal_is_needed(dst, blksize)) {
ccw->cda = (__u32)(addr_t) idaws;
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
ccw->cda = (__u32)(addr_t) dst;
ccw->flags = 0;
}
ccw++;
dst += blksize;
recid++;
}
}
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = memdev;
cqr->memdev = memdev;
cqr->block = block;
cqr->expires = 5 * 60 * HZ; /* 5 minutes */
cqr->retries = 32;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
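/*
 * Shape of the channel program built above (illustrative, for a three
 * block request): with data chaining one locate record covers all
 * blocks,
 *
 *   DEFINE EXTENT -> LOCATE RECORD -> READ/WRITE -> READ/WRITE -> READ/WRITE
 *
 * without data chaining each block gets its own locate record,
 *
 *   DEFINE EXTENT -> LOCATE RECORD -> READ/WRITE -> LOCATE RECORD -> ...
 */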
static int
dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
struct dasd_fba_private *private;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec *bv;
char *dst, *cda;
unsigned int blksize, off;
int status;
if (!dasd_page_cache)
goto out;
private = (struct dasd_fba_private *) cqr->block->base->private;
blksize = cqr->block->bp_block;
ccw = cqr->cpaddr;
/* Skip over define extent & locate record. */
ccw++;
if (private->rdc_data.mode.bits.data_chain != 0)
ccw++;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
/* Skip locate record. */
if (private->rdc_data.mode.bits.data_chain == 0)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
cda = *((char **)((addr_t) ccw->cda));
else
cda = (char *)((addr_t) ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv->bv_len);
kmem_cache_free(dasd_page_cache,
(void *)((addr_t)cda & PAGE_MASK));
}
dst = NULL;
}
ccw++;
}
}
out:
status = cqr->status == DASD_CQR_DONE;
dasd_sfree_request(cqr, cqr->memdev);
return status;
}
static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
{
cqr->status = DASD_CQR_FILLED;
}
static int
dasd_fba_fill_info(struct dasd_device * device,
struct dasd_information2_t * info)
{
info->label_block = 1;
info->FBA_layout = 1;
info->format = DASD_FORMAT_LDL;
info->characteristics_size = sizeof(struct dasd_fba_characteristics);
memcpy(info->characteristics,
&((struct dasd_fba_private *) device->private)->rdc_data,
sizeof (struct dasd_fba_characteristics));
info->confdata_size = 0;
return 0;
}
static void
dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
char *reason)
{
u64 *sense;
sense = (u64 *) dasd_get_sense(irb);
if (sense) {
DBF_DEV_EVENT(DBF_EMERG, device,
"%s: %s %02x%02x%02x %016llx %016llx %016llx "
"%016llx", reason,
scsw_is_tm(&irb->scsw) ? "t" : "c",
scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
scsw_dstat(&irb->scsw), sense[0], sense[1],
sense[2], sense[3]);
} else {
DBF_DEV_EVENT(DBF_EMERG, device, "%s",
"SORRY - NO VALID SENSE AVAILABLE\n");
}
}
static void
dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *irb)
{
char *page;
struct ccw1 *act, *end, *last;
int len, sl, sct, count;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"No memory to dump sense data");
return;
}
len = sprintf(page, KERN_ERR PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, " %02x",
irb->ecw[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
}
} else {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
printk(KERN_ERR "%s", page);
/* dump the Channel Program */
/* print first CCWs (maximum 8) */
act = req->cpaddr;
for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
end = min(act + 8, last);
len = sprintf(page, KERN_ERR PRINTK_HEADER
" Related CP in req: %p\n", req);
while (act <= end) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
printk(KERN_ERR "%s", page);
/* print failing CCW area */
len = 0;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
}
end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
while (act <= end) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
/* print last CCWs */
if (act < last - 2) {
act = last - 2;
len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
}
while (act <= last) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
if (len > 0)
printk(KERN_ERR "%s", page);
free_page((unsigned long) page);
}
/*
* max_blocks is dependent on the amount of storage that is available
* in the static io buffer for each device. Currently each device has
* 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
* 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
* up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
* addition we have one define extent ccw + 16 bytes of data and a
* locate record ccw for each block (stupid devices!) + 16 bytes of data.
* That makes:
* (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
* We want to fit two into the available memory so that we can immediately
* start the next request if one finishes off. That makes 100.1 blocks
* for one request. Give a little safety and the result is 96.
*/
static struct dasd_discipline dasd_fba_discipline = {
.owner = THIS_MODULE,
.name = "FBA ",
.ebcname = "FBA ",
.max_blocks = 96,
.check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis,
.fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
.handle_terminated_request = dasd_fba_handle_terminated_request,
.erp_action = dasd_fba_erp_action,
.erp_postaction = dasd_fba_erp_postaction,
.handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt,
.build_cp = dasd_fba_build_cp,
.free_cp = dasd_fba_free_cp,
.dump_sense = dasd_fba_dump_sense,
.dump_sense_dbf = dasd_fba_dump_sense_dbf,
.fill_info = dasd_fba_fill_info,
};
static int __init
dasd_fba_init(void)
{
int ret;
ASCEBC(dasd_fba_discipline.ebcname, 4);
ret = ccw_driver_register(&dasd_fba_driver);
if (!ret)
wait_for_device_probe();
return ret;
}
static void __exit
dasd_fba_cleanup(void)
{
ccw_driver_unregister(&dasd_fba_driver);
}
module_init(dasd_fba_init);
module_exit(dasd_fba_cleanup);

View File

@@ -0,0 +1,72 @@
/*
* File...........: linux/drivers/s390/block/dasd_fba.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
*/
#ifndef DASD_FBA_H
#define DASD_FBA_H
struct DE_fba_data {
struct {
unsigned char perm:2; /* Permissions on this extent */
unsigned char zero:2; /* Must be zero */
unsigned char da:1; /* usually zero */
unsigned char diag:1; /* allow diagnose */
unsigned char zero2:2; /* zero */
} __attribute__ ((packed)) mask;
__u8 zero; /* Must be zero */
__u16 blk_size; /* Blocksize */
__u32 ext_loc; /* Extent locator */
__u32 ext_beg; /* logical number of block 0 in extent */
__u32 ext_end; /* logical number of last block in extent */
} __attribute__ ((packed));
struct LO_fba_data {
struct {
unsigned char zero:4;
unsigned char cmd:4;
} __attribute__ ((packed)) operation;
__u8 auxiliary;
__u16 blk_ct;
__u32 blk_nr;
} __attribute__ ((packed));
struct dasd_fba_characteristics {
union {
__u8 c;
struct {
unsigned char reserved:1;
unsigned char overrunnable:1;
unsigned char burst_byte:1;
unsigned char data_chain:1;
unsigned char zeros:4;
} __attribute__ ((packed)) bits;
} __attribute__ ((packed)) mode;
union {
__u8 c;
struct {
unsigned char zero0:1;
unsigned char removable:1;
unsigned char shared:1;
unsigned char zero1:1;
unsigned char mam:1;
unsigned char zeros:3;
} __attribute__ ((packed)) bits;
} __attribute__ ((packed)) features;
__u8 dev_class;
__u8 unit_type;
__u16 blk_size;
__u32 blk_per_cycl;
__u32 blk_per_bound;
__u32 blk_bdsa;
__u32 reserved0;
__u16 reserved1;
__u16 blk_ce;
__u32 reserved2;
__u16 reserved3;
} __attribute__ ((packed));
#endif /* DASD_FBA_H */

View File

@@ -0,0 +1,178 @@
/*
* File...........: linux/drivers/s390/block/dasd_genhd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
*
* gendisk related functions for the dasd driver.
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <asm/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_gendisk:"
#include "dasd_int.h"
/*
* Allocate and register gendisk structure for device.
*/
int dasd_gendisk_alloc(struct dasd_block *block)
{
struct gendisk *gdp;
struct dasd_device *base;
int len;
/* Make sure the minor for this device exists. */
base = block->base;
if (base->devindex >= DASD_PER_MAJOR)
return -EBUSY;
gdp = alloc_disk(1 << DASD_PARTN_BITS);
if (!gdp)
return -ENOMEM;
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
gdp->first_minor = base->devindex << DASD_PARTN_BITS;
gdp->fops = &dasd_device_operations;
gdp->driverfs_dev = &base->cdev->dev;
/*
* Set device name.
* dasda - dasdz : 26 devices
* dasdaa - dasdzz : 676 devices, added up = 702
* dasdaaa - dasdzzz : 17576 devices, added up = 18278
* dasdaaaa - dasdzzzz : 456976 devices, added up = 475254
*/
len = sprintf(gdp->disk_name, "dasd");
if (base->devindex > 25) {
if (base->devindex > 701) {
if (base->devindex > 18277)
len += sprintf(gdp->disk_name + len, "%c",
'a'+(((base->devindex-18278)
/17576)%26));
len += sprintf(gdp->disk_name + len, "%c",
'a'+(((base->devindex-702)/676)%26));
}
len += sprintf(gdp->disk_name + len, "%c",
'a'+(((base->devindex-26)/26)%26));
}
len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26));
if (block->base->features & DASD_FEATURE_READONLY)
set_disk_ro(gdp, 1);
gdp->private_data = block;
gdp->queue = block->request_queue;
block->gdp = gdp;
set_capacity(block->gdp, 0);
add_disk(block->gdp);
return 0;
}
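/*
 * Self-contained sketch (not part of the driver, guarded with #if 0)
 * of the name scheme implemented above: devindex is mapped to dasda,
 * ..., dasdz, dasdaa, ..., dasdzzzz, a bijective base-26 encoding.
 */
#if 0
#include <stdio.h>

static void dasd_example_name(unsigned int devindex, char *name)
{
	int len = sprintf(name, "dasd");

	if (devindex > 25) {
		if (devindex > 701) {
			if (devindex > 18277)
				len += sprintf(name + len, "%c", 'a' +
					       (((devindex - 18278) / 17576) % 26));
			len += sprintf(name + len, "%c", 'a' +
				       (((devindex - 702) / 676) % 26));
		}
		len += sprintf(name + len, "%c", 'a' +
			       (((devindex - 26) / 26) % 26));
	}
	sprintf(name + len, "%c", 'a' + (devindex % 26));
}

int main(void)
{
	char name[9];

	dasd_example_name(0, name);	/* prints "dasda" */
	puts(name);
	dasd_example_name(701, name);	/* prints "dasdzz" */
	puts(name);
	return 0;
}
#endif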
/*
* Unregister and free gendisk structure for device.
*/
void dasd_gendisk_free(struct dasd_block *block)
{
if (block->gdp) {
del_gendisk(block->gdp);
block->gdp->queue = NULL;
put_disk(block->gdp);
block->gdp = NULL;
}
}
/*
* Trigger a partition detection.
*/
int dasd_scan_partitions(struct dasd_block *block)
{
struct block_device *bdev;
bdev = bdget_disk(block->gdp, 0);
if (!bdev || blkdev_get(bdev, FMODE_READ) < 0)
return -ENODEV;
/*
* See fs/partition/check.c:register_disk,rescan_partitions
* Can't call rescan_partitions directly. Use ioctl.
*/
ioctl_by_bdev(bdev, BLKRRPART, 0);
/*
* Since the matching blkdev_put call to the blkdev_get in
* this function is not called before dasd_destroy_partitions
* the offline open_count limit needs to be increased from
* 0 to 1. This is done by setting device->bdev (see
* dasd_generic_set_offline). As long as the partition
* detection is running no offline should be allowed. That
* is why the assignment to device->bdev is done AFTER
* the BLKRRPART ioctl.
*/
block->bdev = bdev;
return 0;
}
/*
* Remove all inodes in the system for a device, delete the
* partitions and make device unusable by setting its size to zero.
*/
void dasd_destroy_partitions(struct dasd_block *block)
{
/* The two structs have 168/176 bytes on 31/64 bit. */
struct blkpg_partition bpart;
struct blkpg_ioctl_arg barg;
struct block_device *bdev;
/*
* Get the bdev pointer from the device structure and clear
* device->bdev to lower the offline open_count limit again.
*/
bdev = block->bdev;
block->bdev = NULL;
/*
* See fs/partition/check.c:delete_partition
* Can't call delete_partitions directly. Use ioctl.
* The ioctl also does locking and invalidation.
*/
memset(&bpart, 0, sizeof(struct blkpg_partition));
memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
barg.data = (void __force __user *) &bpart;
barg.op = BLKPG_DEL_PARTITION;
for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
invalidate_partition(block->gdp, 0);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
blkdev_put(bdev, FMODE_READ);
set_capacity(block->gdp, 0);
}
int dasd_gendisk_init(void)
{
int rc;
/* Register to static dasd major 94 */
rc = register_blkdev(DASD_MAJOR, "dasd");
if (rc != 0) {
pr_warning("Registering the device driver with major number "
"%d failed\n", DASD_MAJOR);
return rc;
}
return 0;
}
void dasd_gendisk_exit(void)
{
unregister_blkdev(DASD_MAJOR, "dasd");
}

View File

@@ -0,0 +1,690 @@
/*
* File...........: linux/drivers/s390/block/dasd_int.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2009
*/
#ifndef DASD_INT_H
#define DASD_INT_H
#ifdef __KERNEL__
/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
/*
* States a dasd device can have:
* new: the dasd_device structure is allocated.
* known: the discipline for the device is identified.
* basic: the device can do basic i/o.
* unfmt: the device could not be analyzed (format is unknown).
* ready: partition detection is done and the device can do block io.
* online: the device accepts requests from the block device queue.
*
* Things to do for startup state transitions:
* new -> known: find discipline for the device and create devfs entries.
* known -> basic: request irq line for the device.
* basic -> ready: do the initial analysis, e.g. format detection,
* do block device setup and detect partitions.
* ready -> online: schedule the device tasklet.
* Things to do for shutdown state transitions:
* online -> ready: just set the new device state.
* ready -> basic: flush requests from the block device layer, clear
* partition information and reset format information.
* basic -> known: terminate all requests and free irq.
* known -> new: remove devfs entries and forget discipline.
*/
#define DASD_STATE_NEW 0
#define DASD_STATE_KNOWN 1
#define DASD_STATE_BASIC 2
#define DASD_STATE_UNFMT 3
#define DASD_STATE_READY 4
#define DASD_STATE_ONLINE 5
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <asm/ccwdev.h>
#include <linux/workqueue.h>
#include <asm/debug.h>
#include <asm/dasd.h>
#include <asm/idals.h>
/* DASD discipline magic */
#define DASD_ECKD_MAGIC 0xC5C3D2C4
#define DASD_DIAG_MAGIC 0xC4C9C1C7
#define DASD_FBA_MAGIC 0xC6C2C140
/*
* SECTION: Type definitions
*/
struct dasd_device;
struct dasd_block;
/* BIT DEFINITIONS FOR SENSE DATA */
#define DASD_SENSE_BIT_0 0x80
#define DASD_SENSE_BIT_1 0x40
#define DASD_SENSE_BIT_2 0x20
#define DASD_SENSE_BIT_3 0x10
/* BIT DEFINITIONS FOR SIM SENSE */
#define DASD_SIM_SENSE 0x0F
#define DASD_SIM_MSG_TO_OP 0x03
#define DASD_SIM_LOG 0x0C
/*
* SECTION: MACROs for klogd and s390 debug feature (dbf)
*/
#define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \
do { \
debug_sprintf_event(d_device->debug_area, \
d_level, \
d_str "\n", \
d_data); \
} while(0)
#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) \
do { \
debug_sprintf_exception(d_device->debug_area, \
d_level, \
d_str "\n", \
d_data); \
} while(0)
#define DBF_EVENT(d_level, d_str, d_data...)\
do { \
debug_sprintf_event(dasd_debug_area, \
d_level,\
d_str "\n", \
d_data); \
} while(0)
#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
do { \
struct ccw_dev_id __dev_id; \
ccw_device_get_id(d_cdev, &__dev_id); \
debug_sprintf_event(dasd_debug_area, \
d_level, \
"0.%x.%04x " d_str "\n", \
__dev_id.ssid, __dev_id.devno, d_data); \
} while (0)
#define DBF_EXC(d_level, d_str, d_data...)\
do { \
debug_sprintf_exception(dasd_debug_area, \
d_level,\
d_str "\n", \
d_data); \
} while(0)
/* limit size for an errorstring */
#define ERRORLENGTH 30
/* definition of dbf debug levels */
#define DBF_EMERG 0 /* system is unusable */
#define DBF_ALERT 1 /* action must be taken immediately */
#define DBF_CRIT 2 /* critical conditions */
#define DBF_ERR 3 /* error conditions */
#define DBF_WARNING 4 /* warning conditions */
#define DBF_NOTICE 5 /* normal but significant condition */
#define DBF_INFO 6 /* informational */
#define DBF_DEBUG 6 /* debug-level messages */
/* messages to be written via klogd and dbf */
#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
do { \
printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
dev_name(&d_device->cdev->dev), d_args); \
DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
} while(0)
#define MESSAGE(d_loglevel,d_string,d_args...)\
do { \
printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
DBF_EVENT(DBF_ALERT, d_string, d_args); \
} while(0)
/* messages to be written via klogd only */
#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
do { \
printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
dev_name(&d_device->cdev->dev), d_args); \
} while(0)
#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
do { \
printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
} while(0)
struct dasd_ccw_req {
unsigned int magic; /* Eye catcher */
struct list_head devlist; /* for dasd_device request queue */
struct list_head blocklist; /* for dasd_block request queue */
/* Where to execute what... */
struct dasd_block *block; /* the originating block device */
struct dasd_device *memdev; /* the device used to allocate this */
struct dasd_device *startdev; /* device the request is started on */
void *cpaddr; /* address of ccw or tcw */
unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */
char status; /* status of this request */
short retries; /* A retry counter */
unsigned long flags; /* flags of this request */
/* ... and how */
unsigned long starttime; /* jiffies time of request start */
int expires; /* expiration period in jiffies */
char lpm; /* logical path mask */
void *data; /* pointer to data area */
/* these are important for recovering erroneous requests */
int intrc; /* internal error, e.g. from start_IO */
struct irb irb; /* device status in case of an error */
struct dasd_ccw_req *refers; /* ERP-chain queueing. */
void *function; /* originating ERP action */
/* these are for statistics only */
unsigned long long buildclk; /* TOD-clock of request generation */
unsigned long long startclk; /* TOD-clock of request start */
unsigned long long stopclk; /* TOD-clock of request interrupt */
unsigned long long endclk; /* TOD-clock of request termination */
/* Callback that is called after reaching final status. */
void (*callback)(struct dasd_ccw_req *, void *data);
void *callback_data;
};
/*
* dasd_ccw_req -> status can be:
*/
#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */
#define DASD_CQR_DONE 0x01 /* request is completed successfully */
#define DASD_CQR_NEED_ERP 0x02 /* request needs recovery action */
#define DASD_CQR_IN_ERP 0x03 /* request is in recovery */
#define DASD_CQR_FAILED 0x04 /* request is finally failed */
#define DASD_CQR_TERMINATED 0x05 /* request was stopped by driver */
#define DASD_CQR_QUEUED 0x80 /* request is queued to be processed */
#define DASD_CQR_IN_IO 0x81 /* request is currently in IO */
#define DASD_CQR_ERROR 0x82 /* request is completed with error */
#define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */
#define DASD_CQR_CLEARED 0x84 /* request was cleared */
#define DASD_CQR_SUCCESS 0x85 /* request was successful */
/* per dasd_ccw_req flags */
#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
/* Signature for error recovery functions. */
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
/*
* the struct dasd_discipline is
* something like a table of virtual functions, if you think of dasd_eckd
* inheriting dasd...
* no, currently we are not planning to reimplement the driver in C++
*/
struct dasd_discipline {
struct module *owner;
char ebcname[8]; /* a name used for tagging and printks */
char name[8]; /* a name used for tagging and printks */
int max_blocks; /* maximum number of blocks to be chained */
struct list_head list; /* used for list of disciplines */
/*
* Device recognition functions. check_device is used to verify
* the sense data and the information returned by read device
* characteristics. It returns 0 if the discipline can be used
* for the device in question. uncheck_device is called during
* device shutdown to deregister a device from its discipline.
*/
int (*check_device) (struct dasd_device *);
void (*uncheck_device) (struct dasd_device *);
/*
* do_analysis is used in the step from device state "basic" to
* state "accept". It returns 0 if the device can be made ready,
* it returns -EMEDIUMTYPE if the device can't be made ready or
* -EAGAIN if do_analysis started a ccw that needs to complete
* before the analysis may be repeated.
*/
int (*do_analysis) (struct dasd_block *);
/*
* Last things to do when a device is set online, and first things
* when it is set offline.
*/
int (*ready_to_online) (struct dasd_device *);
int (*online_to_ready) (struct dasd_device *);
/*
* Device operation functions. build_cp creates a ccw chain for
* a block device request, start_io starts the request and
* term_IO cancels it (e.g. in case of a timeout). format_device
* returns a ccw chain to be used to format the device.
* handle_terminated_request makes it possible to examine a cqr and
* prepare it for retry.
*/
struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
struct dasd_block *,
struct request *);
int (*start_IO) (struct dasd_ccw_req *);
int (*term_IO) (struct dasd_ccw_req *);
void (*handle_terminated_request) (struct dasd_ccw_req *);
struct dasd_ccw_req *(*format_device) (struct dasd_device *,
struct format_data_t *);
int (*free_cp) (struct dasd_ccw_req *, struct request *);
/*
* Error recovery functions. When a request needs recovery,
* erp_action() is called to create a special error recovery ccw.
* erp_postaction() is called after an error recovery ccw has finished
* its execution. dump_sense is called for every error condition to
* print the sense data to the console.
*/
dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
struct irb *);
void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
void (*handle_unsolicited_interrupt) (struct dasd_device *,
struct irb *);
/* i/o control functions. */
int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
/* suspend/resume functions */
int (*freeze) (struct dasd_device *);
int (*restore) (struct dasd_device *);
};
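/*
 * Minimal usage sketch (illustrative only; the "my_" names are
 * hypothetical): a discipline fills in this table and leaves hooks it
 * does not need as NULL.
 *
 *	static struct dasd_discipline my_discipline = {
 *		.owner        = THIS_MODULE,
 *		.name         = "MYDSC",
 *		.ebcname      = "MYDSC",
 *		.check_device = my_check_device,
 *		.do_analysis  = my_do_analysis,
 *		.build_cp     = my_build_cp,
 *		.start_IO     = dasd_start_IO,
 *		.term_IO      = dasd_term_IO,
 *	};
 */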
extern struct dasd_discipline *dasd_diag_discipline_pointer;
/*
* Unique identifier for dasd device.
*/
#define UA_NOT_CONFIGURED 0x00
#define UA_BASE_DEVICE 0x01
#define UA_BASE_PAV_ALIAS 0x02
#define UA_HYPER_PAV_ALIAS 0x03
struct dasd_uid {
__u8 type;
char vendor[4];
char serial[15];
__u16 ssid;
__u8 real_unit_addr;
__u8 base_unit_addr;
char vduit[33];
};
/*
* Notification numbers for extended error reporting notifications:
* The DASD_EER_DISABLE notification is sent before a dasd_device (and its
* eer pointer) is freed. The error reporting module needs to do all necessary
* cleanup steps.
* The DASD_EER_TRIGGER notification sends the actual error reports (triggers).
*/
#define DASD_EER_DISABLE 0
#define DASD_EER_TRIGGER 1
/* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */
#define DASD_EER_FATALERROR 1
#define DASD_EER_NOPATH 2
#define DASD_EER_STATECHANGE 3
#define DASD_EER_PPRCSUSPEND 4
struct dasd_device {
/* Block device stuff. */
struct dasd_block *block;
unsigned int devindex;
unsigned long flags; /* per device flags */
unsigned short features; /* copy of devmap-features (read-only!) */
/* extended error reporting stuff (eer) */
struct dasd_ccw_req *eer_cqr;
/* Device discipline stuff. */
struct dasd_discipline *discipline;
struct dasd_discipline *base_discipline;
char *private;
/* Device state and target state. */
int state, target;
int stopped; /* device (ccw_device_start) was stopped */
/* reference count. */
atomic_t ref_count;
/* ccw queue and memory for static ccw/erp buffers. */
struct list_head ccw_queue;
spinlock_t mem_lock;
void *ccw_mem;
void *erp_mem;
struct list_head ccw_chunks;
struct list_head erp_chunks;
atomic_t tasklet_scheduled;
struct tasklet_struct tasklet;
struct work_struct kick_work;
struct work_struct restore_device;
struct timer_list timer;
debug_info_t *debug_area;
struct ccw_device *cdev;
/* hook for alias management */
struct list_head alias_list;
};
struct dasd_block {
/* Block device stuff. */
struct gendisk *gdp;
struct request_queue *request_queue;
spinlock_t request_queue_lock;
struct block_device *bdev;
atomic_t open_count;
unsigned long long blocks; /* size of volume in blocks */
unsigned int bp_block; /* bytes per block */
unsigned int s2b_shift; /* log2 (bp_block/512) */
struct dasd_device *base;
struct list_head ccw_queue;
spinlock_t queue_lock;
atomic_t tasklet_scheduled;
struct tasklet_struct tasklet;
struct timer_list timer;
#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info_t profile;
#endif
};
/* reasons why device (ccw_device_start) was stopped */
#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
#define DASD_STOPPED_QUIESCE 2 /* Quiesced */
#define DASD_STOPPED_PENDING 4 /* long busy */
#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
#define DASD_STOPPED_SU 16 /* summary unit check handling */
#define DASD_STOPPED_PM 32 /* pm state transition */
#define DASD_UNRESUMED_PM 64 /* pm resume failed state */
/* per device flags */
#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
#define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */
#define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */
void dasd_put_device_wake(struct dasd_device *);
/*
* Reference counting inline functions.
*/
static inline void
dasd_get_device(struct dasd_device *device)
{
atomic_inc(&device->ref_count);
}
static inline void
dasd_put_device(struct dasd_device *device)
{
if (atomic_dec_return(&device->ref_count) == 0)
dasd_put_device_wake(device);
}
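/*
 * Usage sketch (illustrative): helpers that hand out a device
 * reference, e.g. dasd_device_from_cdev() declared below, must be
 * balanced by dasd_put_device(); the final put ends up in
 * dasd_put_device_wake().
 *
 *	device = dasd_device_from_cdev(cdev);
 *	if (!IS_ERR(device)) {
 *		... use the device ...
 *		dasd_put_device(device);
 *	}
 */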
/*
* The static memory in ccw_mem and erp_mem is managed by a sorted
* list of free memory chunks.
*/
struct dasd_mchunk
{
struct list_head list;
unsigned long size;
} __attribute__ ((aligned(8)));
static inline void
dasd_init_chunklist(struct list_head *chunk_list, void *mem,
unsigned long size)
{
struct dasd_mchunk *chunk;
INIT_LIST_HEAD(chunk_list);
chunk = (struct dasd_mchunk *) mem;
chunk->size = size - sizeof(struct dasd_mchunk);
list_add(&chunk->list, chunk_list);
}
static inline void *
dasd_alloc_chunk(struct list_head *chunk_list, unsigned long size)
{
struct dasd_mchunk *chunk, *tmp;
size = (size + 7L) & -8L;
list_for_each_entry(chunk, chunk_list, list) {
if (chunk->size < size)
continue;
if (chunk->size > size + sizeof(struct dasd_mchunk)) {
char *endaddr = (char *) (chunk + 1) + chunk->size;
tmp = (struct dasd_mchunk *) (endaddr - size) - 1;
tmp->size = size;
chunk->size -= size + sizeof(struct dasd_mchunk);
chunk = tmp;
} else
list_del(&chunk->list);
return (void *) (chunk + 1);
}
return NULL;
}
static inline void
dasd_free_chunk(struct list_head *chunk_list, void *mem)
{
struct dasd_mchunk *chunk, *tmp;
struct list_head *p, *left;
chunk = (struct dasd_mchunk *)
((char *) mem - sizeof(struct dasd_mchunk));
/* Find out the left neighbour in chunk_list. */
left = chunk_list;
list_for_each(p, chunk_list) {
if (list_entry(p, struct dasd_mchunk, list) > chunk)
break;
left = p;
}
/* Try to merge with right neighbour = next element from left. */
if (left->next != chunk_list) {
tmp = list_entry(left->next, struct dasd_mchunk, list);
if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
list_del(&tmp->list);
chunk->size += tmp->size + sizeof(struct dasd_mchunk);
}
}
/* Try to merge with left neighbour. */
if (left != chunk_list) {
tmp = list_entry(left, struct dasd_mchunk, list);
if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
tmp->size += chunk->size + sizeof(struct dasd_mchunk);
return;
}
}
__list_add(&chunk->list, left, left->next);
}
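/*
 * Usage sketch (illustrative, not driver code; the buffer size is an
 * arbitrary example): the chunk routines manage one preallocated
 * buffer, round requests up to a multiple of 8 bytes and merge freed
 * chunks with their neighbours.
 *
 *	LIST_HEAD(chunks);
 *	void *mem = kmalloc(4096, GFP_KERNEL);
 *	void *buf;
 *
 *	dasd_init_chunklist(&chunks, mem, 4096);
 *	buf = dasd_alloc_chunk(&chunks, 256);
 *	if (buf)
 *		dasd_free_chunk(&chunks, buf);
 */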
/*
* Check if bsize is in { 512, 1024, 2048, 4096 }
*/
static inline int
dasd_check_blocksize(int bsize)
{
if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
return -EMEDIUMTYPE;
return 0;
}
/* externals in dasd.c */
#define DASD_PROFILE_ON 1
#define DASD_PROFILE_OFF 0
extern debug_info_t *dasd_debug_area;
extern struct dasd_profile_info_t dasd_global_profile;
extern unsigned int dasd_profile_level;
extern const struct block_device_operations dasd_device_operations;
extern struct kmem_cache *dasd_page_cache;
struct dasd_ccw_req *
dasd_kmalloc_request(int, int, int, struct dasd_device *);
struct dasd_ccw_req *
dasd_smalloc_request(int, int, int, struct dasd_device *);
void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
static inline int
dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
{
return set_normalized_cda(ccw, cda);
}
struct dasd_device *dasd_alloc_device(void);
void dasd_free_device(struct dasd_device *);
struct dasd_block *dasd_alloc_block(void);
void dasd_free_block(struct dasd_block *);
void dasd_enable_device(struct dasd_device *);
void dasd_set_target_state(struct dasd_device *, int);
void dasd_kick_device(struct dasd_device *);
void dasd_restore_device(struct dasd_device *);
void dasd_add_request_head(struct dasd_ccw_req *);
void dasd_add_request_tail(struct dasd_ccw_req *);
int dasd_start_IO(struct dasd_ccw_req *);
int dasd_term_IO(struct dasd_ccw_req *);
void dasd_schedule_device_bh(struct dasd_device *);
void dasd_schedule_block_bh(struct dasd_block *);
int dasd_sleep_on(struct dasd_ccw_req *);
int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
void dasd_device_set_timer(struct dasd_device *, int);
void dasd_device_clear_timer(struct dasd_device *);
void dasd_block_set_timer(struct dasd_block *, int);
void dasd_block_clear_timer(struct dasd_block *);
int dasd_cancel_req(struct dasd_ccw_req *);
int dasd_flush_device_queue(struct dasd_device *);
int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
void dasd_generic_remove (struct ccw_device *cdev);
int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
int dasd_generic_set_offline (struct ccw_device *cdev);
int dasd_generic_notify(struct ccw_device *, int);
void dasd_generic_handle_state_change(struct dasd_device *);
int dasd_generic_pm_freeze(struct ccw_device *);
int dasd_generic_restore_device(struct ccw_device *);
int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
/* externals in dasd_devmap.c */
extern int dasd_max_devindex;
extern int dasd_probeonly;
extern int dasd_autodetect;
extern int dasd_nopav;
extern int dasd_nofcx;
int dasd_devmap_init(void);
void dasd_devmap_exit(void);
struct dasd_device *dasd_create_device(struct ccw_device *);
void dasd_delete_device(struct dasd_device *);
int dasd_get_uid(struct ccw_device *, struct dasd_uid *);
int dasd_set_uid(struct ccw_device *, struct dasd_uid *);
int dasd_get_feature(struct ccw_device *, int);
int dasd_set_feature(struct ccw_device *, int, int);
int dasd_add_sysfs_files(struct ccw_device *);
void dasd_remove_sysfs_files(struct ccw_device *);
struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
struct dasd_device *dasd_device_from_devindex(int);
int dasd_parse(void);
int dasd_busid_known(const char *);
/* externals in dasd_gendisk.c */
int dasd_gendisk_init(void);
void dasd_gendisk_exit(void);
int dasd_gendisk_alloc(struct dasd_block *);
void dasd_gendisk_free(struct dasd_block *);
int dasd_scan_partitions(struct dasd_block *);
void dasd_destroy_partitions(struct dasd_block *);
/* externals in dasd_ioctl.c */
int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
/* externals in dasd_proc.c */
int dasd_proc_init(void);
void dasd_proc_exit(void);
/* externals in dasd_erp.c */
struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
struct dasd_device *);
void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
void dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb);
/* externals in dasd_3990_erp.c */
struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
/* externals in dasd_eer.c */
#ifdef CONFIG_DASD_EER
int dasd_eer_init(void);
void dasd_eer_exit(void);
int dasd_eer_enable(struct dasd_device *);
void dasd_eer_disable(struct dasd_device *);
void dasd_eer_write(struct dasd_device *, struct dasd_ccw_req *cqr,
unsigned int id);
void dasd_eer_snss(struct dasd_device *);
static inline int dasd_eer_enabled(struct dasd_device *device)
{
return device->eer_cqr != NULL;
}
#else
#define dasd_eer_init() (0)
#define dasd_eer_exit() do { } while (0)
#define dasd_eer_enable(d) (0)
#define dasd_eer_disable(d) do { } while (0)
#define dasd_eer_write(d,c,i) do { } while (0)
#define dasd_eer_snss(d) do { } while (0)
#define dasd_eer_enabled(d) (0)
#endif /* CONFIG_DASD_EER */
#endif /* __KERNEL__ */
#endif /* DASD_H */

View File

@@ -0,0 +1,436 @@
/*
* File...........: linux/drivers/s390/block/dasd_ioctl.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
*
* i/o controls for the dasd driver.
*/
#define KMSG_COMPONENT "dasd"
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <linux/smp_lock.h>
#include <asm/ccwdev.h>
#include <asm/cmb.h>
#include <asm/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_ioctl:"
#include "dasd_int.h"
static int
dasd_ioctl_api_version(void __user *argp)
{
int ver = DASD_API_VERSION;
return put_user(ver, (int __user *)argp);
}
/*
* Enable device.
* used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection
*/
static int
dasd_ioctl_enable(struct block_device *bdev)
{
struct dasd_block *block = bdev->bd_disk->private_data;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
dasd_enable_device(block->base);
/* Formatting the dasd device can change the capacity. */
mutex_lock(&bdev->bd_mutex);
i_size_write(bdev->bd_inode, (loff_t)get_capacity(block->gdp) << 9);
mutex_unlock(&bdev->bd_mutex);
return 0;
}
/*
* Disable device.
* Used by dasdfmt. Disable I/O operations but allow ioctls.
*/
static int
dasd_ioctl_disable(struct block_device *bdev)
{
struct dasd_block *block = bdev->bd_disk->private_data;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
/*
* Man this is sick. We don't do a real disable but only downgrade
* the device to DASD_STATE_BASIC. The reason is that dasdfmt uses
* BIODASDDISABLE to disable accesses to the device via the block
* device layer but it still wants to do i/o on the device by
* using the BIODASDFMT ioctl. Therefore the correct state for the
* device is DASD_STATE_BASIC, which allows basic i/o.
*/
dasd_set_target_state(block->base, DASD_STATE_BASIC);
/*
* Set i_size to zero, since read, write, etc. check against this
* value.
*/
mutex_lock(&bdev->bd_mutex);
i_size_write(bdev->bd_inode, 0);
mutex_unlock(&bdev->bd_mutex);
return 0;
}
/*
* Quiesce device.
*/
static int dasd_ioctl_quiesce(struct dasd_block *block)
{
unsigned long flags;
struct dasd_device *base;
base = block->base;
if (!capable (CAP_SYS_ADMIN))
return -EACCES;
pr_info("%s: The DASD has been put in the quiesce "
"state\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
base->stopped |= DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
return 0;
}
/*
* Resume device.
*/
static int dasd_ioctl_resume(struct dasd_block *block)
{
unsigned long flags;
struct dasd_device *base;
base = block->base;
if (!capable (CAP_SYS_ADMIN))
return -EACCES;
pr_info("%s: I/O operations have been resumed "
"on the DASD\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
base->stopped &= ~DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
dasd_schedule_block_bh(block);
return 0;
}
/*
* Performs formatting of _device_ according to _fdata_.
* Note: The discipline's format_device function is assumed to deliver
* formatting commands to format a single unit of the device. In terms
* of the ECKD devices this means CCWs are generated to format a
* single track.
*/
static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
{
struct dasd_ccw_req *cqr;
struct dasd_device *base;
int rc;
base = block->base;
if (base->discipline->format_device == NULL)
return -EPERM;
if (base->state != DASD_STATE_BASIC) {
pr_warning("%s: The DASD cannot be formatted while it is "
"enabled\n", dev_name(&base->cdev->dev));
return -EBUSY;
}
DBF_DEV_EVENT(DBF_NOTICE, base,
"formatting units %u to %u (%u B blocks) flags %u",
fdata->start_unit,
fdata->stop_unit, fdata->blksize, fdata->intensity);
/* Since dasdfmt keeps the device open after it was disabled,
* there still exists an inode for this device.
* We must update i_blkbits, otherwise we might get errors when
* enabling the device later.
*/
if (fdata->start_unit == 0) {
struct block_device *bdev = bdget_disk(block->gdp, 0);
bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
bdput(bdev);
}
while (fdata->start_unit <= fdata->stop_unit) {
cqr = base->discipline->format_device(base, fdata);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
rc = dasd_sleep_on_interruptible(cqr);
dasd_sfree_request(cqr, cqr->memdev);
if (rc) {
if (rc != -ERESTARTSYS)
pr_err("%s: Formatting unit %d failed with "
"rc=%d\n", dev_name(&base->cdev->dev),
fdata->start_unit, rc);
return rc;
}
fdata->start_unit++;
}
return 0;
}
/*
* Format device.
*/
static int
dasd_ioctl_format(struct block_device *bdev, void __user *argp)
{
struct dasd_block *block = bdev->bd_disk->private_data;
struct format_data_t fdata;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!argp)
return -EINVAL;
if (block->base->features & DASD_FEATURE_READONLY)
return -EROFS;
if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
return -EFAULT;
if (bdev != bdev->bd_contains) {
pr_warning("%s: The specified DASD is a partition and cannot "
"be formatted\n",
dev_name(&block->base->cdev->dev));
return -EINVAL;
}
return dasd_format(block, &fdata);
}
#ifdef CONFIG_DASD_PROFILE
/*
* Reset device profile information
*/
static int dasd_ioctl_reset_profile(struct dasd_block *block)
{
memset(&block->profile, 0, sizeof(struct dasd_profile_info_t));
return 0;
}
/*
* Return device profile information
*/
static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
if (dasd_profile_level == DASD_PROFILE_OFF)
return -EIO;
if (copy_to_user(argp, &block->profile,
sizeof(struct dasd_profile_info_t)))
return -EFAULT;
return 0;
}
#else
static int dasd_ioctl_reset_profile(struct dasd_block *block)
{
return -ENOSYS;
}
static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
return -ENOSYS;
}
#endif
/*
* Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
*/
static int dasd_ioctl_information(struct dasd_block *block,
unsigned int cmd, void __user *argp)
{
struct dasd_information2_t *dasd_info;
unsigned long flags;
int rc;
struct dasd_device *base;
struct ccw_device *cdev;
struct ccw_dev_id dev_id;
base = block->base;
if (!base->discipline || !base->discipline->fill_info)
return -EINVAL;
dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
if (dasd_info == NULL)
return -ENOMEM;
rc = base->discipline->fill_info(base, dasd_info);
if (rc) {
kfree(dasd_info);
return rc;
}
cdev = base->cdev;
ccw_device_get_id(cdev, &dev_id);
dasd_info->devno = dev_id.devno;
dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev);
dasd_info->cu_type = cdev->id.cu_type;
dasd_info->cu_model = cdev->id.cu_model;
dasd_info->dev_type = cdev->id.dev_type;
dasd_info->dev_model = cdev->id.dev_model;
dasd_info->status = base->state;
/*
* The open_count is increased for every opener, including the
* blkdev_get in dasd_scan_partitions.
* This must be hidden from user-space.
*/
dasd_info->open_count = atomic_read(&block->open_count);
if (!block->bdev)
dasd_info->open_count++;
/*
* check if device is really formatted
* LDL / CDL was returned by 'fill_info'
*/
if ((base->state < DASD_STATE_READY) ||
(dasd_check_blocksize(block->bp_block)))
dasd_info->format = DASD_FORMAT_NONE;
dasd_info->features |=
((base->features & DASD_FEATURE_READONLY) != 0);
memcpy(dasd_info->type, base->discipline->name, 4);
if (block->request_queue->request_fn) {
struct list_head *l;
#ifdef DASD_EXTENDED_PROFILING
{
struct list_head *l;
spin_lock_irqsave(&block->lock, flags);
list_for_each(l, &block->request_queue->queue_head)
dasd_info->req_queue_len++;
spin_unlock_irqrestore(&block->lock, flags);
}
#endif /* DASD_EXTENDED_PROFILING */
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
list_for_each(l, &base->ccw_queue)
dasd_info->chanq_len++;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
flags);
}
rc = 0;
if (copy_to_user(argp, dasd_info,
((cmd == (unsigned int) BIODASDINFO2) ?
sizeof(struct dasd_information2_t) :
sizeof(struct dasd_information_t))))
rc = -EFAULT;
kfree(dasd_info);
return rc;
}
/*
* Set read only
*/
static int
dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
{
struct dasd_block *block = bdev->bd_disk->private_data;
int intval;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (bdev != bdev->bd_contains)
// ro setting is not allowed for partitions
return -EINVAL;
if (get_user(intval, (int __user *)argp))
return -EFAULT;
set_disk_ro(bdev->bd_disk, intval);
return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval);
}
static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
unsigned long arg)
{
struct cmbdata __user *argp = (void __user *) arg;
size_t size = _IOC_SIZE(cmd);
struct cmbdata data;
int ret;
ret = cmf_readall(block->base->cdev, &data);
if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp))))
return -EFAULT;
return ret;
}
static int
dasd_do_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct dasd_block *block = bdev->bd_disk->private_data;
void __user *argp = (void __user *)arg;
if (!block)
return -ENODEV;
if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
PRINT_DEBUG("empty data ptr");
return -EINVAL;
}
switch (cmd) {
case BIODASDDISABLE:
return dasd_ioctl_disable(bdev);
case BIODASDENABLE:
return dasd_ioctl_enable(bdev);
case BIODASDQUIESCE:
return dasd_ioctl_quiesce(block);
case BIODASDRESUME:
return dasd_ioctl_resume(block);
case BIODASDFMT:
return dasd_ioctl_format(bdev, argp);
case BIODASDINFO:
return dasd_ioctl_information(block, cmd, argp);
case BIODASDINFO2:
return dasd_ioctl_information(block, cmd, argp);
case BIODASDPRRD:
return dasd_ioctl_read_profile(block, argp);
case BIODASDPRRST:
return dasd_ioctl_reset_profile(block);
case BLKROSET:
return dasd_ioctl_set_ro(bdev, argp);
case DASDAPIVER:
return dasd_ioctl_api_version(argp);
case BIODASDCMFENABLE:
return enable_cmf(block->base->cdev);
case BIODASDCMFDISABLE:
return disable_cmf(block->base->cdev);
case BIODASDREADALLCMB:
return dasd_ioctl_readall_cmb(block, cmd, arg);
default:
/* if the discipline has an ioctl method try it. */
if (block->base->discipline->ioctl) {
int rval = block->base->discipline->ioctl(block, cmd, argp);
if (rval != -ENOIOCTLCMD)
return rval;
}
return -EINVAL;
}
}
int dasd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int rc;
lock_kernel();
rc = dasd_do_ioctl(bdev, mode, cmd, arg);
unlock_kernel();
return rc;
}
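/*
 * Userspace usage sketch (illustrative; the device node name is only
 * an example): the ioctls dispatched above are reached through the
 * block device node.
 *
 *	int fd = open("/dev/dasda", O_RDONLY);
 *	struct dasd_information2_t info;
 *
 *	if (fd >= 0 && ioctl(fd, BIODASDINFO2, &info) == 0)
 *		printf("devno 0x%04x, open_count %d\n",
 *		       info.devno, info.open_count);
 */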

View File

@@ -0,0 +1,349 @@
/*
* File...........: linux/drivers/s390/block/dasd_proc.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2002
*
* /proc interface for the dasd driver.
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/ctype.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_proc:"
#include "dasd_int.h"
static struct proc_dir_entry *dasd_proc_root_entry = NULL;
static struct proc_dir_entry *dasd_devices_entry = NULL;
static struct proc_dir_entry *dasd_statistics_entry = NULL;
#ifdef CONFIG_DASD_PROFILE
static char *
dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
char *buffer;
buffer = kmalloc(user_len + 1, GFP_KERNEL);
if (buffer == NULL)
return ERR_PTR(-ENOMEM);
if (copy_from_user(buffer, user_buf, user_len) != 0) {
kfree(buffer);
return ERR_PTR(-EFAULT);
}
/* got the string, now strip linefeed. */
if (buffer[user_len - 1] == '\n')
buffer[user_len - 1] = 0;
else
buffer[user_len] = 0;
return buffer;
}
#endif /* CONFIG_DASD_PROFILE */
static int
dasd_devices_show(struct seq_file *m, void *v)
{
struct dasd_device *device;
struct dasd_block *block;
char *substr;
device = dasd_device_from_devindex((unsigned long) v - 1);
if (IS_ERR(device))
return 0;
if (device->block)
block = device->block;
else {
dasd_put_device(device);
return 0;
}
/* Print device number. */
seq_printf(m, "%s", dev_name(&device->cdev->dev));
/* Print discipline string. */
if (device->discipline != NULL)
seq_printf(m, "(%s)", device->discipline->name);
else
seq_printf(m, "(none)");
/* Print kdev. */
if (block->gdp)
seq_printf(m, " at (%3d:%6d)",
MAJOR(disk_devt(block->gdp)),
MINOR(disk_devt(block->gdp)));
else
seq_printf(m, " at (???:??????)");
/* Print device name. */
if (block->gdp)
seq_printf(m, " is %-8s", block->gdp->disk_name);
else
seq_printf(m, " is ????????");
/* Print devices features. */
substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
seq_printf(m, "%4s: ", substr);
/* Print device status information. */
switch (device->state) {
case DASD_STATE_NEW:
seq_printf(m, "new");
break;
case DASD_STATE_KNOWN:
seq_printf(m, "detected");
break;
case DASD_STATE_BASIC:
seq_printf(m, "basic");
break;
case DASD_STATE_UNFMT:
seq_printf(m, "unformatted");
break;
case DASD_STATE_READY:
case DASD_STATE_ONLINE:
seq_printf(m, "active ");
if (dasd_check_blocksize(block->bp_block))
seq_printf(m, "n/f ");
else
seq_printf(m,
"at blocksize: %d, %lld blocks, %lld MB",
block->bp_block, block->blocks,
((block->bp_block >> 9) *
block->blocks) >> 11);
break;
default:
seq_printf(m, "no stat");
break;
}
dasd_put_device(device);
if (dasd_probeonly)
seq_printf(m, "(probeonly)");
seq_printf(m, "\n");
return 0;
}
static void *dasd_devices_start(struct seq_file *m, loff_t *pos)
{
if (*pos >= dasd_max_devindex)
return NULL;
return (void *)((unsigned long) *pos + 1);
}
static void *dasd_devices_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return dasd_devices_start(m, pos);
}
static void dasd_devices_stop(struct seq_file *m, void *v)
{
}
static const struct seq_operations dasd_devices_seq_ops = {
.start = dasd_devices_start,
.next = dasd_devices_next,
.stop = dasd_devices_stop,
.show = dasd_devices_show,
};
static int dasd_devices_open(struct inode *inode, struct file *file)
{
return seq_open(file, &dasd_devices_seq_ops);
}
static const struct file_operations dasd_devices_file_ops = {
.owner = THIS_MODULE,
.open = dasd_devices_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int
dasd_calc_metrics(char *page, char **start, off_t off,
int count, int *eof, int len)
{
len = (len > off) ? len - off : 0;
if (len > count)
len = count;
if (len < count)
*eof = 1;
*start = page + off;
return len;
}
#ifdef CONFIG_DASD_PROFILE
static char *
dasd_statistics_array(char *str, unsigned int *array, int factor)
{
int i;
for (i = 0; i < 32; i++) {
str += sprintf(str, "%7d ", array[i] / factor);
if (i == 15)
str += sprintf(str, "\n");
}
str += sprintf(str,"\n");
return str;
}
#endif /* CONFIG_DASD_PROFILE */
static int
dasd_statistics_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
unsigned long len;
#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info_t *prof;
char *str;
int factor;
/* check for active profiling */
if (dasd_profile_level == DASD_PROFILE_OFF) {
len = sprintf(page, "Statistics are off - they might be "
"switched on using 'echo set on > "
"/proc/dasd/statistics'\n");
return dasd_calc_metrics(page, start, off, count, eof, len);
}
prof = &dasd_global_profile;
/* prevent counter 'overflow' on output */
for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
factor *= 10);
str = page;
str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs);
str += sprintf(str, "with %u sectors(512B each)\n",
prof->dasd_io_sects);
str += sprintf(str, "Scale Factor is %d\n", factor);
str += sprintf(str,
" __<4 ___8 __16 __32 __64 _128 "
" _256 _512 __1k __2k __4k __8k "
" _16k _32k _64k 128k\n");
str += sprintf(str,
" _256 _512 __1M __2M __4M __8M "
" _16M _32M _64M 128M 256M 512M "
" __1G __2G __4G " " _>4G\n");
str += sprintf(str, "Histogram of sizes (512B secs)\n");
str = dasd_statistics_array(str, prof->dasd_io_secs, factor);
str += sprintf(str, "Histogram of I/O times (microseconds)\n");
str = dasd_statistics_array(str, prof->dasd_io_times, factor);
str += sprintf(str, "Histogram of I/O times per sector\n");
str = dasd_statistics_array(str, prof->dasd_io_timps, factor);
str += sprintf(str, "Histogram of I/O time till ssch\n");
str = dasd_statistics_array(str, prof->dasd_io_time1, factor);
str += sprintf(str, "Histogram of I/O time between ssch and irq\n");
str = dasd_statistics_array(str, prof->dasd_io_time2, factor);
str += sprintf(str, "Histogram of I/O time between ssch "
"and irq per sector\n");
str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor);
str += sprintf(str, "Histogram of I/O time between irq and end\n");
str = dasd_statistics_array(str, prof->dasd_io_time3, factor);
str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n");
str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor);
len = str - page;
#else
len = sprintf(page, "Statistics are not activated in this kernel\n");
#endif
return dasd_calc_metrics(page, start, off, count, eof, len);
}
static int
dasd_statistics_write(struct file *file, const char __user *user_buf,
unsigned long user_len, void *data)
{
#ifdef CONFIG_DASD_PROFILE
char *buffer, *str;
if (user_len > 65536)
user_len = 65536;
buffer = dasd_get_user_string(user_buf, user_len);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
DBF_EVENT(DBF_DEBUG, "/proc/dasd/statistics: '%s'\n", buffer);
/* check for valid verbs */
for (str = buffer; isspace(*str); str++);
if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
/* 'set xxx' was given */
for (str = str + 4; isspace(*str); str++);
if (strcmp(str, "on") == 0) {
/* switch on statistics profiling */
dasd_profile_level = DASD_PROFILE_ON;
pr_info("The statistics feature has been switched "
"on\n");
} else if (strcmp(str, "off") == 0) {
/* switch off and reset statistics profiling */
memset(&dasd_global_profile,
0, sizeof (struct dasd_profile_info_t));
dasd_profile_level = DASD_PROFILE_OFF;
pr_info("The statistics feature has been switched "
"off\n");
} else
goto out_error;
} else if (strncmp(str, "reset", 5) == 0) {
/* reset the statistics */
memset(&dasd_global_profile, 0,
sizeof (struct dasd_profile_info_t));
pr_info("The statistics have been reset\n");
} else
goto out_error;
kfree(buffer);
return user_len;
out_error:
pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
str);
kfree(buffer);
return -EINVAL;
#else
pr_warning("/proc/dasd/statistics: is not activated in this kernel\n");
return user_len;
#endif /* CONFIG_DASD_PROFILE */
}
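/*
 * Usage sketch: the verbs accepted above correspond to shell commands
 * such as
 *	echo set on  > /proc/dasd/statistics
 *	echo set off > /proc/dasd/statistics
 *	echo reset   > /proc/dasd/statistics
 */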
/*
* Create dasd proc-fs entries.
* In case creation failed, cleanup and return -ENOENT.
*/
int
dasd_proc_init(void)
{
dasd_proc_root_entry = proc_mkdir("dasd", NULL);
if (!dasd_proc_root_entry)
goto out_nodasd;
dasd_devices_entry = proc_create("devices",
S_IFREG | S_IRUGO | S_IWUSR,
dasd_proc_root_entry,
&dasd_devices_file_ops);
if (!dasd_devices_entry)
goto out_nodevices;
dasd_statistics_entry = create_proc_entry("statistics",
S_IFREG | S_IRUGO | S_IWUSR,
dasd_proc_root_entry);
if (!dasd_statistics_entry)
goto out_nostatistics;
dasd_statistics_entry->read_proc = dasd_statistics_read;
dasd_statistics_entry->write_proc = dasd_statistics_write;
return 0;
out_nostatistics:
remove_proc_entry("devices", dasd_proc_root_entry);
out_nodevices:
remove_proc_entry("dasd", NULL);
out_nodasd:
return -ENOENT;
}
void
dasd_proc_exit(void)
{
remove_proc_entry("devices", dasd_proc_root_entry);
remove_proc_entry("statistics", dasd_proc_root_entry);
remove_proc_entry("dasd", NULL);
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,476 @@
/*
* Xpram.c -- the S/390 expanded memory RAM-disk
*
* significant parts of this code are based on
* the sbull device driver presented in
* A. Rubini: Linux Device Drivers
*
* Author of XPRAM specific coding: Reinhard Buendgen
* buendgen@de.ibm.com
* Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* External interfaces:
* Interfaces to linux kernel
* xpram_setup: read kernel parameters
* Device specific file operations
* xpram_ioctl
* xpram_open
*
* "ad-hoc" partitioning:
* the expanded memory can be partitioned among several devices
* (with different minors). The partitioning can be set up by
* kernel or module parameters (int devs & int sizes[]).
*
* Potential future improvements:
* generic hard disk support to replace ad-hoc partitioning
*/
#define KMSG_COMPONENT "xpram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h> /* isdigit, isxdigit */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/sysdev.h>
#include <linux/bio.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
#define XPRAM_NAME "xpram"
#define XPRAM_DEVS 1 /* one partition */
#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
typedef struct {
unsigned int size; /* size of xpram segment in pages */
unsigned int offset; /* start page of xpram segment */
} xpram_device_t;
static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
static struct request_queue *xpram_queues[XPRAM_MAX_DEVS];
static unsigned int xpram_pages;
static int xpram_devs;
/*
* Parameter parsing functions.
*/
static int __initdata devs = XPRAM_DEVS;
static char __initdata *sizes[XPRAM_MAX_DEVS];
module_param(devs, int, 0);
module_param_array(sizes, charp, NULL, 0);
MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
"the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
MODULE_PARM_DESC(sizes, "list of device (partition) sizes " \
"the defaults are 0s \n" \
"All devices with size 0 equally partition the "
"remaining space on the expanded strorage not "
"claimed by explicit sizes\n");
MODULE_LICENSE("GPL");
/*
* Copy expanded memory page (4kB) into main memory
* Arguments
* page_addr: address of target page
* xpage_index: index of expanded memory page
* Return value
* 0: if operation succeeds
* -EIO: if pgin failed
* -ENXIO: if xpram has vanished
*/
static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
{
int cc = 2; /* return unused cc 2 if pgin traps */
asm volatile(
" .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3)
return -ENXIO;
if (cc == 2)
return -ENXIO;
if (cc == 1)
return -EIO;
return 0;
}
/*
* Copy a 4kB page of main memory to an expanded memory page
* Arguments
* page_addr: address of source page
* xpage_index: index of expanded memory page
* Return value
* 0: if operation succeeds
* -EIO: if pgout failed
* -ENXIO: if xpram has vanished
*/
static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
{
int cc = 2; /* return unused cc 2 if pgout traps */
asm volatile(
" .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3)
return -ENXIO;
if (cc == 2)
return -ENXIO;
if (cc == 1)
return -EIO;
return 0;
}
/*
* Check if xpram is available.
*/
static int xpram_present(void)
{
unsigned long mem_page;
int rc;
mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
if (!mem_page)
return -ENOMEM;
rc = xpram_page_in(mem_page, 0);
free_page(mem_page);
return rc ? -ENXIO : 0;
}
/*
* Return index of the last available xpram page.
*/
static unsigned long xpram_highest_page_index(void)
{
unsigned int page_index, add_bit;
unsigned long mem_page;
mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
if (!mem_page)
return 0;
page_index = 0;
add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
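/*
 * Binary probe: test each index bit from the most significant one
 * downwards; if pgin succeeds for the candidate index, that page
 * exists and the bit is kept in the result.
 */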
while (add_bit > 0) {
if (xpram_page_in(mem_page, page_index | add_bit) == 0)
page_index |= add_bit;
add_bit >>= 1;
}
free_page (mem_page);
return page_index;
}
/*
* Block device make request function.
*/
static int xpram_make_request(struct request_queue *q, struct bio *bio)
{
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec *bvec;
unsigned int index;
unsigned long page_addr;
unsigned long bytes;
int i;
if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
if ((bio->bi_size >> 12) > xdev->size)
/* Request exceeds the size of the xpram device. */
goto fail;
if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
goto fail;
index = (bio->bi_sector >> 3) + xdev->offset;
bio_for_each_segment(bvec, bio, i) {
page_addr = (unsigned long)
kmap(bvec->bv_page) + bvec->bv_offset;
bytes = bvec->bv_len;
if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
/* More paranoia. */
goto fail;
while (bytes > 0) {
if (bio_data_dir(bio) == READ) {
if (xpram_page_in(page_addr, index) != 0)
goto fail;
} else {
if (xpram_page_out(page_addr, index) != 0)
goto fail;
}
page_addr += 4096;
bytes -= 4096;
index++;
}
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio_endio(bio, 0);
return 0;
fail:
bio_io_error(bio);
return 0;
}
static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
unsigned long size;
/*
* get geometry: we have to fake one... trim the size to a
* multiple of 64 (32k): claim we have 16 sectors, 4 heads and
* whatever number of cylinders fits. Also report that data starts
* at sector 4.
*/
size = (xpram_pages * 8) & ~0x3f;
geo->cylinders = size >> 6;
geo->heads = 4;
geo->sectors = 16;
geo->start = 4;
return 0;
}
static const struct block_device_operations xpram_devops =
{
.owner = THIS_MODULE,
.getgeo = xpram_getgeo,
};
/*
* Setup xpram_sizes array.
*/
static int __init xpram_setup_sizes(unsigned long pages)
{
unsigned long mem_needed;
unsigned long mem_auto;
unsigned long long size;
int mem_auto_no;
int i;
/* Check number of devices. */
if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
pr_err("%d is not a valid number of XPRAM devices\n",devs);
return -EINVAL;
}
xpram_devs = devs;
/*
* Copy sizes array to xpram_sizes and align partition
* sizes to page boundary.
*/
mem_needed = 0;
mem_auto_no = 0;
for (i = 0; i < xpram_devs; i++) {
if (sizes[i]) {
size = simple_strtoull(sizes[i], &sizes[i], 0);
switch (sizes[i][0]) {
case 'g':
case 'G':
size <<= 20;
break;
case 'm':
case 'M':
size <<= 10;
}
xpram_sizes[i] = (size + 3) & -4UL;
}
if (xpram_sizes[i])
mem_needed += xpram_sizes[i];
else
mem_auto_no++;
}
pr_info(" number of devices (partitions): %d \n", xpram_devs);
for (i = 0; i < xpram_devs; i++) {
if (xpram_sizes[i])
pr_info(" size of partition %d: %u kB\n",
i, xpram_sizes[i]);
else
pr_info(" size of partition %d to be set "
"automatically\n",i);
}
pr_info(" memory needed (for sized partitions): %lu kB\n",
mem_needed);
pr_info(" partitions to be sized automatically: %d\n",
mem_auto_no);
if (mem_needed > pages * 4) {
pr_err("Not enough expanded memory available\n");
return -EINVAL;
}
/*
* partitioning:
* xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
* else: ; all partitions with zero xpram_sizes[i]
* partition equally the remaining space
*/
if (mem_auto_no) {
mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
pr_info(" automatically determined "
"partition size: %lu kB\n", mem_auto);
for (i = 0; i < xpram_devs; i++)
if (xpram_sizes[i] == 0)
xpram_sizes[i] = mem_auto;
}
return 0;
}
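/*
 * Parameter usage sketch (illustrative values): with the syntax parsed
 * above, a load like
 *	modprobe xpram devs=2 sizes=512M,0
 * gives partition 0 512 MB and lets partition 1 take whatever expanded
 * storage remains.
 */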
static int __init xpram_setup_blkdev(void)
{
unsigned long offset;
int i, rc = -ENOMEM;
for (i = 0; i < xpram_devs; i++) {
xpram_disks[i] = alloc_disk(1);
if (!xpram_disks[i])
goto out;
xpram_queues[i] = blk_alloc_queue(GFP_KERNEL);
if (!xpram_queues[i]) {
put_disk(xpram_disks[i]);
goto out;
}
blk_queue_make_request(xpram_queues[i], xpram_make_request);
blk_queue_logical_block_size(xpram_queues[i], 4096);
}
/*
* Register xpram major.
*/
rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
if (rc < 0)
goto out;
/*
* Setup device structures.
*/
offset = 0;
for (i = 0; i < xpram_devs; i++) {
struct gendisk *disk = xpram_disks[i];
xpram_devices[i].size = xpram_sizes[i] / 4;
xpram_devices[i].offset = offset;
offset += xpram_devices[i].size;
disk->major = XPRAM_MAJOR;
disk->first_minor = i;
disk->fops = &xpram_devops;
disk->private_data = &xpram_devices[i];
disk->queue = xpram_queues[i];
sprintf(disk->disk_name, "slram%d", i);
set_capacity(disk, xpram_sizes[i] << 1);
add_disk(disk);
}
return 0;
out:
while (i--) {
blk_cleanup_queue(xpram_queues[i]);
put_disk(xpram_disks[i]);
}
return rc;
}
/*
* Resume failed: Print error message and call panic.
*/
static void xpram_resume_error(const char *message)
{
pr_err("Resuming the system failed: %s\n", message);
panic("xpram resume error\n");
}
/*
* Check if xpram setup changed between suspend and resume.
*/
static int xpram_restore(struct device *dev)
{
if (!xpram_pages)
return 0;
if (xpram_present() != 0)
xpram_resume_error("xpram disappeared");
if (xpram_pages != xpram_highest_page_index() + 1)
xpram_resume_error("Size of xpram changed");
return 0;
}
static struct dev_pm_ops xpram_pm_ops = {
.restore = xpram_restore,
};
static struct platform_driver xpram_pdrv = {
.driver = {
.name = XPRAM_NAME,
.owner = THIS_MODULE,
.pm = &xpram_pm_ops,
},
};
static struct platform_device *xpram_pdev;
/*
* Finally, the init/exit functions.
*/
static void __exit xpram_exit(void)
{
int i;
for (i = 0; i < xpram_devs; i++) {
del_gendisk(xpram_disks[i]);
blk_cleanup_queue(xpram_queues[i]);
put_disk(xpram_disks[i]);
}
unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
platform_device_unregister(xpram_pdev);
platform_driver_unregister(&xpram_pdrv);
}
static int __init xpram_init(void)
{
int rc;
/* Find out size of expanded memory. */
if (xpram_present() != 0) {
pr_err("No expanded memory available\n");
return -ENODEV;
}
xpram_pages = xpram_highest_page_index() + 1;
pr_info(" %u pages expanded memory found (%lu KB).\n",
xpram_pages, (unsigned long) xpram_pages*4);
rc = xpram_setup_sizes(xpram_pages);
if (rc)
return rc;
rc = platform_driver_register(&xpram_pdrv);
if (rc)
return rc;
xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0);
if (IS_ERR(xpram_pdev)) {
rc = PTR_ERR(xpram_pdev);
goto fail_platform_driver_unregister;
}
rc = xpram_setup_blkdev();
if (rc)
goto fail_platform_device_unregister;
return 0;
fail_platform_device_unregister:
platform_device_unregister(xpram_pdev);
fail_platform_driver_unregister:
platform_driver_unregister(&xpram_pdrv);
return rc;
}
module_init(xpram_init);
module_exit(xpram_exit);

View File

@@ -0,0 +1,177 @@
comment "S/390 character device drivers"
depends on S390
config TN3270
tristate "Support for locally attached 3270 terminals"
depends on CCW
help
Include support for IBM 3270 terminals.
config TN3270_TTY
tristate "Support for tty input/output on 3270 terminals"
depends on TN3270
help
Include support for using an IBM 3270 terminal as a Linux tty.
config TN3270_FS
tristate "Support for fullscreen applications on 3270 terminals"
depends on TN3270
help
Include support for fullscreen applications on an IBM 3270 terminal.
config TN3270_CONSOLE
bool "Support for console on 3270 terminal"
depends on TN3270=y && TN3270_TTY=y
help
Include support for using an IBM 3270 terminal as a Linux system
console. Available only if 3270 support is compiled in statically.
config TN3215
bool "Support for 3215 line mode terminal"
depends on CCW
help
Include support for IBM 3215 line-mode terminals.
config TN3215_CONSOLE
bool "Support for console on 3215 line mode terminal"
depends on TN3215
help
Include support for using an IBM 3215 line-mode terminal as a
Linux system console.
config CCW_CONSOLE
bool
depends on TN3215_CONSOLE || TN3270_CONSOLE
default y
config SCLP_TTY
bool "Support for SCLP line mode terminal"
depends on S390
help
Include support for IBM SCLP line-mode terminals.
config SCLP_CONSOLE
bool "Support for console on SCLP line mode terminal"
depends on SCLP_TTY
help
Include support for using an IBM HWC line-mode terminal as the Linux
system console.
config SCLP_VT220_TTY
bool "Support for SCLP VT220-compatible terminal"
depends on S390
help
Include support for an IBM SCLP VT220-compatible terminal.
config SCLP_VT220_CONSOLE
bool "Support for console on SCLP VT220-compatible terminal"
depends on SCLP_VT220_TTY
help
Include support for using an IBM SCLP VT220-compatible terminal as a
Linux system console.
config SCLP_CPI
tristate "Control-Program Identification"
depends on S390
help
This option enables the hardware console interface for system
identification. This is commonly used for workload management and
gives you a nice name for the system on the service element.
Please select this option as a module since built-in operation is
completely untested.
You should only select this option if you know what you are doing,
need this feature and intend to run your kernel in LPAR.
config SCLP_ASYNC
tristate "Support for Call Home via Asynchronous SCLP Records"
depends on S390
help
This option enables the call home function, which is able to inform
the service element and connected organisations about a kernel panic.
You should only select this option if you know what you are doing,
want to inform other people about your kernel panics,
need this feature and intend to run your kernel in LPAR.
config S390_TAPE
tristate "S/390 tape device support"
depends on CCW
help
Select this option if you want to access channel-attached tape
devices on IBM S/390 or zSeries.
If you select this option you will also want to select at
least one of the tape interface options and one of the tape
hardware options in order to access a tape device.
This option is also available as a module. The module will be
called tape390 and include all selected interfaces and
hardware drivers.
comment "S/390 tape interface support"
depends on S390_TAPE
config S390_TAPE_BLOCK
bool "Support for tape block devices"
depends on S390_TAPE && BLOCK
help
Select this option if you want to access your channel-attached tape
devices using the block device interface. This interface is similar
to CD-ROM devices on other platforms. The tapes can only be
accessed read-only when using this interface. Have a look at
<file:Documentation/s390/TAPE> for further information about creating
volumes for and using this interface. It is safe to say "Y" here.
comment "S/390 tape hardware support"
depends on S390_TAPE
config S390_TAPE_34XX
tristate "Support for 3480/3490 tape hardware"
depends on S390_TAPE
help
Select this option if you want to access IBM 3480/3490 magnetic
tape subsystems and 100% compatibles.
It is safe to say "Y" here.
config S390_TAPE_3590
tristate "Support for 3590 tape hardware"
depends on S390_TAPE
help
Select this option if you want to access IBM 3590 magnetic
tape subsystems and 100% compatibles.
It is safe to say "Y" here.
config VMLOGRDR
tristate "Support for the z/VM recording system services (VM only)"
depends on IUCV
help
Select this option if you want to be able to receive records collected
by the z/VM recording system services, e.g. from *LOGREC, *ACCOUNT or
*SYMPTOM.
This driver depends on the IUCV support driver.
config VMCP
tristate "Support for the z/VM CP interface (VM only)"
depends on S390
help
Select this option if you want to be able to interact with the control
program on z/VM.
config MONREADER
tristate "API for reading z/VM monitor service records"
depends on IUCV
help
Character device driver for reading z/VM monitor service records
config MONWRITER
tristate "API for writing z/VM monitor service records"
depends on S390
default "m"
help
Character device driver for writing z/VM monitor service records
config S390_VMUR
tristate "z/VM unit record device driver"
depends on S390
default "m"
help
Character device driver for z/VM reader, puncher and printer.

View File

@@ -0,0 +1,36 @@
#
# S/390 character devices
#
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o
obj-$(CONFIG_TN3270) += raw3270.o
obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
obj-$(CONFIG_TN3270_TTY) += tty3270.o
obj-$(CONFIG_TN3270_FS) += fs3270.o
obj-$(CONFIG_TN3215) += con3215.o
obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
obj-$(CONFIG_VMCP) += vmcp.o
tape-$(CONFIG_S390_TAPE_BLOCK) += tape_block.o
tape-$(CONFIG_PROC_FS) += tape_proc.o
tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y)
obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
obj-$(CONFIG_MONREADER) += monreader.o
obj-$(CONFIG_MONWRITER) += monwriter.o
obj-$(CONFIG_S390_VMUR) += vmur.o
zcore_mod-objs := sclp_sdias.o zcore.o
obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,636 @@
/*
* IBM/3270 Driver - console view.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* Copyright IBM Corp. 2003, 2009
*/
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/reboot.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/cpcmd.h>
#include <asm/ebcdic.h>
#include "raw3270.h"
#include "tty3270.h"
#include "ctrlchar.h"
#define CON3270_OUTPUT_BUFFER_SIZE 1024
#define CON3270_STRING_PAGES 4
static struct raw3270_fn con3270_fn;
/*
* Main 3270 console view data structure.
*/
struct con3270 {
struct raw3270_view view;
spinlock_t lock;
struct list_head freemem; /* list of free memory for strings. */
/* Output stuff. */
struct list_head lines; /* list of lines. */
struct list_head update; /* list of lines to update. */
int line_nr; /* line number for next update. */
int nr_lines; /* # lines in list. */
int nr_up; /* # lines up in history. */
unsigned long update_flags; /* Update indication bits. */
struct string *cline; /* current output line. */
struct string *status; /* last line of display. */
struct raw3270_request *write; /* single write request. */
struct timer_list timer;
/* Input stuff. */
struct string *input; /* input string for read request. */
struct raw3270_request *read; /* single read request. */
struct raw3270_request *kreset; /* single keyboard reset request. */
struct tasklet_struct readlet; /* tasklet to issue read request. */
};
static struct con3270 *condev;
/* con3270->update_flags. See con3270_update for details. */
#define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
#define CON_UPDATE_LIST 2 /* Update lines in con3270->update. */
#define CON_UPDATE_STATUS 4 /* Update status line. */
#define CON_UPDATE_ALL 8 /* Recreate screen. */
static void con3270_update(struct con3270 *);
/*
* Setup timeout for a device. On timeout trigger an update.
*/
static void con3270_set_timer(struct con3270 *cp, int expires)
{
if (expires == 0)
del_timer(&cp->timer);
else
mod_timer(&cp->timer, jiffies + expires);
}
/*
* The status line is the last line of the screen. It shows the string
* "console view" in the lower left corner and "Running"/"More..."/"Holding"
* in the lower right corner of the screen.
*/
static void
con3270_update_status(struct con3270 *cp)
{
char *str;
str = (cp->nr_up != 0) ? "History" : "Running";
memcpy(cp->status->string + 24, str, 7);
codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
cp->update_flags |= CON_UPDATE_STATUS;
}
static void
con3270_create_status(struct con3270 *cp)
{
static const unsigned char blueprint[] =
{ TO_SBA, 0, 0, TO_SF,TF_LOG,TO_SA,TAT_COLOR, TAC_GREEN,
'c','o','n','s','o','l','e',' ','v','i','e','w',
TO_RA,0,0,0,'R','u','n','n','i','n','g',TO_SF,TF_LOG };
cp->status = alloc_string(&cp->freemem, sizeof(blueprint));
/* Copy blueprint to status line */
memcpy(cp->status->string, blueprint, sizeof(blueprint));
/* Set TO_RA addresses. */
raw3270_buffer_address(cp->view.dev, cp->status->string + 1,
cp->view.cols * (cp->view.rows - 1));
raw3270_buffer_address(cp->view.dev, cp->status->string + 21,
cp->view.cols * cp->view.rows - 8);
/* Convert strings to ebcdic. */
codepage_convert(cp->view.ascebc, cp->status->string + 8, 12);
codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
}
/*
* Set output offsets to 3270 datastream fragment of a console string.
*/
static void
con3270_update_string(struct con3270 *cp, struct string *s, int nr)
{
if (s->len >= cp->view.cols - 5)
return;
raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
cp->view.cols * (nr + 1));
}
/*
* Rebuild update list to print all lines.
*/
static void
con3270_rebuild_update(struct con3270 *cp)
{
struct string *s, *n;
int nr;
/*
* Throw away update list and create a new one,
* containing all lines that will fit on the screen.
*/
list_for_each_entry_safe(s, n, &cp->update, update)
list_del_init(&s->update);
nr = cp->view.rows - 2 + cp->nr_up;
list_for_each_entry_reverse(s, &cp->lines, list) {
if (nr < cp->view.rows - 1)
list_add(&s->update, &cp->update);
if (--nr < 0)
break;
}
cp->line_nr = 0;
cp->update_flags |= CON_UPDATE_LIST;
}
/*
* Alloc string for size bytes. Free strings from history if necessary.
*/
static struct string *
con3270_alloc_string(struct con3270 *cp, size_t size)
{
struct string *s, *n;
s = alloc_string(&cp->freemem, size);
if (s)
return s;
list_for_each_entry_safe(s, n, &cp->lines, list) {
list_del(&s->list);
if (!list_empty(&s->update))
list_del(&s->update);
cp->nr_lines--;
if (free_string(&cp->freemem, s) >= size)
break;
}
s = alloc_string(&cp->freemem, size);
BUG_ON(!s);
if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) {
cp->nr_up = cp->nr_lines - cp->view.rows + 1;
con3270_rebuild_update(cp);
con3270_update_status(cp);
}
return s;
}
/*
* Write completion callback.
*/
static void
con3270_write_callback(struct raw3270_request *rq, void *data)
{
raw3270_request_reset(rq);
xchg(&((struct con3270 *) rq->view)->write, rq);
}
/*
* Update console display.
*/
static void
con3270_update(struct con3270 *cp)
{
struct raw3270_request *wrq;
char wcc, prolog[6];
unsigned long flags;
unsigned long updated;
struct string *s, *n;
int rc;
if (cp->view.dev)
raw3270_activate_view(&cp->view);
wrq = xchg(&cp->write, 0);
if (!wrq) {
con3270_set_timer(cp, 1);
return;
}
spin_lock_irqsave(&cp->view.lock, flags);
updated = 0;
if (cp->update_flags & CON_UPDATE_ALL) {
con3270_rebuild_update(cp);
con3270_update_status(cp);
cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST |
CON_UPDATE_STATUS;
}
if (cp->update_flags & CON_UPDATE_ERASE) {
/* Use erase write alternate to initialize display. */
raw3270_request_set_cmd(wrq, TC_EWRITEA);
updated |= CON_UPDATE_ERASE;
} else
raw3270_request_set_cmd(wrq, TC_WRITE);
wcc = TW_NONE;
raw3270_request_add_data(wrq, &wcc, 1);
/*
* Update status line.
*/
if (cp->update_flags & CON_UPDATE_STATUS)
if (raw3270_request_add_data(wrq, cp->status->string,
cp->status->len) == 0)
updated |= CON_UPDATE_STATUS;
if (cp->update_flags & CON_UPDATE_LIST) {
prolog[0] = TO_SBA;
prolog[3] = TO_SA;
prolog[4] = TAT_COLOR;
prolog[5] = TAC_TURQ;
raw3270_buffer_address(cp->view.dev, prolog + 1,
cp->view.cols * cp->line_nr);
raw3270_request_add_data(wrq, prolog, 6);
/* Write strings in the update list to the screen. */
list_for_each_entry_safe(s, n, &cp->update, update) {
if (s != cp->cline)
con3270_update_string(cp, s, cp->line_nr);
if (raw3270_request_add_data(wrq, s->string,
s->len) != 0)
break;
list_del_init(&s->update);
if (s != cp->cline)
cp->line_nr++;
}
if (list_empty(&cp->update))
updated |= CON_UPDATE_LIST;
}
wrq->callback = con3270_write_callback;
rc = raw3270_start(&cp->view, wrq);
if (rc == 0) {
cp->update_flags &= ~updated;
if (cp->update_flags)
con3270_set_timer(cp, 1);
} else {
raw3270_request_reset(wrq);
xchg(&cp->write, wrq);
}
spin_unlock_irqrestore(&cp->view.lock, flags);
}
/*
* Read tasklet.
*/
static void
con3270_read_tasklet(struct raw3270_request *rrq)
{
static char kreset_data = TW_KR;
struct con3270 *cp;
unsigned long flags;
int nr_up, deactivate;
cp = (struct con3270 *) rrq->view;
spin_lock_irqsave(&cp->view.lock, flags);
nr_up = cp->nr_up;
deactivate = 0;
/* Check aid byte. */
switch (cp->input->string[0]) {
case 0x7d: /* enter: jump to bottom. */
nr_up = 0;
break;
case 0xf3: /* PF3: deactivate the console view. */
deactivate = 1;
break;
case 0x6d: /* clear: start from scratch. */
cp->update_flags = CON_UPDATE_ALL;
con3270_set_timer(cp, 1);
break;
case 0xf7: /* PF7: do a page up in the console log. */
nr_up += cp->view.rows - 2;
if (nr_up + cp->view.rows - 1 > cp->nr_lines) {
nr_up = cp->nr_lines - cp->view.rows + 1;
if (nr_up < 0)
nr_up = 0;
}
break;
case 0xf8: /* PF8: do a page down in the console log. */
nr_up -= cp->view.rows - 2;
if (nr_up < 0)
nr_up = 0;
break;
}
if (nr_up != cp->nr_up) {
cp->nr_up = nr_up;
con3270_rebuild_update(cp);
con3270_update_status(cp);
con3270_set_timer(cp, 1);
}
spin_unlock_irqrestore(&cp->view.lock, flags);
/* Start keyboard reset command. */
raw3270_request_reset(cp->kreset);
raw3270_request_set_cmd(cp->kreset, TC_WRITE);
raw3270_request_add_data(cp->kreset, &kreset_data, 1);
raw3270_start(&cp->view, cp->kreset);
if (deactivate)
raw3270_deactivate_view(&cp->view);
raw3270_request_reset(rrq);
xchg(&cp->read, rrq);
raw3270_put_view(&cp->view);
}
/*
* Read request completion callback.
*/
static void
con3270_read_callback(struct raw3270_request *rq, void *data)
{
raw3270_get_view(rq->view);
/* Schedule tasklet to pass input to tty. */
tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
}
/*
* Issue a read request. Called only from interrupt function.
*/
static void
con3270_issue_read(struct con3270 *cp)
{
struct raw3270_request *rrq;
int rc;
rrq = xchg(&cp->read, 0);
if (!rrq)
/* Read already scheduled. */
return;
rrq->callback = con3270_read_callback;
rrq->callback_data = cp;
raw3270_request_set_cmd(rrq, TC_READMOD);
raw3270_request_set_data(rrq, cp->input->string, cp->input->len);
/* Issue the read modified request. */
rc = raw3270_start_irq(&cp->view, rrq);
if (rc)
raw3270_request_reset(rrq);
}
/*
* Switch to the console view.
*/
static int
con3270_activate(struct raw3270_view *view)
{
struct con3270 *cp;
cp = (struct con3270 *) view;
cp->update_flags = CON_UPDATE_ALL;
con3270_set_timer(cp, 1);
return 0;
}
static void
con3270_deactivate(struct raw3270_view *view)
{
struct con3270 *cp;
cp = (struct con3270 *) view;
del_timer(&cp->timer);
}
static int
con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
con3270_issue_read(cp);
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO;
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}
/* Console view to a 3270 device. */
static struct raw3270_fn con3270_fn = {
.activate = con3270_activate,
.deactivate = con3270_deactivate,
.intv = (void *) con3270_irq
};
static inline void
con3270_cline_add(struct con3270 *cp)
{
if (!list_empty(&cp->cline->list))
/* Already added. */
return;
list_add_tail(&cp->cline->list, &cp->lines);
cp->nr_lines++;
con3270_rebuild_update(cp);
}
static inline void
con3270_cline_insert(struct con3270 *cp, unsigned char c)
{
cp->cline->string[cp->cline->len++] =
cp->view.ascebc[(c < ' ') ? ' ' : c];
if (list_empty(&cp->cline->update)) {
list_add_tail(&cp->cline->update, &cp->update);
cp->update_flags |= CON_UPDATE_LIST;
}
}
static inline void
con3270_cline_end(struct con3270 *cp)
{
struct string *s;
unsigned int size;
/* Copy cline. */
size = (cp->cline->len < cp->view.cols - 5) ?
cp->cline->len + 4 : cp->view.cols;
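/* Short lines reserve 4 extra bytes for the TO_RA fill sequence below. */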
s = con3270_alloc_string(cp, size);
memcpy(s->string, cp->cline->string, cp->cline->len);
if (s->len < cp->view.cols - 5) {
s->string[s->len - 4] = TO_RA;
s->string[s->len - 1] = 0;
} else {
while (--size > cp->cline->len)
s->string[size] = cp->view.ascebc[' '];
}
/* Replace cline with allocated line s and reset cline. */
list_add(&s->list, &cp->cline->list);
list_del_init(&cp->cline->list);
if (!list_empty(&cp->cline->update)) {
list_add(&s->update, &cp->cline->update);
list_del_init(&cp->cline->update);
}
cp->cline->len = 0;
}
/*
* Write a string to the 3270 console
*/
static void
con3270_write(struct console *co, const char *str, unsigned int count)
{
struct con3270 *cp;
unsigned long flags;
unsigned char c;
cp = condev;
spin_lock_irqsave(&cp->view.lock, flags);
while (count-- > 0) {
c = *str++;
if (cp->cline->len == 0)
con3270_cline_add(cp);
if (c != '\n')
con3270_cline_insert(cp, c);
if (c == '\n' || cp->cline->len >= cp->view.cols)
con3270_cline_end(cp);
}
/* Set up a timer to output the current console buffer after 1/10 second. */
cp->nr_up = 0;
if (cp->view.dev && !timer_pending(&cp->timer))
con3270_set_timer(cp, HZ/10);
spin_unlock_irqrestore(&cp->view.lock, flags);
}
static struct tty_driver *
con3270_device(struct console *c, int *index)
{
*index = c->index;
return tty3270_driver;
}
/*
* Wait for end of write request.
*/
static void
con3270_wait_write(struct con3270 *cp)
{
while (!cp->write) {
raw3270_wait_cons_dev(cp->view.dev);
barrier();
}
}
/*
* panic() calls con3270_flush through a panic_notifier
* before the system enters a disabled, endless loop.
*/
static void
con3270_flush(void)
{
struct con3270 *cp;
unsigned long flags;
cp = condev;
if (!cp->view.dev)
return;
raw3270_pm_unfreeze(&cp->view);
spin_lock_irqsave(&cp->view.lock, flags);
con3270_wait_write(cp);
cp->nr_up = 0;
con3270_rebuild_update(cp);
con3270_update_status(cp);
while (cp->update_flags != 0) {
spin_unlock_irqrestore(&cp->view.lock, flags);
con3270_update(cp);
spin_lock_irqsave(&cp->view.lock, flags);
con3270_wait_write(cp);
}
spin_unlock_irqrestore(&cp->view.lock, flags);
}
static int con3270_notify(struct notifier_block *self,
unsigned long event, void *data)
{
con3270_flush();
return NOTIFY_OK;
}
static struct notifier_block on_panic_nb = {
.notifier_call = con3270_notify,
.priority = 0,
};
static struct notifier_block on_reboot_nb = {
.notifier_call = con3270_notify,
.priority = 0,
};
/*
* The console structure for the 3270 console
*/
static struct console con3270 = {
.name = "tty3270",
.write = con3270_write,
.device = con3270_device,
.flags = CON_PRINTBUFFER,
};
/*
* 3270 console initialization code called from console_init().
* NOTE: This is called before kmalloc is available.
*/
static int __init
con3270_init(void)
{
struct ccw_device *cdev;
struct raw3270 *rp;
void *cbuf;
int i;
/* Check if 3270 is to be the console */
if (!CONSOLE_IS_3270)
return -ENODEV;
/* Set the console mode for VM */
if (MACHINE_IS_VM) {
cpcmd("TERM CONMODE 3270", NULL, 0, NULL);
cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
}
cdev = ccw_device_probe_console();
if (IS_ERR(cdev))
return -ENODEV;
rp = raw3270_setup_console(cdev);
if (IS_ERR(rp))
return PTR_ERR(rp);
condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
condev->view.dev = rp;
condev->read = raw3270_request_alloc(0);
condev->read->callback = con3270_read_callback;
condev->read->callback_data = condev;
condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
condev->kreset = raw3270_request_alloc(1);
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update,
(unsigned long) condev);
tasklet_init(&condev->readlet,
(void (*)(unsigned long)) con3270_read_tasklet,
(unsigned long) condev->read);
raw3270_add_view(&condev->view, &con3270_fn, 1);
INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) {
cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
}
condev->cline = alloc_string(&condev->freemem, condev->view.cols);
condev->cline->len = 0;
con3270_create_status(condev);
condev->input = alloc_string(&condev->freemem, 80);
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&con3270);
return 0;
}
console_initcall(con3270_init);

View File

@@ -0,0 +1,75 @@
/*
* drivers/s390/char/ctrlchar.c
* Unified handling of special chars.
*
* Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
*
*/
#include <linux/stddef.h>
#include <asm/errno.h>
#include <linux/sysrq.h>
#include <linux/ctype.h>
#include "ctrlchar.h"
#ifdef CONFIG_MAGIC_SYSRQ
static int ctrlchar_sysrq_key;
static struct tty_struct *sysrq_tty;
static void
ctrlchar_handle_sysrq(struct work_struct *work)
{
handle_sysrq(ctrlchar_sysrq_key, sysrq_tty);
}
static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
#endif
/**
* Check for special chars at start of input.
*
* @param buf Console input buffer.
* @param len Length of valid data in buffer.
* @param tty The tty struct for this console.
* @return CTRLCHAR_NONE, if nothing matched,
* CTRLCHAR_SYSRQ, if sysrq was encountered,
* otherwise the char to be inserted, logically or'ed
* with CTRLCHAR_CTRL.
*/
unsigned int
ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
{
if ((len < 2) || (len > 3))
return CTRLCHAR_NONE;
/* hat is 0xb1 in codepage 037 (US etc.) and thus */
/* converted to 0x5e in ascii ('^') */
if ((buf[0] != '^') && (buf[0] != '\252'))
return CTRLCHAR_NONE;
#ifdef CONFIG_MAGIC_SYSRQ
/* Racy: the sysrq key and tty are handed to the work handler via globals. */
if (len == 3 && buf[1] == '-') {
ctrlchar_sysrq_key = buf[2];
sysrq_tty = tty;
schedule_work(&ctrlchar_work);
return CTRLCHAR_SYSRQ;
}
#endif
if (len != 2)
return CTRLCHAR_NONE;
switch (tolower(buf[1])) {
case 'c':
return INTR_CHAR(tty) | CTRLCHAR_CTRL;
case 'd':
return EOF_CHAR(tty) | CTRLCHAR_CTRL;
case 'z':
return SUSP_CHAR(tty) | CTRLCHAR_CTRL;
}
return CTRLCHAR_NONE;
}

View File

@@ -0,0 +1,20 @@
/*
* drivers/s390/char/ctrlchar.c
* Unified handling of special chars.
*
* Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
*
*/
#include <linux/tty.h>
extern unsigned int
ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
#define CTRLCHAR_NONE (1 << 8)
#define CTRLCHAR_CTRL (2 << 8)
#define CTRLCHAR_SYSRQ (3 << 8)
#define CTRLCHAR_MASK (~0xffu)

View File

@@ -0,0 +1,158 @@
/* Do not edit this file! It was automatically generated by */
/* loadkeys --mktable defkeymap.map > defkeymap.c */
#include <linux/types.h>
#include <linux/keyboard.h>
#include <linux/kd.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
u_short plain_map[NR_KEYS] = {
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf020, 0xf000, 0xf0e2, 0xf0e4, 0xf0e0, 0xf0e1, 0xf0e3, 0xf0e5,
0xf0e7, 0xf0f1, 0xf0a2, 0xf02e, 0xf03c, 0xf028, 0xf02b, 0xf07c,
0xf026, 0xf0e9, 0xf0e2, 0xf0eb, 0xf0e8, 0xf0ed, 0xf0ee, 0xf0ef,
0xf0ec, 0xf0df, 0xf021, 0xf024, 0xf02a, 0xf029, 0xf03b, 0xf0ac,
0xf02d, 0xf02f, 0xf0c2, 0xf0c4, 0xf0c0, 0xf0c1, 0xf0c3, 0xf0c5,
0xf0c7, 0xf0d1, 0xf0a6, 0xf02c, 0xf025, 0xf05f, 0xf03e, 0xf03f,
0xf0f8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0c8, 0xf0cd, 0xf0ce, 0xf0cf,
0xf0cc, 0xf060, 0xf03a, 0xf023, 0xf040, 0xf027, 0xf03d, 0xf022,
};
static u_short shift_map[NR_KEYS] = {
0xf0d8, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067,
0xf068, 0xf069, 0xf0ab, 0xf0bb, 0xf0f0, 0xf0fd, 0xf0fe, 0xf0b1,
0xf0b0, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f, 0xf070,
0xf071, 0xf072, 0xf000, 0xf000, 0xf0e6, 0xf0b8, 0xf0c6, 0xf0a4,
0xf0b5, 0xf07e, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077, 0xf078,
0xf079, 0xf07a, 0xf0a1, 0xf0bf, 0xf0d0, 0xf0dd, 0xf0de, 0xf0ae,
0xf402, 0xf0a3, 0xf0a5, 0xf0b7, 0xf0a9, 0xf0a7, 0xf0b6, 0xf0bc,
0xf0bd, 0xf0be, 0xf05b, 0xf05d, 0xf000, 0xf0a8, 0xf0b4, 0xf0d7,
0xf07b, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047,
0xf048, 0xf049, 0xf000, 0xf0f4, 0xf0f6, 0xf0f2, 0xf0f3, 0xf0f5,
0xf07d, 0xf04a, 0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f, 0xf050,
0xf051, 0xf052, 0xf0b9, 0xf0fb, 0xf0fc, 0xf0f9, 0xf0fa, 0xf0ff,
0xf05c, 0xf0f7, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057, 0xf058,
0xf059, 0xf05a, 0xf0b2, 0xf0d4, 0xf0d6, 0xf0d2, 0xf0d3, 0xf0d5,
0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
0xf038, 0xf039, 0xf0b3, 0xf0db, 0xf0dc, 0xf0d9, 0xf0da, 0xf000,
};
static u_short ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf11f, 0xf120, 0xf121, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf01a, 0xf003, 0xf212, 0xf004, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf109, 0xf10a, 0xf206, 0xf00a, 0xf200, 0xf200,
};
static u_short shift_ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf10c, 0xf10d, 0xf10e, 0xf10f, 0xf110, 0xf111, 0xf112,
0xf113, 0xf11e, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf100, 0xf101, 0xf211, 0xf103, 0xf104, 0xf105, 0xf20b,
0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
ushort *key_maps[MAX_NR_KEYMAPS] = {
plain_map, shift_map, NULL, NULL,
ctrl_map, shift_ctrl_map, NULL,
};
unsigned int keymap_count = 4;
/*
* Philosophy: most people do not define more strings, but those who do
* often want quite a lot of string space. So, we statically allocate
* the default and allocate dynamically in chunks of 512 bytes.
*/
char func_buf[] = {
'\033', '[', '[', 'A', 0,
'\033', '[', '[', 'B', 0,
'\033', '[', '[', 'C', 0,
'\033', '[', '[', 'D', 0,
'\033', '[', '[', 'E', 0,
'\033', '[', '1', '7', '~', 0,
'\033', '[', '1', '8', '~', 0,
'\033', '[', '1', '9', '~', 0,
'\033', '[', '2', '0', '~', 0,
'\033', '[', '2', '1', '~', 0,
'\033', '[', '2', '3', '~', 0,
'\033', '[', '2', '4', '~', 0,
'\033', '[', '2', '5', '~', 0,
'\033', '[', '2', '6', '~', 0,
'\033', '[', '2', '8', '~', 0,
'\033', '[', '2', '9', '~', 0,
'\033', '[', '3', '1', '~', 0,
'\033', '[', '3', '2', '~', 0,
'\033', '[', '3', '3', '~', 0,
'\033', '[', '3', '4', '~', 0,
};
char *funcbufptr = func_buf;
int funcbufsize = sizeof(func_buf);
int funcbufleft = 0; /* space left */
char *func_table[MAX_NR_FUNC] = {
func_buf + 0,
func_buf + 5,
func_buf + 10,
func_buf + 15,
func_buf + 20,
func_buf + 25,
func_buf + 31,
func_buf + 37,
func_buf + 43,
func_buf + 49,
func_buf + 55,
func_buf + 61,
func_buf + 67,
func_buf + 73,
func_buf + 79,
func_buf + 85,
func_buf + 91,
func_buf + 97,
func_buf + 103,
func_buf + 109,
NULL,
};
struct kbdiacruc accent_table[MAX_DIACR] = {
{'^', 'c', 0003}, {'^', 'd', 0004},
{'^', 'z', 0032}, {'^', 0012, 0000},
};
unsigned int accent_table_size = 4;

View File

@@ -0,0 +1,191 @@
# Default keymap for 3270 (ebcdic codepage 037).
keymaps 0-1,4-5
keycode 0 = nul Oslash
keycode 1 = nul a
keycode 2 = nul b
keycode 3 = nul c
keycode 4 = nul d
keycode 5 = nul e
keycode 6 = nul f
keycode 7 = nul g
keycode 8 = nul h
keycode 9 = nul i
keycode 10 = nul guillemotleft
keycode 11 = nul guillemotright
keycode 12 = nul eth
keycode 13 = nul yacute
keycode 14 = nul thorn
keycode 15 = nul plusminus
keycode 16 = nul degree
keycode 17 = nul j
keycode 18 = nul k
keycode 19 = nul l
keycode 20 = nul m
keycode 21 = nul n
keycode 22 = nul o
keycode 23 = nul p
keycode 24 = nul q
keycode 25 = nul r
keycode 26 = nul nul
keycode 27 = nul nul
keycode 28 = nul ae
keycode 29 = nul cedilla
keycode 30 = nul AE
keycode 31 = nul currency
keycode 32 = nul mu
keycode 33 = nul tilde
keycode 34 = nul s
keycode 35 = nul t
keycode 36 = nul u
keycode 37 = nul v
keycode 38 = nul w
keycode 39 = nul x
keycode 40 = nul y
keycode 41 = nul z
keycode 42 = nul exclamdown
keycode 43 = nul questiondown
keycode 44 = nul ETH
keycode 45 = nul Yacute
keycode 46 = nul THORN
keycode 47 = nul registered
keycode 48 = nul dead_circumflex
keycode 49 = nul sterling
keycode 50 = nul yen
keycode 51 = nul periodcentered
keycode 52 = nul copyright
keycode 53 = nul section
keycode 54 = nul paragraph
keycode 55 = nul onequarter
keycode 56 = nul onehalf
keycode 57 = nul threequarters
keycode 58 = nul bracketleft
keycode 59 = nul bracketright
keycode 60 = nul nul
keycode 61 = nul diaeresis
keycode 62 = nul acute
keycode 63 = nul multiply
keycode 64 = space braceleft
keycode 65 = nul A
keycode 66 = acircumflex B
keycode 67 = adiaeresis C
keycode 68 = agrave D
keycode 69 = aacute E
keycode 70 = atilde F
keycode 71 = aring G
keycode 72 = ccedilla H
keycode 73 = ntilde I
keycode 74 = cent nul
keycode 75 = period ocircumflex
keycode 76 = less odiaeresis
keycode 77 = parenleft ograve
keycode 78 = plus oacute
keycode 79 = bar otilde
keycode 80 = ampersand braceright
keycode 81 = eacute J
keycode 82 = acircumflex K
keycode 83 = ediaeresis L
keycode 84 = egrave M
keycode 85 = iacute N
keycode 86 = icircumflex O
keycode 87 = idiaeresis P
keycode 88 = igrave Q
keycode 89 = ssharp R
keycode 90 = exclam onesuperior
keycode 91 = dollar ucircumflex
keycode 92 = asterisk udiaeresis
keycode 93 = parenright ugrave
keycode 94 = semicolon uacute
keycode 95 = notsign ydiaeresis
keycode 96 = minus backslash
keycode 97 = slash division
keycode 98 = Acircumflex S
keycode 99 = Adiaeresis T
keycode 100 = Agrave U
keycode 101 = Aacute V
keycode 102 = Atilde W
keycode 103 = Aring X
keycode 104 = Ccedilla Y
keycode 105 = Ntilde Z
keycode 106 = brokenbar twosuperior
keycode 107 = comma Ocircumflex
keycode 108 = percent Odiaeresis
keycode 109 = underscore Ograve
keycode 110 = greater Oacute
keycode 111 = question Otilde
keycode 112 = oslash zero
keycode 113 = Eacute one
keycode 114 = Ecircumflex two
keycode 115 = Ediaeresis three
keycode 116 = Egrave four
keycode 117 = Iacute five
keycode 118 = Icircumflex six
keycode 119 = Idiaeresis seven
keycode 120 = Igrave eight
keycode 121 = grave nine
keycode 122 = colon threesuperior
keycode 123 = numbersign Ucircumflex
keycode 124 = at Udiaeresis
keycode 125 = apostrophe Ugrave
keycode 126 = equal Uacute
keycode 127 = quotedbl nul
# AID keys
control keycode 74 = F22
control keycode 75 = F23
control keycode 76 = F24
control keycode 107 = Control_z # PA3
control keycode 108 = Control_c # PA1
control keycode 109 = KeyboardSignal # Clear
control keycode 110 = Control_d # PA2
control keycode 122 = F10
control keycode 123 = F11 # F11
control keycode 124 = Last_Console # F12
control keycode 125 = Linefeed
shift control keycode 65 = F13
shift control keycode 66 = F14
shift control keycode 67 = F15
shift control keycode 68 = F16
shift control keycode 69 = F17
shift control keycode 70 = F18
shift control keycode 71 = F19
shift control keycode 72 = F20
shift control keycode 73 = F21
shift control keycode 113 = F1
shift control keycode 114 = F2
shift control keycode 115 = Incr_Console
shift control keycode 116 = F4
shift control keycode 117 = F5
shift control keycode 118 = F6
shift control keycode 119 = Scroll_Backward
shift control keycode 120 = Scroll_Forward
shift control keycode 121 = F9
string F1 = "\033[[A"
string F2 = "\033[[B"
string F3 = "\033[[C"
string F4 = "\033[[D"
string F5 = "\033[[E"
string F6 = "\033[17~"
string F7 = "\033[18~"
string F8 = "\033[19~"
string F9 = "\033[20~"
string F10 = "\033[21~"
string F11 = "\033[23~"
string F12 = "\033[24~"
string F13 = "\033[25~"
string F14 = "\033[26~"
string F15 = "\033[28~"
string F16 = "\033[29~"
string F17 = "\033[31~"
string F18 = "\033[32~"
string F19 = "\033[33~"
string F20 = "\033[34~"
# string F21 ??
# string F22 ??
# string F23 ??
# string F24 ??
compose '^' 'c' to Control_c
compose '^' 'd' to Control_d
compose '^' 'z' to Control_z
compose '^' '\012' to nul

View File

@@ -0,0 +1,540 @@
/*
* IBM/3270 Driver - fullscreen driver.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* Copyright IBM Corp. 2003, 2009
*/
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/smp_lock.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include "raw3270.h"
#include "ctrlchar.h"
static struct raw3270_fn fs3270_fn;
struct fs3270 {
struct raw3270_view view;
struct pid *fs_pid; /* Pid of controlling program. */
int read_command; /* ccw command to use for reads. */
int write_command; /* ccw command to use for writes. */
int attention; /* Got attention. */
int active; /* Fullscreen view is active. */
struct raw3270_request *init; /* single init request. */
wait_queue_head_t wait; /* Init & attention wait queue. */
struct idal_buffer *rdbuf; /* full-screen-deactivate buffer */
size_t rdbuf_size; /* size of data returned by RDBUF */
};
static void
fs3270_wake_up(struct raw3270_request *rq, void *data)
{
wake_up((wait_queue_head_t *) data);
}
static inline int
fs3270_working(struct fs3270 *fp)
{
/*
* The fullscreen view is in working order if the view
* has been activated AND the initial request is finished.
*/
return fp->active && raw3270_request_final(fp->init);
}
static int
fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
{
struct fs3270 *fp;
int rc;
fp = (struct fs3270 *) view;
rq->callback = fs3270_wake_up;
rq->callback_data = &fp->wait;
do {
if (!fs3270_working(fp)) {
/* Fullscreen view isn't ready yet. */
rc = wait_event_interruptible(fp->wait,
fs3270_working(fp));
if (rc != 0)
break;
}
rc = raw3270_start(view, rq);
if (rc == 0) {
/* Started successfully. Now wait for completion. */
wait_event(fp->wait, raw3270_request_final(rq));
}
} while (rc == -EACCES);
return rc;
}
/*
* Switch to the fullscreen view.
*/
static void
fs3270_reset_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
fp = (struct fs3270 *) rq->view;
raw3270_request_reset(rq);
wake_up(&fp->wait);
}
static void
fs3270_restore_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
fp = (struct fs3270 *) rq->view;
if (rq->rc != 0 || rq->rescnt != 0) {
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
}
fp->rdbuf_size = 0;
raw3270_request_reset(rq);
wake_up(&fp->wait);
}
static int
fs3270_activate(struct raw3270_view *view)
{
struct fs3270 *fp;
char *cp;
int rc;
fp = (struct fs3270 *) view;
/* If an old init command is still running just return. */
if (!raw3270_request_final(fp->init))
return 0;
if (fp->rdbuf_size == 0) {
/* No saved buffer. Just clear the screen. */
raw3270_request_set_cmd(fp->init, TC_EWRITEA);
fp->init->callback = fs3270_reset_callback;
} else {
/* Restore fullscreen buffer saved by fs3270_deactivate. */
raw3270_request_set_cmd(fp->init, TC_EWRITEA);
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->ccw.count = fp->rdbuf_size;
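/*
 * Patch the 5 bytes reserved by fs3270_deactivate: restore the
 * keyboard, set the buffer address to the cursor position saved by
 * the read-buffer command, insert the cursor there, and point the
 * following set-buffer-address order at the start of the screen.
 */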
cp = fp->rdbuf->data[0];
cp[0] = TW_KR;
cp[1] = TO_SBA;
cp[2] = cp[6];
cp[3] = cp[7];
cp[4] = TO_IC;
cp[5] = TO_SBA;
cp[6] = 0x40;
cp[7] = 0x40;
fp->init->rescnt = 0;
fp->init->callback = fs3270_restore_callback;
}
rc = fp->init->rc = raw3270_start_locked(view, fp->init);
if (rc)
fp->init->callback(fp->init, NULL);
else
fp->active = 1;
return rc;
}
/*
* Shutdown fullscreen view.
*/
static void
fs3270_save_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
fp = (struct fs3270 *) rq->view;
/* Correct idal buffer element 0 address. */
fp->rdbuf->data[0] -= 5;
fp->rdbuf->size += 5;
/*
* If the rdbuf command failed or the idal buffer is
* too small for the amount of data returned by the
* rdbuf command, then we have no choice but to send
* a SIGHUP to the application.
*/
if (rq->rc != 0 || rq->rescnt == 0) {
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
fp->rdbuf_size = 0;
} else
fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
raw3270_request_reset(rq);
wake_up(&fp->wait);
}
static void
fs3270_deactivate(struct raw3270_view *view)
{
struct fs3270 *fp;
fp = (struct fs3270 *) view;
fp->active = 0;
/* If an old init command is still running just return. */
if (!raw3270_request_final(fp->init))
return;
/* Prepare read-buffer request. */
raw3270_request_set_cmd(fp->init, TC_RDBUF);
/*
* Hackish: skip first 5 bytes of the idal buffer to make
* room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
* in the activation command.
*/
fp->rdbuf->data[0] += 5;
fp->rdbuf->size -= 5;
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->rescnt = 0;
fp->init->callback = fs3270_save_callback;
/* Start I/O to read in the 3270 buffer. */
fp->init->rc = raw3270_start_locked(view, fp->init);
if (fp->init->rc)
fp->init->callback(fp->init, NULL);
}
static int
fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Set indication and wake waiters for attention. */
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
fp->attention = 1;
wake_up(&fp->wait);
}
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO;
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}
/*
* Process reads from fullscreen 3270.
*/
static ssize_t
fs3270_read(struct file *filp, char __user *data, size_t count, loff_t *off)
{
struct fs3270 *fp;
struct raw3270_request *rq;
struct idal_buffer *ib;
ssize_t rc;
if (count == 0 || count > 65535)
return -EINVAL;
fp = filp->private_data;
if (!fp)
return -ENODEV;
ib = idal_buffer_alloc(count, 0);
if (IS_ERR(ib))
return -ENOMEM;
rq = raw3270_request_alloc(0);
if (!IS_ERR(rq)) {
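/*
 * Default to read-modified (command 6) once a write command has
 * been set; otherwise fall back to read-buffer (command 2).
 */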
if (fp->read_command == 0 && fp->write_command != 0)
fp->read_command = 6;
raw3270_request_set_cmd(rq, fp->read_command ? : 2);
raw3270_request_set_idal(rq, ib);
rc = wait_event_interruptible(fp->wait, fp->attention);
fp->attention = 0;
if (rc == 0) {
rc = fs3270_do_io(&fp->view, rq);
if (rc == 0) {
count -= rq->rescnt;
if (idal_buffer_to_user(ib, data, count) != 0)
rc = -EFAULT;
else
rc = count;
}
}
raw3270_request_free(rq);
} else
rc = PTR_ERR(rq);
idal_buffer_free(ib);
return rc;
}
/*
* Process writes to fullscreen 3270.
*/
static ssize_t
fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *off)
{
struct fs3270 *fp;
struct raw3270_request *rq;
struct idal_buffer *ib;
int write_command;
ssize_t rc;
fp = filp->private_data;
if (!fp)
return -ENODEV;
ib = idal_buffer_alloc(count, 0);
if (IS_ERR(ib))
return -ENOMEM;
rq = raw3270_request_alloc(0);
if (!IS_ERR(rq)) {
if (idal_buffer_from_user(ib, data, count) == 0) {
write_command = fp->write_command ? : 1;
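/* Map erase/write (5) to erase/write alternate (13). */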
if (write_command == 5)
write_command = 13;
raw3270_request_set_cmd(rq, write_command);
raw3270_request_set_idal(rq, ib);
rc = fs3270_do_io(&fp->view, rq);
if (rc == 0)
rc = count - rq->rescnt;
} else
rc = -EFAULT;
raw3270_request_free(rq);
} else
rc = PTR_ERR(rq);
idal_buffer_free(ib);
return rc;
}
/*
* process ioctl commands for the tube driver
*/
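/*
 * Illustrative only, not part of this driver: a fullscreen program
 * would typically query the screen geometry first, e.g.
 *	struct raw3270_iocb iocb;
 *	ioctl(fd, TUBGETMOD, &iocb);
 * before building its 3270 datastream.
 */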
static long
fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct fs3270 *fp;
struct raw3270_iocb iocb;
int rc;
fp = filp->private_data;
if (!fp)
return -ENODEV;
rc = 0;
lock_kernel();
switch (cmd) {
case TUBICMD:
fp->read_command = arg;
break;
case TUBOCMD:
fp->write_command = arg;
break;
case TUBGETI:
rc = put_user(fp->read_command, (char __user *) arg);
break;
case TUBGETO:
rc = put_user(fp->write_command, (char __user *) arg);
break;
case TUBGETMOD:
iocb.model = fp->view.model;
iocb.line_cnt = fp->view.rows;
iocb.col_cnt = fp->view.cols;
iocb.pf_cnt = 24;
iocb.re_cnt = 20;
iocb.map = 0;
if (copy_to_user((char __user *) arg, &iocb,
sizeof(struct raw3270_iocb)))
rc = -EFAULT;
break;
}
unlock_kernel();
return rc;
}
/*
* Allocate fs3270 structure.
*/
static struct fs3270 *
fs3270_alloc_view(void)
{
struct fs3270 *fp;
fp = kzalloc(sizeof(struct fs3270), GFP_KERNEL);
if (!fp)
return ERR_PTR(-ENOMEM);
fp->init = raw3270_request_alloc(0);
if (IS_ERR(fp->init)) {
kfree(fp);
return ERR_PTR(-ENOMEM);
}
return fp;
}
/*
* Free fs3270 structure.
*/
static void
fs3270_free_view(struct raw3270_view *view)
{
struct fs3270 *fp;
fp = (struct fs3270 *) view;
if (fp->rdbuf)
idal_buffer_free(fp->rdbuf);
raw3270_request_free(((struct fs3270 *) view)->init);
kfree(view);
}
/*
* Unlink fs3270 data structure from filp.
*/
static void
fs3270_release(struct raw3270_view *view)
{
struct fs3270 *fp;
fp = (struct fs3270 *) view;
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
}
/* View to a 3270 device. Can be console, tty or fullscreen. */
static struct raw3270_fn fs3270_fn = {
.activate = fs3270_activate,
.deactivate = fs3270_deactivate,
.intv = (void *) fs3270_irq,
.release = fs3270_release,
.free = fs3270_free_view
};
/*
* This routine is called whenever a 3270 fullscreen device is opened.
*/
static int
fs3270_open(struct inode *inode, struct file *filp)
{
struct fs3270 *fp;
struct idal_buffer *ib;
int minor, rc = 0;
if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR)
return -ENODEV;
minor = iminor(filp->f_path.dentry->d_inode);
/* Check for minor 0 multiplexer. */
if (minor == 0) {
struct tty_struct *tty = get_current_tty();
if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
tty_kref_put(tty);
return -ENODEV;
}
minor = tty->index + RAW3270_FIRSTMINOR;
tty_kref_put(tty);
}
lock_kernel();
/* Check if some other program is already using fullscreen mode. */
fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
if (!IS_ERR(fp)) {
raw3270_put_view(&fp->view);
rc = -EBUSY;
goto out;
}
/* Allocate fullscreen view structure. */
fp = fs3270_alloc_view();
if (IS_ERR(fp)) {
rc = PTR_ERR(fp);
goto out;
}
init_waitqueue_head(&fp->wait);
fp->fs_pid = get_pid(task_pid(current));
rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
if (rc) {
fs3270_free_view(&fp->view);
goto out;
}
/* Allocate idal-buffer. */
ib = idal_buffer_alloc(2*fp->view.rows*fp->view.cols + 5, 0);
if (IS_ERR(ib)) {
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
rc = PTR_ERR(ib);
goto out;
}
fp->rdbuf = ib;
rc = raw3270_activate_view(&fp->view);
if (rc) {
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
goto out;
}
filp->private_data = fp;
out:
unlock_kernel();
return rc;
}
/*
* This routine is called when the 3270 tty is closed. We wait
* for the remaining request to be completed. Then we clean up.
*/
static int
fs3270_close(struct inode *inode, struct file *filp)
{
struct fs3270 *fp;
fp = filp->private_data;
filp->private_data = NULL;
if (fp) {
put_pid(fp->fs_pid);
fp->fs_pid = NULL;
raw3270_reset(&fp->view);
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
}
return 0;
}
static const struct file_operations fs3270_fops = {
.owner = THIS_MODULE, /* owner */
.read = fs3270_read, /* read */
.write = fs3270_write, /* write */
.unlocked_ioctl = fs3270_ioctl, /* ioctl */
.compat_ioctl = fs3270_ioctl, /* ioctl */
.open = fs3270_open, /* open */
.release = fs3270_close, /* release */
};
/*
* 3270 fullscreen driver initialization.
*/
static int __init
fs3270_init(void)
{
int rc;
rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
if (rc)
return rc;
return 0;
}
static void __exit
fs3270_exit(void)
{
unregister_chrdev(IBM_FS3270_MAJOR, "fs3270");
}
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR);
module_init(fs3270_init);
module_exit(fs3270_exit);

View File

@@ -0,0 +1,560 @@
/*
* drivers/s390/char/keyboard.c
* ebcdic keycode functions for s390 console drivers
*
* S390 version
* Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/consolemap.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
#include <asm/uaccess.h>
#include "keyboard.h"
/*
* Handler Tables.
*/
#define K_HANDLERS\
k_self, k_fn, k_spec, k_ignore,\
k_dead, k_ignore, k_ignore, k_ignore,\
k_ignore, k_ignore, k_ignore, k_ignore,\
k_ignore, k_ignore, k_ignore, k_ignore
typedef void (k_handler_fn)(struct kbd_data *, unsigned char);
static k_handler_fn K_HANDLERS;
static k_handler_fn *k_handler[16] = { K_HANDLERS };
/* maximum values each key_handler can handle */
static const int kbd_max_vals[] = {
255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0,
NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);
static unsigned char ret_diacr[NR_DEAD] = {
'`', '\'', '^', '~', '"', ','
};
/*
* Alloc/free of kbd_data structures.
*/
struct kbd_data *
kbd_alloc(void) {
struct kbd_data *kbd;
int i, len;
kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
if (!kbd)
goto out;
kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL);
if (!kbd->key_maps)
goto out_kbd;
for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
if (key_maps[i]) {
kbd->key_maps[i] =
kmalloc(sizeof(u_short)*NR_KEYS, GFP_KERNEL);
if (!kbd->key_maps[i])
goto out_maps;
memcpy(kbd->key_maps[i], key_maps[i],
sizeof(u_short)*NR_KEYS);
}
}
kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
if (!kbd->func_table)
goto out_maps;
for (i = 0; i < ARRAY_SIZE(func_table); i++) {
if (func_table[i]) {
len = strlen(func_table[i]) + 1;
kbd->func_table[i] = kmalloc(len, GFP_KERNEL);
if (!kbd->func_table[i])
goto out_func;
memcpy(kbd->func_table[i], func_table[i], len);
}
}
kbd->fn_handler =
kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
if (!kbd->fn_handler)
goto out_func;
kbd->accent_table =
kmalloc(sizeof(struct kbdiacruc)*MAX_DIACR, GFP_KERNEL);
if (!kbd->accent_table)
goto out_fn_handler;
memcpy(kbd->accent_table, accent_table,
sizeof(struct kbdiacruc)*MAX_DIACR);
kbd->accent_table_size = accent_table_size;
return kbd;
out_fn_handler:
kfree(kbd->fn_handler);
out_func:
for (i = 0; i < ARRAY_SIZE(func_table); i++)
kfree(kbd->func_table[i]);
kfree(kbd->func_table);
out_maps:
for (i = 0; i < ARRAY_SIZE(key_maps); i++)
kfree(kbd->key_maps[i]);
kfree(kbd->key_maps);
out_kbd:
kfree(kbd);
out:
return NULL;
}
void
kbd_free(struct kbd_data *kbd)
{
int i;
kfree(kbd->accent_table);
kfree(kbd->fn_handler);
for (i = 0; i < ARRAY_SIZE(func_table); i++)
kfree(kbd->func_table[i]);
kfree(kbd->func_table);
for (i = 0; i < ARRAY_SIZE(key_maps); i++)
kfree(kbd->key_maps[i]);
kfree(kbd->key_maps);
kfree(kbd);
}
/*
* Generate ascii -> ebcdic translation table from kbd_data.
*/
void
kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
{
unsigned short *keymap, keysym;
int i, j, k;
memset(ascebc, 0x40, 256);
for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
keymap = kbd->key_maps[i];
if (!keymap)
continue;
for (j = 0; j < NR_KEYS; j++) {
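/* Keycode value: bit 7 marks entries taken from a shifted keymap. */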
k = ((i & 1) << 7) + j;
keysym = keymap[j];
if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
KTYP(keysym) == (KT_LETTER | 0xf0))
ascebc[KVAL(keysym)] = k;
else if (KTYP(keysym) == (KT_DEAD | 0xf0))
ascebc[ret_diacr[KVAL(keysym)]] = k;
}
}
}
#if 0
/*
* Generate ebcdic -> ascii translation table from kbd_data.
*/
void
kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
{
unsigned short *keymap, keysym;
int i, j, k;
memset(ebcasc, ' ', 256);
for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
keymap = kbd->key_maps[i];
if (!keymap)
continue;
for (j = 0; j < NR_KEYS; j++) {
keysym = keymap[j];
k = ((i & 1) << 7) + j;
if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
KTYP(keysym) == (KT_LETTER | 0xf0))
ebcasc[k] = KVAL(keysym);
else if (KTYP(keysym) == (KT_DEAD | 0xf0))
ebcasc[k] = ret_diacr[KVAL(keysym)];
}
}
}
#endif
/*
* We have a combining character DIACR here, followed by the character CH.
* If the combination occurs in the table, return the corresponding value.
* Otherwise, if CH is a space or equals DIACR, return DIACR.
* Otherwise, conclude that DIACR was not combining after all,
* queue it and return CH.
*/
static unsigned int
handle_diacr(struct kbd_data *kbd, unsigned int ch)
{
int i, d;
d = kbd->diacr;
kbd->diacr = 0;
for (i = 0; i < kbd->accent_table_size; i++) {
if (kbd->accent_table[i].diacr == d &&
kbd->accent_table[i].base == ch)
return kbd->accent_table[i].result;
}
if (ch == ' ' || ch == d)
return d;
kbd_put_queue(kbd->tty, d);
return ch;
}
/*
* Handle dead key.
*/
static void
k_dead(struct kbd_data *kbd, unsigned char value)
{
value = ret_diacr[value];
kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value);
}
/*
* Normal character handler.
*/
static void
k_self(struct kbd_data *kbd, unsigned char value)
{
if (kbd->diacr)
value = handle_diacr(kbd, value);
kbd_put_queue(kbd->tty, value);
}
/*
* Special key handlers
*/
static void
k_ignore(struct kbd_data *kbd, unsigned char value)
{
}
/*
* Function key handler.
*/
static void
k_fn(struct kbd_data *kbd, unsigned char value)
{
if (kbd->func_table[value])
kbd_puts_queue(kbd->tty, kbd->func_table[value]);
}
static void
k_spec(struct kbd_data *kbd, unsigned char value)
{
if (value >= NR_FN_HANDLER)
return;
if (kbd->fn_handler[value])
kbd->fn_handler[value](kbd);
}
/*
* Put utf8 character to tty flip buffer.
* UTF-8 is defined for words of up to 31 bits,
* but we need only 16 bits here
*/
static void
to_utf8(struct tty_struct *tty, ushort c)
{
if (c < 0x80)
/* 0******* */
kbd_put_queue(tty, c);
else if (c < 0x800) {
/* 110***** 10****** */
kbd_put_queue(tty, 0xc0 | (c >> 6));
kbd_put_queue(tty, 0x80 | (c & 0x3f));
} else {
/* 1110**** 10****** 10****** */
kbd_put_queue(tty, 0xe0 | (c >> 12));
kbd_put_queue(tty, 0x80 | ((c >> 6) & 0x3f));
kbd_put_queue(tty, 0x80 | (c & 0x3f));
}
}
/*
* Process keycode.
*/
void
kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
{
unsigned short keysym;
unsigned char type, value;
if (!kbd || !kbd->tty)
return;
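/*
 * Keycodes are banked in blocks of 128: plain (map 0), shift
 * (map 1), ctrl (map 4) and shift+ctrl (map 5).
 */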
if (keycode >= 384)
keysym = kbd->key_maps[5][keycode - 384];
else if (keycode >= 256)
keysym = kbd->key_maps[4][keycode - 256];
else if (keycode >= 128)
keysym = kbd->key_maps[1][keycode - 128];
else
keysym = kbd->key_maps[0][keycode];
type = KTYP(keysym);
if (type >= 0xf0) {
type -= 0xf0;
if (type == KT_LETTER)
type = KT_LATIN;
value = KVAL(keysym);
#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
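/* Sequence "^ - <key>": '^' arms SysRq, '-' confirms, <key> goes to handle_sysrq(). */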
if (kbd->sysrq) {
if (kbd->sysrq == K(KT_LATIN, '-')) {
kbd->sysrq = 0;
handle_sysrq(value, kbd->tty);
return;
}
if (value == '-') {
kbd->sysrq = K(KT_LATIN, '-');
return;
}
/* Incomplete sysrq sequence. */
(*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq));
kbd->sysrq = 0;
} else if ((type == KT_LATIN && value == '^') ||
(type == KT_DEAD && ret_diacr[value] == '^')) {
kbd->sysrq = K(type, value);
return;
}
#endif
(*k_handler[type])(kbd, value);
} else
to_utf8(kbd->tty, keysym);
}
/*
* Ioctl stuff.
*/
static int
do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
int cmd, int perm)
{
struct kbentry tmp;
ushort *key_map, val, ov;
if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
return -EFAULT;
#if NR_KEYS < 256
if (tmp.kb_index >= NR_KEYS)
return -EINVAL;
#endif
#if MAX_NR_KEYMAPS < 256
if (tmp.kb_table >= MAX_NR_KEYMAPS)
return -EINVAL;
#endif
switch (cmd) {
case KDGKBENT:
key_map = kbd->key_maps[tmp.kb_table];
if (key_map) {
val = U(key_map[tmp.kb_index]);
if (KTYP(val) >= KBD_NR_TYPES)
val = K_HOLE;
} else
val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
return put_user(val, &user_kbe->kb_value);
case KDSKBENT:
if (!perm)
return -EPERM;
if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
/* disallocate map */
key_map = kbd->key_maps[tmp.kb_table];
if (key_map) {
kbd->key_maps[tmp.kb_table] = NULL;
kfree(key_map);
}
break;
}
if (KTYP(tmp.kb_value) >= KBD_NR_TYPES)
return -EINVAL;
if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
return -EINVAL;
if (!(key_map = kbd->key_maps[tmp.kb_table])) {
int j;
key_map = kmalloc(sizeof(plain_map),
GFP_KERNEL);
if (!key_map)
return -ENOMEM;
kbd->key_maps[tmp.kb_table] = key_map;
for (j = 0; j < NR_KEYS; j++)
key_map[j] = U(K_HOLE);
}
ov = U(key_map[tmp.kb_index]);
if (tmp.kb_value == ov)
break; /* nothing to do */
/*
* Attention Key.
*/
if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
key_map[tmp.kb_index] = U(tmp.kb_value);
break;
}
return 0;
}
static int
do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
int cmd, int perm)
{
unsigned char kb_func;
char *p;
int len;
/* Get u_kbs->kb_func. */
if (get_user(kb_func, &u_kbs->kb_func))
return -EFAULT;
#if MAX_NR_FUNC < 256
if (kb_func >= MAX_NR_FUNC)
return -EINVAL;
#endif
switch (cmd) {
case KDGKBSENT:
p = kbd->func_table[kb_func];
if (p) {
len = strlen(p);
if (len >= sizeof(u_kbs->kb_string))
len = sizeof(u_kbs->kb_string) - 1;
if (copy_to_user(u_kbs->kb_string, p, len))
return -EFAULT;
} else
len = 0;
if (put_user('\0', u_kbs->kb_string + len))
return -EFAULT;
break;
case KDSKBSENT:
if (!perm)
return -EPERM;
len = strnlen_user(u_kbs->kb_string,
sizeof(u_kbs->kb_string) - 1);
if (!len)
return -EFAULT;
if (len > sizeof(u_kbs->kb_string) - 1)
return -EINVAL;
p = kmalloc(len + 1, GFP_KERNEL);
if (!p)
return -ENOMEM;
if (copy_from_user(p, u_kbs->kb_string, len)) {
kfree(p);
return -EFAULT;
}
p[len] = 0;
kfree(kbd->func_table[kb_func]);
kbd->func_table[kb_func] = p;
break;
}
return 0;
}
int
kbd_ioctl(struct kbd_data *kbd, struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp;
unsigned int ct;
int perm;
argp = (void __user *)arg;
/*
* To have permissions to do most of the vt ioctls, we either have
* to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
*/
perm = current->signal->tty == kbd->tty || capable(CAP_SYS_TTY_CONFIG);
switch (cmd) {
case KDGKBTYPE:
return put_user(KB_101, (char __user *)argp);
case KDGKBENT:
case KDSKBENT:
return do_kdsk_ioctl(kbd, argp, cmd, perm);
case KDGKBSENT:
case KDSKBSENT:
return do_kdgkb_ioctl(kbd, argp, cmd, perm);
case KDGKBDIACR:
{
struct kbdiacrs __user *a = argp;
struct kbdiacr diacr;
int i;
if (put_user(kbd->accent_table_size, &a->kb_cnt))
return -EFAULT;
for (i = 0; i < kbd->accent_table_size; i++) {
diacr.diacr = kbd->accent_table[i].diacr;
diacr.base = kbd->accent_table[i].base;
diacr.result = kbd->accent_table[i].result;
if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr)))
return -EFAULT;
}
return 0;
}
case KDGKBDIACRUC:
{
struct kbdiacrsuc __user *a = argp;
ct = kbd->accent_table_size;
if (put_user(ct, &a->kb_cnt))
return -EFAULT;
if (copy_to_user(a->kbdiacruc, kbd->accent_table,
ct * sizeof(struct kbdiacruc)))
return -EFAULT;
return 0;
}
case KDSKBDIACR:
{
struct kbdiacrs __user *a = argp;
struct kbdiacr diacr;
int i;
if (!perm)
return -EPERM;
if (get_user(ct, &a->kb_cnt))
return -EFAULT;
if (ct >= MAX_DIACR)
return -EINVAL;
kbd->accent_table_size = ct;
for (i = 0; i < ct; i++) {
if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr)))
return -EFAULT;
kbd->accent_table[i].diacr = diacr.diacr;
kbd->accent_table[i].base = diacr.base;
kbd->accent_table[i].result = diacr.result;
}
return 0;
}
case KDSKBDIACRUC:
{
struct kbdiacrsuc __user *a = argp;
if (!perm)
return -EPERM;
if (get_user(ct, &a->kb_cnt))
return -EFAULT;
if (ct >= MAX_DIACR)
return -EINVAL;
kbd->accent_table_size = ct;
if (copy_from_user(kbd->accent_table, a->kbdiacruc,
ct * sizeof(struct kbdiacruc)))
return -EFAULT;
return 0;
}
default:
return -ENOIOCTLCMD;
}
}
EXPORT_SYMBOL(kbd_ioctl);
EXPORT_SYMBOL(kbd_ascebc);
EXPORT_SYMBOL(kbd_free);
EXPORT_SYMBOL(kbd_alloc);
EXPORT_SYMBOL(kbd_keycode);

View File

@@ -0,0 +1,57 @@
/*
* drivers/s390/char/keyboard.h
* ebcdic keycode functions for s390 console drivers
*
* Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/keyboard.h>
#define NR_FN_HANDLER 20
struct kbd_data;
typedef void (fn_handler_fn)(struct kbd_data *);
/*
* FIXME: explain key_maps tricks.
*/
struct kbd_data {
struct tty_struct *tty;
unsigned short **key_maps;
char **func_table;
fn_handler_fn **fn_handler;
struct kbdiacruc *accent_table;
unsigned int accent_table_size;
unsigned int diacr;
unsigned short sysrq;
};
struct kbd_data *kbd_alloc(void);
void kbd_free(struct kbd_data *);
void kbd_ascebc(struct kbd_data *, unsigned char *);
void kbd_keycode(struct kbd_data *, unsigned int);
int kbd_ioctl(struct kbd_data *, struct file *, unsigned int, unsigned long);
/*
* Helper Functions.
*/
static inline void
kbd_put_queue(struct tty_struct *tty, int ch)
{
tty_insert_flip_char(tty, ch, 0);
tty_schedule_flip(tty);
}
static inline void
kbd_puts_queue(struct tty_struct *tty, char *cp)
{
while (*cp)
tty_insert_flip_char(tty, *cp++, 0);
tty_schedule_flip(tty);
}

View File

@@ -0,0 +1,647 @@
/*
* Character device driver for reading z/VM *MONITOR service records.
*
* Copyright IBM Corp. 2004, 2009
*
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#define KMSG_COMPONENT "monreader"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <net/iucv/iucv.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/extmem.h>
#define MON_COLLECT_SAMPLE 0x80
#define MON_COLLECT_EVENT 0x40
#define MON_SERVICE "*MONITOR"
#define MON_IN_USE 0x01
#define MON_MSGLIM 255
static char mon_dcss_name[9] = "MONDCSS\0";
struct mon_msg {
u32 pos;
u32 mca_offset;
struct iucv_message msg;
char msglim_reached;
char replied_msglim;
};
struct mon_private {
struct iucv_path *path;
struct mon_msg *msg_array[MON_MSGLIM];
unsigned int write_index;
unsigned int read_index;
atomic_t msglim_count;
atomic_t read_ready;
atomic_t iucv_connected;
atomic_t iucv_severed;
};
static unsigned long mon_in_use = 0;
static unsigned long mon_dcss_start;
static unsigned long mon_dcss_end;
static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
static u8 user_data_connect[16] = {
/* Version code, must be 0x01 for shared mode */
0x01,
/* what to collect */
MON_COLLECT_SAMPLE | MON_COLLECT_EVENT,
/* DCSS name in EBCDIC, 8 bytes padded with blanks */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};
static u8 user_data_sever[16] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};
static struct device *monreader_device;
/******************************************************************************
* helper functions *
*****************************************************************************/
/*
* Create the 8-byte EBCDIC DCSS segment name from
* an ASCII name, including padding.
*/
static void dcss_mkname(char *ascii_name, char *ebcdic_name)
{
int i;
for (i = 0; i < 8; i++) {
if (ascii_name[i] == '\0')
break;
ebcdic_name[i] = toupper(ascii_name[i]);
}
for (; i < 8; i++)
ebcdic_name[i] = ' ';
ASCEBC(ebcdic_name, 8);
}
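/*
 * The 8-byte rmmsg area of the IUCV message holds the start and end
 * addresses of the message's monitor control area (MCA) in the DCSS.
 */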
static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
{
return *(u32 *) &monmsg->msg.rmmsg;
}
static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
{
return *(u32 *) &monmsg->msg.rmmsg[4];
}
static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
{
return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
}
static inline u32 mon_mca_size(struct mon_msg *monmsg)
{
return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
}
static inline u32 mon_rec_start(struct mon_msg *monmsg)
{
return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
}
static inline u32 mon_rec_end(struct mon_msg *monmsg)
{
return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
}
static int mon_check_mca(struct mon_msg *monmsg)
{
if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
(mon_rec_start(monmsg) < mon_dcss_start) ||
(mon_rec_end(monmsg) > mon_dcss_end) ||
(mon_mca_type(monmsg, 0) == 0) ||
(mon_mca_size(monmsg) % 12 != 0) ||
(mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
(mon_mca_end(monmsg) > mon_dcss_end) ||
(mon_mca_start(monmsg) < mon_dcss_start) ||
((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
return -EINVAL;
return 0;
}
static int mon_send_reply(struct mon_msg *monmsg,
struct mon_private *monpriv)
{
int rc;
rc = iucv_message_reply(monpriv->path, &monmsg->msg,
IUCV_IPRMDATA, NULL, 0);
atomic_dec(&monpriv->msglim_count);
if (likely(!monmsg->msglim_reached)) {
monmsg->pos = 0;
monmsg->mca_offset = 0;
monpriv->read_index = (monpriv->read_index + 1) %
MON_MSGLIM;
atomic_dec(&monpriv->read_ready);
} else
monmsg->replied_msglim = 1;
if (rc) {
pr_err("Reading monitor data failed with rc=%i\n", rc);
return -EIO;
}
return 0;
}
static void mon_free_mem(struct mon_private *monpriv)
{
int i;
for (i = 0; i < MON_MSGLIM; i++)
kfree(monpriv->msg_array[i]);
kfree(monpriv);
}
static struct mon_private *mon_alloc_mem(void)
{
int i;
struct mon_private *monpriv;
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
if (!monpriv)
return NULL;
for (i = 0; i < MON_MSGLIM; i++) {
monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
GFP_KERNEL);
if (!monpriv->msg_array[i]) {
mon_free_mem(monpriv);
return NULL;
}
}
return monpriv;
}
static inline void mon_next_mca(struct mon_msg *monmsg)
{
if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
return;
monmsg->mca_offset += 12;
monmsg->pos = 0;
}
static struct mon_msg *mon_next_message(struct mon_private *monpriv)
{
struct mon_msg *monmsg;
if (!atomic_read(&monpriv->read_ready))
return NULL;
monmsg = monpriv->msg_array[monpriv->read_index];
if (unlikely(monmsg->replied_msglim)) {
monmsg->replied_msglim = 0;
monmsg->msglim_reached = 0;
monmsg->pos = 0;
monmsg->mca_offset = 0;
monpriv->read_index = (monpriv->read_index + 1) %
MON_MSGLIM;
atomic_dec(&monpriv->read_ready);
return ERR_PTR(-EOVERFLOW);
}
return monmsg;
}
/******************************************************************************
* IUCV handler *
*****************************************************************************/
static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
struct mon_private *monpriv = path->private;
atomic_set(&monpriv->iucv_connected, 1);
wake_up(&mon_conn_wait_queue);
}
static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
struct mon_private *monpriv = path->private;
pr_err("z/VM *MONITOR system service disconnected with rc=%i\n",
ipuser[0]);
iucv_path_sever(path, NULL);
atomic_set(&monpriv->iucv_severed, 1);
wake_up(&mon_conn_wait_queue);
wake_up_interruptible(&mon_read_wait_queue);
}
static void mon_iucv_message_pending(struct iucv_path *path,
struct iucv_message *msg)
{
struct mon_private *monpriv = path->private;
memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
msg, sizeof(*msg));
if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
pr_warning("The read queue for monitor data is full\n");
monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
}
monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
atomic_inc(&monpriv->read_ready);
wake_up_interruptible(&mon_read_wait_queue);
}
static struct iucv_handler monreader_iucv_handler = {
.path_complete = mon_iucv_path_complete,
.path_severed = mon_iucv_path_severed,
.message_pending = mon_iucv_message_pending,
};
/******************************************************************************
* file operations *
*****************************************************************************/
static int mon_open(struct inode *inode, struct file *filp)
{
struct mon_private *monpriv;
int rc;
/*
* only one user allowed
*/
lock_kernel();
rc = -EBUSY;
if (test_and_set_bit(MON_IN_USE, &mon_in_use))
goto out;
rc = -ENOMEM;
monpriv = mon_alloc_mem();
if (!monpriv)
goto out_use;
/*
* Connect to *MONITOR service
*/
monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
if (!monpriv->path)
goto out_priv;
rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
MON_SERVICE, NULL, user_data_connect, monpriv);
if (rc) {
pr_err("Connecting to the z/VM *MONITOR system service "
"failed with rc=%i\n", rc);
rc = -EIO;
goto out_path;
}
/*
* Wait for connection confirmation
*/
wait_event(mon_conn_wait_queue,
atomic_read(&monpriv->iucv_connected) ||
atomic_read(&monpriv->iucv_severed));
if (atomic_read(&monpriv->iucv_severed)) {
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
rc = -EIO;
goto out_path;
}
filp->private_data = monpriv;
dev_set_drvdata(monreader_device, monpriv);
unlock_kernel();
return nonseekable_open(inode, filp);
out_path:
iucv_path_free(monpriv->path);
out_priv:
mon_free_mem(monpriv);
out_use:
clear_bit(MON_IN_USE, &mon_in_use);
out:
unlock_kernel();
return rc;
}
static int mon_close(struct inode *inode, struct file *filp)
{
int rc, i;
struct mon_private *monpriv = filp->private_data;
/*
* Close IUCV connection and unregister
*/
if (monpriv->path) {
rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc)
pr_warning("Disconnecting the z/VM *MONITOR system "
"service failed with rc=%i\n", rc);
iucv_path_free(monpriv->path);
}
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
atomic_set(&monpriv->read_ready, 0);
atomic_set(&monpriv->msglim_count, 0);
monpriv->write_index = 0;
monpriv->read_index = 0;
dev_set_drvdata(monreader_device, NULL);
for (i = 0; i < MON_MSGLIM; i++)
kfree(monpriv->msg_array[i]);
kfree(monpriv);
clear_bit(MON_IN_USE, &mon_in_use);
return 0;
}
static ssize_t mon_read(struct file *filp, char __user *data,
size_t count, loff_t *ppos)
{
struct mon_private *monpriv = filp->private_data;
struct mon_msg *monmsg;
int ret;
u32 mce_start;
monmsg = mon_next_message(monpriv);
if (IS_ERR(monmsg))
return PTR_ERR(monmsg);
if (!monmsg) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(mon_read_wait_queue,
atomic_read(&monpriv->read_ready) ||
atomic_read(&monpriv->iucv_severed));
if (ret)
return ret;
if (unlikely(atomic_read(&monpriv->iucv_severed)))
return -EIO;
monmsg = monpriv->msg_array[monpriv->read_index];
}
if (!monmsg->pos)
monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
if (mon_check_mca(monmsg))
goto reply;
/* read monitor control element (12 bytes) first */
mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
count = min(count, (size_t) mce_start + 12 - monmsg->pos);
ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
count);
if (ret)
return -EFAULT;
monmsg->pos += count;
if (monmsg->pos == mce_start + 12)
monmsg->pos = mon_rec_start(monmsg);
goto out_copy;
}
/* read records */
if (monmsg->pos <= mon_rec_end(monmsg)) {
count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
+ 1);
ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
count);
if (ret)
return -EFAULT;
monmsg->pos += count;
if (monmsg->pos > mon_rec_end(monmsg))
mon_next_mca(monmsg);
goto out_copy;
}
reply:
ret = mon_send_reply(monmsg, monpriv);
return ret;
out_copy:
*ppos += count;
return count;
}
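/*
 * Illustrative userspace sketch (an assumption, not part of this
 * driver): a reader consumes the 12-byte monitor control element and
 * then the monitor records, exactly as mon_read() above hands them
 * out. The device path, buffer size and handle_chunk() helper are
 * hypothetical.
 *
 *	int fd = open("/dev/monreader", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		handle_chunk(buf, n);
 *	close(fd);
 */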
static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
{
struct mon_private *monpriv = filp->private_data;
poll_wait(filp, &mon_read_wait_queue, p);
if (unlikely(atomic_read(&monpriv->iucv_severed)))
return POLLERR;
if (atomic_read(&monpriv->read_ready))
return POLLIN | POLLRDNORM;
return 0;
}
static const struct file_operations mon_fops = {
.owner = THIS_MODULE,
.open = &mon_open,
.release = &mon_close,
.read = &mon_read,
.poll = &mon_poll,
};
static struct miscdevice mon_dev = {
.name = "monreader",
.fops = &mon_fops,
.minor = MISC_DYNAMIC_MINOR,
};
/******************************************************************************
* suspend / resume *
*****************************************************************************/
static int monreader_freeze(struct device *dev)
{
struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
return 0;
if (monpriv->path) {
rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc)
pr_warning("Disconnecting the z/VM *MONITOR system "
"service failed with rc=%i\n", rc);
iucv_path_free(monpriv->path);
}
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
atomic_set(&monpriv->read_ready, 0);
atomic_set(&monpriv->msglim_count, 0);
monpriv->write_index = 0;
monpriv->read_index = 0;
monpriv->path = NULL;
return 0;
}
static int monreader_thaw(struct device *dev)
{
struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
return 0;
rc = -ENOMEM;
monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
if (!monpriv->path)
goto out;
rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
MON_SERVICE, NULL, user_data_connect, monpriv);
if (rc) {
pr_err("Connecting to the z/VM *MONITOR system service "
"failed with rc=%i\n", rc);
goto out_path;
}
wait_event(mon_conn_wait_queue,
atomic_read(&monpriv->iucv_connected) ||
atomic_read(&monpriv->iucv_severed));
if (atomic_read(&monpriv->iucv_severed))
goto out_path;
return 0;
out_path:
rc = -EIO;
iucv_path_free(monpriv->path);
monpriv->path = NULL;
out:
atomic_set(&monpriv->iucv_severed, 1);
return rc;
}
static int monreader_restore(struct device *dev)
{
int rc;
segment_unload(mon_dcss_name);
rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
&mon_dcss_start, &mon_dcss_end);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
panic("fatal monreader resume error: no monitor dcss\n");
}
return monreader_thaw(dev);
}
static struct dev_pm_ops monreader_pm_ops = {
.freeze = monreader_freeze,
.thaw = monreader_thaw,
.restore = monreader_restore,
};
static struct device_driver monreader_driver = {
.name = "monreader",
.bus = &iucv_bus,
.pm = &monreader_pm_ops,
};
/******************************************************************************
* module init/exit *
*****************************************************************************/
static int __init mon_init(void)
{
int rc;
if (!MACHINE_IS_VM) {
pr_err("The z/VM *MONITOR record device driver cannot be "
"loaded without z/VM\n");
return -ENODEV;
}
/*
* Register with IUCV and connect to *MONITOR service
*/
rc = iucv_register(&monreader_iucv_handler, 1);
if (rc) {
pr_err("The z/VM *MONITOR record device driver failed to "
"register with IUCV\n");
return rc;
}
rc = driver_register(&monreader_driver);
if (rc)
goto out_iucv;
monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!monreader_device)
goto out_driver;
dev_set_name(monreader_device, "monreader-dev");
monreader_device->bus = &iucv_bus;
monreader_device->parent = iucv_root;
monreader_device->driver = &monreader_driver;
monreader_device->release = (void (*)(struct device *))kfree;
rc = device_register(monreader_device);
if (rc) {
put_device(monreader_device);
goto out_driver;
}
rc = segment_type(mon_dcss_name);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
goto out_device;
}
if (rc != SEG_TYPE_SC) {
pr_err("The specified *MONITOR DCSS %s does not have the "
"required type SC\n", mon_dcss_name);
rc = -EINVAL;
goto out_device;
}
rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
&mon_dcss_start, &mon_dcss_end);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
rc = -EINVAL;
goto out_device;
}
dcss_mkname(mon_dcss_name, &user_data_connect[8]);
rc = misc_register(&mon_dev);
if (rc < 0)
goto out;
return 0;
out:
segment_unload(mon_dcss_name);
out_device:
device_unregister(monreader_device);
out_driver:
driver_unregister(&monreader_driver);
out_iucv:
iucv_unregister(&monreader_iucv_handler, 1);
return rc;
}
static void __exit mon_exit(void)
{
segment_unload(mon_dcss_name);
WARN_ON(misc_deregister(&mon_dev) != 0);
device_unregister(monreader_device);
driver_unregister(&monreader_driver);
iucv_unregister(&monreader_iucv_handler, 1);
return;
}
module_init(mon_init);
module_exit(mon_exit);
module_param_string(mondcss, mon_dcss_name, 9, 0444);
MODULE_PARM_DESC(mondcss, "Name of DCSS segment to be used for *MONITOR "
"service, max. 8 chars. Default is MONDCSS");
MODULE_AUTHOR("Gerald Schaefer <geraldsc@de.ibm.com>");
MODULE_DESCRIPTION("Character device driver for reading z/VM "
"monitor service records.");
MODULE_LICENSE("GPL");
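/*
 * Illustrative usage (not part of the source): the DCSS to connect to
 * can be chosen at load time via the "mondcss" parameter declared
 * above; the segment name below is hypothetical.
 *
 *	modprobe monreader mondcss=MYDCSS
 */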

View File

@@ -0,0 +1,396 @@
/*
* Character device driver for writing z/VM *MONITOR service records.
*
* Copyright IBM Corp. 2006, 2009
*
* Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
*/
#define KMSG_COMPONENT "monwriter"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/appldata.h>
#include <asm/monwriter.h>
#define MONWRITE_MAX_DATALEN 4010
static int mon_max_bufs = 255;
static int mon_buf_count;
struct mon_buf {
struct list_head list;
struct monwrite_hdr hdr;
int diag_done;
char *data;
};
static LIST_HEAD(mon_priv_list);
struct mon_private {
struct list_head priv_list;
struct list_head list;
struct monwrite_hdr hdr;
size_t hdr_to_read;
size_t data_to_read;
struct mon_buf *current_buf;
struct mutex thread_mutex;
};
/*
* helper functions
*/
static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
{
struct appldata_product_id id;
int rc;
strcpy(id.prod_nr, "LNXAPPL");
id.prod_fn = myhdr->applid;
id.record_nr = myhdr->record_num;
id.version_nr = myhdr->version;
id.release_nr = myhdr->release;
id.mod_lvl = myhdr->mod_level;
rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
if (rc <= 0)
return rc;
pr_err("Writing monitor data failed with rc=%i\n", rc);
if (rc == 5)
return -EPERM;
return -EINVAL;
}
static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
struct monwrite_hdr *monhdr)
{
struct mon_buf *entry, *next;
list_for_each_entry_safe(entry, next, &monpriv->list, list)
if ((entry->hdr.mon_function == monhdr->mon_function ||
monhdr->mon_function == MONWRITE_STOP_INTERVAL) &&
entry->hdr.applid == monhdr->applid &&
entry->hdr.record_num == monhdr->record_num &&
entry->hdr.version == monhdr->version &&
entry->hdr.release == monhdr->release &&
entry->hdr.mod_level == monhdr->mod_level)
return entry;
return NULL;
}
static int monwrite_new_hdr(struct mon_private *monpriv)
{
struct monwrite_hdr *monhdr = &monpriv->hdr;
struct mon_buf *monbuf;
int rc;
if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
monhdr->mon_function > MONWRITE_START_CONFIG ||
monhdr->hdrlen != sizeof(struct monwrite_hdr))
return -EINVAL;
monbuf = NULL;
if (monhdr->mon_function != MONWRITE_GEN_EVENT)
monbuf = monwrite_find_hdr(monpriv, monhdr);
if (monbuf) {
if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
monhdr->datalen = monbuf->hdr.datalen;
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_STOP_REC);
list_del(&monbuf->list);
mon_buf_count--;
kfree(monbuf->data);
kfree(monbuf);
monbuf = NULL;
}
} else if (monhdr->mon_function != MONWRITE_STOP_INTERVAL) {
if (mon_buf_count >= mon_max_bufs)
return -ENOSPC;
monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
if (!monbuf)
return -ENOMEM;
monbuf->data = kzalloc(monhdr->datalen,
GFP_KERNEL | GFP_DMA);
if (!monbuf->data) {
kfree(monbuf);
return -ENOMEM;
}
monbuf->hdr = *monhdr;
list_add_tail(&monbuf->list, &monpriv->list);
if (monhdr->mon_function != MONWRITE_GEN_EVENT)
mon_buf_count++;
}
monpriv->current_buf = monbuf;
return 0;
}
static int monwrite_new_data(struct mon_private *monpriv)
{
struct monwrite_hdr *monhdr = &monpriv->hdr;
struct mon_buf *monbuf = monpriv->current_buf;
int rc = 0;
switch (monhdr->mon_function) {
case MONWRITE_START_INTERVAL:
if (!monbuf->diag_done) {
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_START_INTERVAL_REC);
monbuf->diag_done = 1;
}
break;
case MONWRITE_START_CONFIG:
if (!monbuf->diag_done) {
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_START_CONFIG_REC);
monbuf->diag_done = 1;
}
break;
case MONWRITE_GEN_EVENT:
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_GEN_EVENT_REC);
list_del(&monpriv->current_buf->list);
kfree(monpriv->current_buf->data);
kfree(monpriv->current_buf);
monpriv->current_buf = NULL;
break;
default:
/* monhdr->mon_function is checked in monwrite_new_hdr */
BUG();
}
return rc;
}
/*
* file operations
*/
static int monwrite_open(struct inode *inode, struct file *filp)
{
struct mon_private *monpriv;
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
if (!monpriv)
return -ENOMEM;
lock_kernel();
INIT_LIST_HEAD(&monpriv->list);
monpriv->hdr_to_read = sizeof(monpriv->hdr);
mutex_init(&monpriv->thread_mutex);
filp->private_data = monpriv;
list_add_tail(&monpriv->priv_list, &mon_priv_list);
unlock_kernel();
return nonseekable_open(inode, filp);
}
static int monwrite_close(struct inode *inode, struct file *filp)
{
struct mon_private *monpriv = filp->private_data;
struct mon_buf *entry, *next;
list_for_each_entry_safe(entry, next, &monpriv->list, list) {
if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
monwrite_diag(&entry->hdr, entry->data,
APPLDATA_STOP_REC);
mon_buf_count--;
list_del(&entry->list);
kfree(entry->data);
kfree(entry);
}
list_del(&monpriv->priv_list);
kfree(monpriv);
return 0;
}
static ssize_t monwrite_write(struct file *filp, const char __user *data,
size_t count, loff_t *ppos)
{
struct mon_private *monpriv = filp->private_data;
size_t len, written;
void *to;
int rc;
mutex_lock(&monpriv->thread_mutex);
for (written = 0; written < count; ) {
if (monpriv->hdr_to_read) {
len = min(count - written, monpriv->hdr_to_read);
to = (char *) &monpriv->hdr +
sizeof(monpriv->hdr) - monpriv->hdr_to_read;
if (copy_from_user(to, data + written, len)) {
rc = -EFAULT;
goto out_error;
}
monpriv->hdr_to_read -= len;
written += len;
if (monpriv->hdr_to_read > 0)
continue;
rc = monwrite_new_hdr(monpriv);
if (rc)
goto out_error;
monpriv->data_to_read = monpriv->current_buf ?
monpriv->current_buf->hdr.datalen : 0;
}
if (monpriv->data_to_read) {
len = min(count - written, monpriv->data_to_read);
to = monpriv->current_buf->data +
monpriv->hdr.datalen - monpriv->data_to_read;
if (copy_from_user(to, data + written, len)) {
rc = -EFAULT;
goto out_error;
}
monpriv->data_to_read -= len;
written += len;
if (monpriv->data_to_read > 0)
continue;
rc = monwrite_new_data(monpriv);
if (rc)
goto out_error;
}
monpriv->hdr_to_read = sizeof(monpriv->hdr);
}
mutex_unlock(&monpriv->thread_mutex);
return written;
out_error:
monpriv->data_to_read = 0;
monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
mutex_unlock(&monpriv->thread_mutex);
return rc;
}
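/*
 * Illustrative userspace sketch (an assumption, not part of this
 * driver): writes are parsed by monwrite_write() above as a stream of
 * monwrite_hdr structures, each followed by hdr.datalen bytes of
 * monitor data. The device path and the applid value are assumptions.
 *
 *	struct monwrite_hdr hdr = {
 *		.hdrlen = sizeof(hdr),
 *		.mon_function = MONWRITE_START_INTERVAL,
 *		.applid = 0x42,
 *		.datalen = len,
 *	};
 *	int fd = open("/dev/monwriter", O_WRONLY);
 *	write(fd, &hdr, sizeof(hdr));
 *	write(fd, data, len);
 */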
static const struct file_operations monwrite_fops = {
.owner = THIS_MODULE,
.open = &monwrite_open,
.release = &monwrite_close,
.write = &monwrite_write,
};
static struct miscdevice mon_dev = {
.name = "monwriter",
.fops = &monwrite_fops,
.minor = MISC_DYNAMIC_MINOR,
};
/*
* suspend/resume
*/
static int monwriter_freeze(struct device *dev)
{
struct mon_private *monpriv;
struct mon_buf *monbuf;
list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
list_for_each_entry(monbuf, &monpriv->list, list) {
if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_STOP_REC);
}
}
return 0;
}
static int monwriter_restore(struct device *dev)
{
struct mon_private *monpriv;
struct mon_buf *monbuf;
list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
list_for_each_entry(monbuf, &monpriv->list, list) {
if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_START_INTERVAL_REC);
if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_START_CONFIG_REC);
}
}
return 0;
}
static int monwriter_thaw(struct device *dev)
{
return monwriter_restore(dev);
}
static struct dev_pm_ops monwriter_pm_ops = {
.freeze = monwriter_freeze,
.thaw = monwriter_thaw,
.restore = monwriter_restore,
};
static struct platform_driver monwriter_pdrv = {
.driver = {
.name = "monwriter",
.owner = THIS_MODULE,
.pm = &monwriter_pm_ops,
},
};
static struct platform_device *monwriter_pdev;
/*
* module init/exit
*/
static int __init mon_init(void)
{
int rc;
if (!MACHINE_IS_VM)
return -ENODEV;
rc = platform_driver_register(&monwriter_pdrv);
if (rc)
return rc;
monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
0);
if (IS_ERR(monwriter_pdev)) {
rc = PTR_ERR(monwriter_pdev);
goto out_driver;
}
rc = misc_register(&mon_dev);
if (rc)
goto out_device;
return 0;
out_device:
platform_device_unregister(monwriter_pdev);
out_driver:
platform_driver_unregister(&monwriter_pdrv);
return rc;
}
static void __exit mon_exit(void)
{
WARN_ON(misc_deregister(&mon_dev) != 0);
platform_device_unregister(monwriter_pdev);
platform_driver_unregister(&monwriter_pdrv);
}
module_init(mon_init);
module_exit(mon_exit);
module_param_named(max_bufs, mon_max_bufs, int, 0644);
MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
"that can be active at one time");
MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
MODULE_DESCRIPTION("Character device driver for writing z/VM "
"APPLDATA monitor records.");
MODULE_LICENSE("GPL");

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,279 @@
/*
* IBM/3270 Driver
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* Copyright IBM Corp. 2003, 2009
*/
#include <asm/idals.h>
#include <asm/ioctl.h>
/* ioctls for fullscreen 3270 */
#define TUBICMD _IO('3', 3) /* set ccw command for fs reads. */
#define TUBOCMD _IO('3', 4) /* set ccw command for fs writes. */
#define TUBGETI _IO('3', 7) /* get ccw command for fs reads. */
#define TUBGETO _IO('3', 8) /* get ccw command for fs writes. */
#define TUBSETMOD _IO('3',12) /* FIXME: what does it do? */
#define TUBGETMOD _IO('3',13) /* FIXME: what does it do? */
/* Local Channel Commands */
#define TC_WRITE 0x01 /* Write */
#define TC_RDBUF 0x02 /* Read Buffer */
#define TC_EWRITE 0x05 /* Erase write */
#define TC_READMOD 0x06 /* Read modified */
#define TC_EWRITEA 0x0d /* Erase write alternate */
#define TC_WRITESF 0x11 /* Write structured field */
/* Buffer Control Orders */
#define TO_SF 0x1d /* Start field */
#define TO_SBA 0x11 /* Set buffer address */
#define TO_IC 0x13 /* Insert cursor */
#define TO_PT 0x05 /* Program tab */
#define TO_RA 0x3c /* Repeat to address */
#define TO_SFE 0x29 /* Start field extended */
#define TO_EUA 0x12 /* Erase unprotected to address */
#define TO_MF 0x2c /* Modify field */
#define TO_SA 0x28 /* Set attribute */
/* Field Attribute Bytes */
#define TF_INPUT 0x40 /* Visible input */
#define TF_INPUTN 0x4c /* Invisible input */
#define TF_INMDT 0xc1 /* Visible, Set-MDT */
#define TF_LOG 0x60
/* Character Attribute Bytes */
#define TAT_RESET 0x00
#define TAT_FIELD 0xc0
#define TAT_EXTHI 0x41
#define TAT_COLOR 0x42
#define TAT_CHARS 0x43
#define TAT_TRANS 0x46
/* Extended-Highlighting Bytes */
#define TAX_RESET 0x00
#define TAX_BLINK 0xf1
#define TAX_REVER 0xf2
#define TAX_UNDER 0xf4
/* Reset value */
#define TAR_RESET 0x00
/* Color values */
#define TAC_RESET 0x00
#define TAC_BLUE 0xf1
#define TAC_RED 0xf2
#define TAC_PINK 0xf3
#define TAC_GREEN 0xf4
#define TAC_TURQ 0xf5
#define TAC_YELLOW 0xf6
#define TAC_WHITE 0xf7
#define TAC_DEFAULT 0x00
/* Write Control Characters */
#define TW_NONE 0x40 /* No particular action */
#define TW_KR 0xc2 /* Keyboard restore */
#define TW_PLUSALARM 0x04 /* Add this bit for alarm */
#define RAW3270_FIRSTMINOR 1 /* First minor number */
#define RAW3270_MAXDEVS 255 /* Max number of 3270 devices */
/* Structure for the TUBGETMOD and TUBSETMOD ioctls. */
struct raw3270_iocb {
short model;
short line_cnt;
short col_cnt;
short pf_cnt;
short re_cnt;
short map;
};
struct raw3270;
struct raw3270_view;
/* 3270 CCW request */
struct raw3270_request {
struct list_head list; /* list head for request queueing. */
struct raw3270_view *view; /* view of this request */
struct ccw1 ccw; /* single ccw. */
void *buffer; /* output buffer. */
size_t size; /* size of output buffer. */
int rescnt; /* residual count from devstat. */
int rc; /* return code for this request. */
/* Callback for delivering final status. */
void (*callback)(struct raw3270_request *, void *);
void *callback_data;
};
struct raw3270_request *raw3270_request_alloc(size_t size);
struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
void raw3270_request_free(struct raw3270_request *);
void raw3270_request_reset(struct raw3270_request *);
void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
int raw3270_request_add_data(struct raw3270_request *, void *, size_t);
void raw3270_request_set_data(struct raw3270_request *, void *, size_t);
void raw3270_request_set_idal(struct raw3270_request *, struct idal_buffer *);
static inline int
raw3270_request_final(struct raw3270_request *rq)
{
return list_empty(&rq->list);
}
void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
/* Return value of *intv (see raw3270_fn below) can be one of the following: */
#define RAW3270_IO_DONE 0 /* request finished */
#define RAW3270_IO_BUSY 1 /* request still active */
#define RAW3270_IO_RETRY 2 /* retry current request */
#define RAW3270_IO_STOP 3 /* kill current request */
/*
* Functions of a 3270 view.
*/
struct raw3270_fn {
int (*activate)(struct raw3270_view *);
void (*deactivate)(struct raw3270_view *);
int (*intv)(struct raw3270_view *,
struct raw3270_request *, struct irb *);
void (*release)(struct raw3270_view *);
void (*free)(struct raw3270_view *);
};
/*
* View structure chaining. The raw3270_view structure is meant to
* be embedded at the start of the real view data structure, e.g.:
* struct example {
* struct raw3270_view view;
* ...
* };
*/
struct raw3270_view {
struct list_head list;
spinlock_t lock;
atomic_t ref_count;
struct raw3270 *dev;
struct raw3270_fn *fn;
unsigned int model;
unsigned int rows, cols; /* # of rows & columns of the view */
unsigned char *ascebc; /* ascii -> ebcdic table */
};
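/*
 * Because the raw3270_view is embedded at the start of the real view
 * (see the chaining comment above), the owning structure can be
 * recovered from a view pointer. A minimal sketch, assuming the
 * "struct example" layout shown above:
 *
 *	struct example *ex = container_of(view, struct example, view);
 */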
int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
int raw3270_activate_view(struct raw3270_view *);
void raw3270_del_view(struct raw3270_view *);
void raw3270_deactivate_view(struct raw3270_view *);
struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int);
int raw3270_start(struct raw3270_view *, struct raw3270_request *);
int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *);
int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
int raw3270_reset(struct raw3270_view *);
struct raw3270_view *raw3270_view(struct raw3270_view *);
/* Reference count inliner for view structures. */
static inline void
raw3270_get_view(struct raw3270_view *view)
{
atomic_inc(&view->ref_count);
}
extern wait_queue_head_t raw3270_wait_queue;
static inline void
raw3270_put_view(struct raw3270_view *view)
{
if (atomic_dec_return(&view->ref_count) == 0)
wake_up(&raw3270_wait_queue);
}
struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
void raw3270_wait_cons_dev(struct raw3270 *);
/* Notifier for device addition/removal */
int raw3270_register_notifier(void (*notifier)(int, int));
void raw3270_unregister_notifier(void (*notifier)(int, int));
void raw3270_pm_unfreeze(struct raw3270_view *);
/*
* Little memory allocator for string objects.
*/
struct string
{
struct list_head list;
struct list_head update;
unsigned long size;
unsigned long len;
char string[0];
} __attribute__ ((aligned(8)));
static inline struct string *
alloc_string(struct list_head *free_list, unsigned long len)
{
struct string *cs, *tmp;
unsigned long size;
size = (len + 7L) & -8L;
list_for_each_entry(cs, free_list, list) {
if (cs->size < size)
continue;
if (cs->size > size + sizeof(struct string)) {
char *endaddr = (char *) (cs + 1) + cs->size;
tmp = (struct string *) (endaddr - size) - 1;
tmp->size = size;
cs->size -= size + sizeof(struct string);
cs = tmp;
} else
list_del(&cs->list);
cs->len = len;
INIT_LIST_HEAD(&cs->list);
INIT_LIST_HEAD(&cs->update);
return cs;
}
return NULL;
}
static inline unsigned long
free_string(struct list_head *free_list, struct string *cs)
{
struct string *tmp;
struct list_head *p, *left;
/* Find the left neighbour in the free memory list. */
left = free_list;
list_for_each(p, free_list) {
if (list_entry(p, struct string, list) > cs)
break;
left = p;
}
/* Try to merge with right neighbour = next element from left. */
if (left->next != free_list) {
tmp = list_entry(left->next, struct string, list);
if ((char *) (cs + 1) + cs->size == (char *) tmp) {
list_del(&tmp->list);
cs->size += tmp->size + sizeof(struct string);
}
}
/* Try to merge with left neighbour. */
if (left != free_list) {
tmp = list_entry(left, struct string, list);
if ((char *) (tmp + 1) + tmp->size == (char *) cs) {
tmp->size += cs->size + sizeof(struct string);
return tmp->size;
}
}
__list_add(&cs->list, left, left->next);
return cs->size;
}
static inline void
add_string_memory(struct list_head *free_list, void *mem, unsigned long size)
{
struct string *cs;
cs = (struct string *) mem;
cs->size = size - sizeof(struct string);
free_string(free_list, cs);
}
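/*
 * A minimal usage sketch for this allocator (illustrative; the names
 * and sizes are assumptions): donate a chunk of memory once, then
 * carve strings out of it and return them to the same free list.
 *
 *	LIST_HEAD(free_strings);
 *	add_string_memory(&free_strings, mem, mem_size);
 *	struct string *s = alloc_string(&free_strings, 80);
 *	if (s)
 *		free_string(&free_strings, s);
 */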

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,189 @@
/*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __SCLP_H__
#define __SCLP_H__
#include <linux/types.h>
#include <linux/list.h>
#include <asm/sclp.h>
#include <asm/ebcdic.h>
/* maximum number of pages concerning our own memory management */
#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
#define MAX_CONSOLE_PAGES 6
#define EVTYP_OPCMD 0x01
#define EVTYP_MSG 0x02
#define EVTYP_STATECHANGE 0x08
#define EVTYP_PMSGCMD 0x09
#define EVTYP_CNTLPROGOPCMD 0x20
#define EVTYP_CNTLPROGIDENT 0x0B
#define EVTYP_SIGQUIESCE 0x1D
#define EVTYP_VT220MSG 0x1A
#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_SDIAS 0x1C
#define EVTYP_ASYNC 0x0A
#define EVTYP_OPCMD_MASK 0x80000000
#define EVTYP_MSG_MASK 0x40000000
#define EVTYP_STATECHANGE_MASK 0x01000000
#define EVTYP_PMSGCMD_MASK 0x00800000
#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
#define EVTYP_CTLPROGIDENT_MASK 0x00200000
#define EVTYP_SIGQUIESCE_MASK 0x00000008
#define EVTYP_VT220MSG_MASK 0x00000040
#define EVTYP_CONFMGMDATA_MASK 0x10000000
#define EVTYP_SDIAS_MASK 0x00000010
#define EVTYP_ASYNC_MASK 0x00400000
#define GNRLMSGFLGS_DOM 0x8000
#define GNRLMSGFLGS_SNDALRM 0x4000
#define GNRLMSGFLGS_HOLDMSG 0x2000
#define LNTPFLGS_CNTLTEXT 0x8000
#define LNTPFLGS_LABELTEXT 0x4000
#define LNTPFLGS_DATATEXT 0x2000
#define LNTPFLGS_ENDTEXT 0x1000
#define LNTPFLGS_PROMPTTEXT 0x0800
typedef unsigned int sclp_cmdw_t;
#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
#define GDS_ID_MDSMU 0x1310
#define GDS_ID_MDSROUTEINFO 0x1311
#define GDS_ID_AGUNWRKCORR 0x1549
#define GDS_ID_SNACONDREPORT 0x1532
#define GDS_ID_CPMSU 0x1212
#define GDS_ID_ROUTTARGINSTR 0x154D
#define GDS_ID_OPREQ 0x8070
#define GDS_ID_TEXTCMD 0x1320
#define GDS_KEY_SELFDEFTEXTMSG 0x31
enum sclp_pm_event {
SCLP_PM_EVENT_FREEZE,
SCLP_PM_EVENT_THAW,
SCLP_PM_EVENT_RESTORE,
};
#define SCLP_PANIC_PRIO 1
#define SCLP_PANIC_PRIO_CLIENT 0
typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
struct sccb_header {
u16 length;
u8 function_code;
u8 control_mask[3];
u16 response_code;
} __attribute__((packed));
extern u64 sclp_facilities;
#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
struct gds_subvector {
u8 length;
u8 key;
} __attribute__((packed));
struct gds_vector {
u16 length;
u16 gds_id;
} __attribute__((packed));
struct evbuf_header {
u16 length;
u8 type;
u8 flags;
u16 _reserved;
} __attribute__((packed));
struct sclp_req {
struct list_head list; /* list_head for request queueing. */
sclp_cmdw_t command; /* sclp command to execute */
void *sccb; /* pointer to the sccb to execute */
char status; /* status of this request */
int start_count; /* number of SVCs done for this req */
/* Callback that is called after reaching final status. */
void (*callback)(struct sclp_req *, void *data);
void *callback_data;
};
#define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */
#define SCLP_REQ_QUEUED 0x01 /* request is queued to be processed */
#define SCLP_REQ_RUNNING 0x02 /* request is currently running */
#define SCLP_REQ_DONE 0x03 /* request is completed successfully */
#define SCLP_REQ_FAILED 0x05 /* request is finally failed */
/* function pointers that a high level driver has to use for registration */
/* of some routines it wants to be called from the low level driver */
struct sclp_register {
struct list_head list;
/* User wants to receive: */
sccb_mask_t receive_mask;
/* User wants to send: */
sccb_mask_t send_mask;
/* H/W can receive: */
sccb_mask_t sclp_receive_mask;
/* H/W can send: */
sccb_mask_t sclp_send_mask;
/* called if event type availability changes */
void (*state_change_fn)(struct sclp_register *);
/* called for events in cp_receive_mask/sclp_receive_mask */
void (*receiver_fn)(struct evbuf_header *);
/* called for power management events */
void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event);
/* pm event posted flag */
int pm_event_posted;
};
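/*
 * A minimal registration sketch (illustrative; the mask and callback
 * are assumptions, not taken from a real driver): an event-type driver
 * fills in the masks it is interested in and hands the structure to
 * sclp_register(), declared below.
 *
 *	static void my_receiver(struct evbuf_header *evbuf) { }
 *
 *	static struct sclp_register my_reg = {
 *		.receive_mask = EVTYP_MSG_MASK,
 *		.receiver_fn = my_receiver,
 *	};
 *	rc = sclp_register(&my_reg);
 */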
/* externals from sclp.c */
int sclp_add_request(struct sclp_req *req);
void sclp_sync_wait(void);
int sclp_register(struct sclp_register *reg);
void sclp_unregister(struct sclp_register *reg);
int sclp_remove_processed(struct sccb_header *sccb);
int sclp_deactivate(void);
int sclp_reactivate(void);
int sclp_service_call(sclp_cmdw_t command, void *sccb);
int sclp_sdias_init(void);
void sclp_sdias_exit(void);
/* useful inlines */
/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
/* translate single character from ASCII to EBCDIC */
static inline unsigned char
sclp_ascebc(unsigned char ch)
{
return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch];
}
/* translate string from EBCDIC to ASCII */
static inline void
sclp_ebcasc_str(unsigned char *str, int nr)
{
(MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
}
/* translate string from ASCII to EBCDIC */
static inline void
sclp_ascebc_str(unsigned char *str, int nr)
{
(MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
}
#endif /* __SCLP_H__ */

View File

@@ -0,0 +1,213 @@
/*
* Enable Asynchronous Notification via SCLP.
*
* Copyright IBM Corp. 2009
* Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/utsname.h>
#include "sclp.h"
static int callhome_enabled;
static struct sclp_req *request;
static struct sclp_async_sccb *sccb;
static int sclp_async_send_wait(char *message);
static struct ctl_table_header *callhome_sysctl_header;
static DEFINE_SPINLOCK(sclp_async_lock);
#define SCLP_NORMAL_WRITE 0x00
struct async_evbuf {
struct evbuf_header header;
u64 reserved;
u8 rflags;
u8 empty;
u8 rtype;
u8 otype;
char comp_id[12];
char data[3000]; /* there is still some space left */
} __attribute__((packed));
struct sclp_async_sccb {
struct sccb_header header;
struct async_evbuf evbuf;
} __attribute__((packed));
static struct sclp_register sclp_async_register = {
.send_mask = EVTYP_ASYNC_MASK,
};
static int call_home_on_panic(struct notifier_block *self,
unsigned long event, void *data)
{
strncat(data, init_utsname()->nodename,
sizeof(init_utsname()->nodename));
sclp_async_send_wait(data);
return NOTIFY_DONE;
}
static struct notifier_block call_home_panic_nb = {
.notifier_call = call_home_on_panic,
.priority = INT_MAX,
};
static int proc_handler_callhome(struct ctl_table *ctl, int write,
void __user *buffer, size_t *count,
loff_t *ppos)
{
unsigned long val;
int len, rc;
char buf[3];
if (!*count || (*ppos && !write)) {
*count = 0;
return 0;
}
if (!write) {
len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
rc = copy_to_user(buffer, buf, sizeof(buf));
if (rc != 0)
return -EFAULT;
} else {
len = *count;
rc = copy_from_user(buf, buffer, sizeof(buf));
if (rc != 0)
return -EFAULT;
if (strict_strtoul(buf, 0, &val) != 0)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
callhome_enabled = val;
}
*count = len;
*ppos += len;
return 0;
}
static struct ctl_table callhome_table[] = {
{
.procname = "callhome",
.mode = 0644,
.proc_handler = proc_handler_callhome,
},
{ .ctl_name = 0 }
};
static struct ctl_table kern_dir_table[] = {
{
.ctl_name = CTL_KERN,
.procname = "kernel",
.maxlen = 0,
.mode = 0555,
.child = callhome_table,
},
{ .ctl_name = 0 }
};
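/*
 * The tables above surface the flag as /proc/sys/kernel/callhome, so
 * call home on panic can be toggled from userspace (illustrative):
 *
 *	echo 1 > /proc/sys/kernel/callhome
 */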
/*
* Function used to transfer asynchronous notification
* records which waits for send completion
*/
static int sclp_async_send_wait(char *message)
{
struct async_evbuf *evb;
int rc;
unsigned long flags;
if (!callhome_enabled)
return 0;
sccb->evbuf.header.type = EVTYP_ASYNC;
sccb->evbuf.rtype = 0xA5;
sccb->evbuf.otype = 0x00;
evb = &sccb->evbuf;
request->command = SCLP_CMDW_WRITE_EVENT_DATA;
request->sccb = sccb;
request->status = SCLP_REQ_FILLED;
strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
/*
* Retain Queue
* e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
*/
strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
sccb->evbuf.header.length = sizeof(sccb->evbuf);
sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
sccb->header.function_code = SCLP_NORMAL_WRITE;
rc = sclp_add_request(request);
if (rc)
return rc;
spin_lock_irqsave(&sclp_async_lock, flags);
while (request->status != SCLP_REQ_DONE &&
request->status != SCLP_REQ_FAILED) {
sclp_sync_wait();
}
spin_unlock_irqrestore(&sclp_async_lock, flags);
if (request->status != SCLP_REQ_DONE)
return -EIO;
rc = ((struct sclp_async_sccb *)
request->sccb)->header.response_code;
if (rc != 0x0020)
return -EIO;
if (evb->header.flags != 0x80)
return -EIO;
return rc;
}
static int __init sclp_async_init(void)
{
int rc;
rc = sclp_register(&sclp_async_register);
if (rc)
return rc;
rc = -EOPNOTSUPP;
if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
goto out_sclp;
rc = -ENOMEM;
callhome_sysctl_header = register_sysctl_table(kern_dir_table);
if (!callhome_sysctl_header)
goto out_sclp;
request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!request || !sccb)
goto out_mem;
rc = atomic_notifier_chain_register(&panic_notifier_list,
&call_home_panic_nb);
if (!rc)
goto out;
out_mem:
kfree(request);
free_page((unsigned long) sccb);
unregister_sysctl_table(callhome_sysctl_header);
out_sclp:
sclp_unregister(&sclp_async_register);
out:
return rc;
}
module_init(sclp_async_init);
static void __exit sclp_async_exit(void)
{
atomic_notifier_chain_unregister(&panic_notifier_list,
&call_home_panic_nb);
unregister_sysctl_table(callhome_sysctl_header);
sclp_unregister(&sclp_async_register);
free_page((unsigned long) sccb);
kfree(request);
}
module_exit(sclp_async_exit);
MODULE_AUTHOR("Copyright IBM Corp. 2009");
MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");

View File

@@ -0,0 +1,751 @@
/*
* Copyright IBM Corp. 2007, 2009
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include "sclp.h"
#define SCLP_CMDW_READ_SCP_INFO 0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
struct read_info_sccb {
struct sccb_header header; /* 0-7 */
u16 rnmax; /* 8-9 */
u8 rnsize; /* 10 */
u8 _reserved0[24 - 11]; /* 11-23 */
u8 loadparm[8]; /* 24-31 */
u8 _reserved1[48 - 32]; /* 32-47 */
u64 facilities; /* 48-55 */
u8 _reserved2[84 - 56]; /* 56-83 */
u8 fac84; /* 84 */
u8 _reserved3[91 - 85]; /* 85-90 */
u8 flags; /* 91 */
u8 _reserved4[100 - 92]; /* 92-99 */
u32 rnsize2; /* 100-103 */
u64 rnmax2; /* 104-111 */
u8 _reserved5[4096 - 112]; /* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));
static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;
u64 sclp_facilities;
static u8 sclp_fac84;
static unsigned long long rzm;
static unsigned long long rnmax;
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
int rc;
__ctl_set_bit(0, 9);
rc = sclp_service_call(cmd, sccb);
if (rc)
goto out;
__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
PSW_MASK_WAIT | PSW_DEFAULT_KEY);
local_irq_disable();
out:
/* Contents of the sccb might have changed. */
barrier();
__ctl_clear_bit(0, 9);
return rc;
}
static void __init sclp_read_info_early(void)
{
int rc;
int i;
struct read_info_sccb *sccb;
sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
SCLP_CMDW_READ_SCP_INFO};
sccb = &early_read_info_sccb;
for (i = 0; i < ARRAY_SIZE(commands); i++) {
do {
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->header.control_mask[2] = 0x80;
rc = sclp_cmd_sync_early(commands[i], sccb);
} while (rc == -EBUSY);
if (rc)
break;
if (sccb->header.response_code == 0x10) {
early_read_info_sccb_valid = 1;
break;
}
if (sccb->header.response_code != 0x1f0)
break;
}
}
void __init sclp_facilities_detect(void)
{
struct read_info_sccb *sccb;
sclp_read_info_early();
if (!early_read_info_sccb_valid)
return;
sccb = &early_read_info_sccb;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rzm <<= 20;
}
unsigned long long sclp_get_rnmax(void)
{
return rnmax;
}
unsigned long long sclp_get_rzm(void)
{
return rzm;
}
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. Therefore the sccb should have valid contents.
*/
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
struct read_info_sccb *sccb;
if (!early_read_info_sccb_valid)
return;
sccb = &early_read_info_sccb;
info->is_valid = 1;
if (sccb->flags & 0x2)
info->has_dump = 1;
memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
struct completion *completion = data;
complete(completion);
}
static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
{
struct completion completion;
struct sclp_req *request;
int rc;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request)
return -ENOMEM;
request->command = cmd;
request->sccb = sccb;
request->status = SCLP_REQ_FILLED;
request->callback = sclp_sync_callback;
request->callback_data = &completion;
init_completion(&completion);
/* Perform sclp request. */
rc = sclp_add_request(request);
if (rc)
goto out;
wait_for_completion(&completion);
/* Check response. */
if (request->status != SCLP_REQ_DONE) {
pr_warning("sync request failed (cmd=0x%08x, "
"status=0x%02x)\n", cmd, request->status);
rc = -EIO;
}
out:
kfree(request);
return rc;
}
/*
* CPU configuration related functions.
*/
#define SCLP_CMDW_READ_CPU_INFO 0x00010001
#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
struct read_cpu_info_sccb {
struct sccb_header header;
u16 nr_configured;
u16 offset_configured;
u16 nr_standby;
u16 offset_standby;
u8 reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
struct read_cpu_info_sccb *sccb)
{
char *page = (char *) sccb;
memset(info, 0, sizeof(*info));
info->configured = sccb->nr_configured;
info->standby = sccb->nr_standby;
info->combined = sccb->nr_configured + sccb->nr_standby;
info->has_cpu_type = sclp_fac84 & 0x1;
memcpy(&info->cpu, page + sccb->offset_configured,
info->combined * sizeof(struct sclp_cpu_entry));
}
int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
int rc;
struct read_cpu_info_sccb *sccb;
if (!SCLP_HAS_CPU_INFO)
return -EOPNOTSUPP;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
pr_warning("readcpuinfo failed (response=0x%04x)\n",
sccb->header.response_code);
rc = -EIO;
goto out;
}
sclp_fill_cpu_info(info, sccb);
out:
free_page((unsigned long) sccb);
return rc;
}
struct cpu_configure_sccb {
struct sccb_header header;
} __attribute__((packed, aligned(8)));
static int do_cpu_configure(sclp_cmdw_t cmd)
{
struct cpu_configure_sccb *sccb;
int rc;
if (!SCLP_HAS_CPU_RECONFIG)
return -EOPNOTSUPP;
/*
* This is not going to cross a page boundary since we force
* kmalloc to have a minimum alignment of 8 bytes on s390.
*/
sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = do_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warning("configure cpu failed (cmd=0x%08x, "
"response=0x%04x)\n", cmd,
sccb->header.response_code);
rc = -EIO;
break;
}
out:
kfree(sccb);
return rc;
}
int sclp_cpu_configure(u8 cpu)
{
return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}
int sclp_cpu_deconfigure(u8 cpu)
{
return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}
#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
static int sclp_mem_state_changed;
struct memory_increment {
struct list_head list;
u16 rn;
int standby;
int usecount;
};
struct assign_storage_sccb {
struct sccb_header header;
u16 rn;
} __packed;
static unsigned long long rn2addr(u16 rn)
{
return (unsigned long long) (rn - 1) * rzm;
}
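/*
 * Worked example (illustrative): with a reported increment size of
 * 1 MiB (rnsize = 1, hence rzm = 0x100000), increment number rn = 3
 * starts at (3 - 1) * 0x100000 = 0x200000; increments are numbered
 * from 1.
 */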
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
struct assign_storage_sccb *sccb;
int rc;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->rn = rn;
rc = do_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warning("assign storage failed (cmd=0x%08x, "
"response=0x%04x, rn=0x%04x)\n", cmd,
sccb->header.response_code, rn);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
static int sclp_assign_storage(u16 rn)
{
return do_assign_storage(0x000d0001, rn);
}
static int sclp_unassign_storage(u16 rn)
{
return do_assign_storage(0x000c0001, rn);
}
struct attach_storage_sccb {
struct sccb_header header;
u16 :16;
u16 assigned;
u32 :32;
u32 entries[0];
} __packed;
static int sclp_attach_storage(u8 id)
{
struct attach_storage_sccb *sccb;
int rc;
int i;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
rc = do_sync_request(0x00080001 | id << 8, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
set_bit(id, sclp_storage_ids);
for (i = 0; i < sccb->assigned; i++)
sclp_unassign_storage(sccb->entries[i] >> 16);
break;
default:
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
static int sclp_mem_change_state(unsigned long start, unsigned long size,
int online)
{
struct memory_increment *incr;
unsigned long long istart;
int rc = 0;
list_for_each_entry(incr, &sclp_mem_list, list) {
istart = rn2addr(incr->rn);
if (start + size - 1 < istart)
break;
if (start > istart + rzm - 1)
continue;
if (online) {
if (incr->usecount++)
continue;
/*
* Don't break the loop if one assign fails. The loop may be
* walked again on CANCEL, and we cannot record whether the
* state had already changed. So continue and increase the
* usecount for all increments.
*/
rc |= sclp_assign_storage(incr->rn);
} else {
if (--incr->usecount)
continue;
sclp_unassign_storage(incr->rn);
}
}
return rc ? -EIO : 0;
}
static int sclp_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
unsigned long start, size;
struct memory_notify *arg;
unsigned char id;
int rc = 0;
arg = data;
start = arg->start_pfn << PAGE_SHIFT;
size = arg->nr_pages << PAGE_SHIFT;
mutex_lock(&sclp_mem_mutex);
for (id = 0; id <= sclp_max_storage_id; id++)
if (!test_bit(id, sclp_storage_ids))
sclp_attach_storage(id);
switch (action) {
case MEM_ONLINE:
case MEM_GOING_OFFLINE:
case MEM_CANCEL_OFFLINE:
break;
case MEM_GOING_ONLINE:
rc = sclp_mem_change_state(start, size, 1);
break;
case MEM_CANCEL_ONLINE:
sclp_mem_change_state(start, size, 0);
break;
case MEM_OFFLINE:
sclp_mem_change_state(start, size, 0);
break;
default:
rc = -EINVAL;
break;
}
if (!rc)
sclp_mem_state_changed = 1;
mutex_unlock(&sclp_mem_mutex);
return rc ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block sclp_mem_nb = {
.notifier_call = sclp_mem_notifier,
};
static void __init add_memory_merged(u16 rn)
{
static u16 first_rn, num;
unsigned long long start, size;
if (rn && first_rn && (first_rn + num == rn)) {
num++;
return;
}
if (!first_rn)
goto skip_add;
start = rn2addr(first_rn);
size = (unsigned long long) num * rzm;
if (start >= VMEM_MAX_PHYS)
goto skip_add;
if (start + size > VMEM_MAX_PHYS)
size = VMEM_MAX_PHYS - start;
if (memory_end_set && (start >= memory_end))
goto skip_add;
if (memory_end_set && (start + size > memory_end))
size = memory_end - start;
add_memory(0, start, size);
skip_add:
first_rn = rn;
num = 1;
}
static void __init sclp_add_standby_memory(void)
{
struct memory_increment *incr;
list_for_each_entry(incr, &sclp_mem_list, list)
if (incr->standby)
add_memory_merged(incr->rn);
add_memory_merged(0);
}
static void __init insert_increment(u16 rn, int standby, int assigned)
{
struct memory_increment *incr, *new_incr;
struct list_head *prev;
u16 last_rn;
new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
if (!new_incr)
return;
new_incr->rn = rn;
new_incr->standby = standby;
last_rn = 0;
prev = &sclp_mem_list;
list_for_each_entry(incr, &sclp_mem_list, list) {
if (assigned && incr->rn > rn)
break;
if (!assigned && incr->rn - last_rn > 1)
break;
last_rn = incr->rn;
prev = &incr->list;
}
if (!assigned)
new_incr->rn = last_rn + 1;
if (new_incr->rn > rnmax) {
kfree(new_incr);
return;
}
list_add(&new_incr->list, prev);
}
static int sclp_mem_freeze(struct device *dev)
{
if (!sclp_mem_state_changed)
return 0;
pr_err("Memory hotplug state changed, suspend refused.\n");
return -EPERM;
}
struct read_storage_sccb {
struct sccb_header header;
u16 max_id;
u16 assigned;
u16 standby;
u16 :16;
u32 entries[0];
} __packed;
static struct dev_pm_ops sclp_mem_pm_ops = {
.freeze = sclp_mem_freeze,
};
static struct platform_driver sclp_mem_pdrv = {
.driver = {
.name = "sclp_mem",
.pm = &sclp_mem_pm_ops,
},
};
static int __init sclp_detect_standby_memory(void)
{
struct platform_device *sclp_pdev;
struct read_storage_sccb *sccb;
int i, id, assigned, rc;
if (!early_read_info_sccb_valid)
return 0;
if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
return 0;
rc = -ENOMEM;
sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
goto out;
assigned = 0;
for (id = 0; id <= sclp_max_storage_id; id++) {
memset(sccb, 0, PAGE_SIZE);
sccb->header.length = PAGE_SIZE;
rc = do_sync_request(0x00040001 | id << 8, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0010:
set_bit(id, sclp_storage_ids);
for (i = 0; i < sccb->assigned; i++) {
if (!sccb->entries[i])
continue;
assigned++;
insert_increment(sccb->entries[i] >> 16, 0, 1);
}
break;
case 0x0310:
break;
case 0x0410:
for (i = 0; i < sccb->assigned; i++) {
if (!sccb->entries[i])
continue;
assigned++;
insert_increment(sccb->entries[i] >> 16, 1, 1);
}
break;
default:
rc = -EIO;
break;
}
if (!rc)
sclp_max_storage_id = sccb->max_id;
}
if (rc || list_empty(&sclp_mem_list))
goto out;
for (i = 1; i <= rnmax - assigned; i++)
insert_increment(0, 1, 0);
rc = register_memory_notifier(&sclp_mem_nb);
if (rc)
goto out;
rc = platform_driver_register(&sclp_mem_pdrv);
if (rc)
goto out;
sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
if (rc)
goto out_driver;
sclp_add_standby_memory();
goto out;
out_driver:
platform_driver_unregister(&sclp_mem_pdrv);
out:
free_page((unsigned long) sccb);
return rc;
}
__initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* Channel path configuration related functions.
*/
#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
struct chp_cfg_sccb {
struct sccb_header header;
u8 ccm;
u8 reserved[6];
u8 cssid;
} __attribute__((packed));
static int do_chp_configure(sclp_cmdw_t cmd)
{
struct chp_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_CHP_RECONFIG)
return -EOPNOTSUPP;
/* Prepare sccb. */
sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = do_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
case 0x0440:
case 0x0450:
break;
default:
pr_warning("configure channel-path failed "
"(cmd=0x%08x, response=0x%04x)\n", cmd,
sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
/**
* sclp_chp_configure - perform configure channel-path sclp command
* @chpid: channel-path ID
*
* Perform configure channel-path command sclp command for specified chpid.
* Return 0 after command successfully finished, non-zero otherwise.
*/
int sclp_chp_configure(struct chp_id chpid)
{
return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}
/**
* sclp_chp_deconfigure - perform deconfigure channel-path sclp command
* @chpid: channel-path ID
*
* Perform deconfigure channel-path command sclp command for specified chpid
* and wait for completion. On success return 0. Return non-zero otherwise.
*/
int sclp_chp_deconfigure(struct chp_id chpid)
{
return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
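/*
 * A minimal caller sketch (illustrative; the chpid value is an
 * assumption): build a channel-path ID and hand it to the helpers
 * above.
 *
 *	struct chp_id chpid;
 *
 *	chp_id_init(&chpid);
 *	chpid.id = 0x40;
 *	rc = sclp_chp_configure(chpid);
 */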
struct chp_info_sccb {
struct sccb_header header;
u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
u8 standby[SCLP_CHP_INFO_MASK_SIZE];
u8 configured[SCLP_CHP_INFO_MASK_SIZE];
u8 ccm;
u8 reserved[6];
u8 cssid;
} __attribute__((packed));
/**
* sclp_chp_read_info - perform read channel-path information sclp command
* @info: resulting channel-path information data
*
* Perform read channel-path information sclp command and wait for completion.
* On success, store channel-path information in @info and return 0. Return
* non-zero otherwise.
*/
int sclp_chp_read_info(struct sclp_chp_info *info)
{
struct chp_info_sccb *sccb;
int rc;
if (!SCLP_HAS_CHP_INFO)
return -EOPNOTSUPP;
/* Prepare sccb. */
sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
pr_warning("read channel-path info failed "
"(response=0x%04x)\n", sccb->header.response_code);
rc = -EIO;
goto out;
}
memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
free_page((unsigned long) sccb);
return rc;
}

View File

@@ -0,0 +1,326 @@
/*
* SCLP line mode console driver
*
* Copyright IBM Corp. 1999, 2009
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kmod.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/termios.h>
#include <linux/err.h>
#include <linux/reboot.h>
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_tty.h"
#define sclp_console_major 4 /* TTYAUX_MAJOR */
#define sclp_console_minor 64
#define sclp_console_name "ttyS"
/* Lock to guard over changes to global variables */
static spinlock_t sclp_con_lock;
/* List of free pages that can be used for console output buffering */
static struct list_head sclp_con_pages;
/* List of full struct sclp_buffer structures ready for output */
static struct list_head sclp_con_outqueue;
/* Pointer to current console buffer */
static struct sclp_buffer *sclp_conbuf;
/* Timer for delayed output of console messages */
static struct timer_list sclp_con_timer;
/* Suspend mode flag */
static int sclp_con_suspended;
/* Flag that output queue is currently running */
static int sclp_con_queue_running;
/* Output format for console messages */
static unsigned short sclp_con_columns;
static unsigned short sclp_con_width_htab;
static void
sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
{
unsigned long flags;
void *page;
do {
page = sclp_unmake_buffer(buffer);
spin_lock_irqsave(&sclp_con_lock, flags);
/* Remove buffer from outqueue */
list_del(&buffer->list);
list_add_tail((struct list_head *) page, &sclp_con_pages);
/* Check if there is a pending buffer on the out queue. */
buffer = NULL;
if (!list_empty(&sclp_con_outqueue))
buffer = list_first_entry(&sclp_con_outqueue,
struct sclp_buffer, list);
if (!buffer || sclp_con_suspended) {
sclp_con_queue_running = 0;
spin_unlock_irqrestore(&sclp_con_lock, flags);
break;
}
spin_unlock_irqrestore(&sclp_con_lock, flags);
} while (sclp_emit_buffer(buffer, sclp_conbuf_callback));
}
/*
* Finalize and emit first pending buffer.
*/
static void sclp_conbuf_emit(void)
{
struct sclp_buffer* buffer;
unsigned long flags;
int rc;
spin_lock_irqsave(&sclp_con_lock, flags);
if (sclp_conbuf)
list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
sclp_conbuf = NULL;
if (sclp_con_queue_running || sclp_con_suspended)
goto out_unlock;
if (list_empty(&sclp_con_outqueue))
goto out_unlock;
buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer,
list);
sclp_con_queue_running = 1;
spin_unlock_irqrestore(&sclp_con_lock, flags);
rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
if (rc)
sclp_conbuf_callback(buffer, rc);
return;
out_unlock:
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
/*
* Wait until out queue is empty
*/
static void sclp_console_sync_queue(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_con_lock, flags);
if (timer_pending(&sclp_con_timer))
del_timer(&sclp_con_timer);
while (sclp_con_queue_running) {
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_con_lock, flags);
}
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
/*
* When this routine is called from the timer, flush the temporary
* write buffer without waiting for a final new line.
*/
static void
sclp_console_timeout(unsigned long data)
{
sclp_conbuf_emit();
}
/*
* Writes the given message to S390 system console
*/
static void
sclp_console_write(struct console *console, const char *message,
unsigned int count)
{
unsigned long flags;
void *page;
int written;
if (count == 0)
return;
spin_lock_irqsave(&sclp_con_lock, flags);
/*
* process escape characters, write message into buffer,
* send buffer to SCLP
*/
do {
/* make sure we have a console output buffer */
if (sclp_conbuf == NULL) {
while (list_empty(&sclp_con_pages)) {
if (sclp_con_suspended)
goto out;
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_con_lock, flags);
}
page = sclp_con_pages.next;
list_del((struct list_head *) page);
sclp_conbuf = sclp_make_buffer(page, sclp_con_columns,
sclp_con_width_htab);
}
/* try to write the string to the current output buffer */
written = sclp_write(sclp_conbuf, (const unsigned char *)
message, count);
if (written == count)
break;
/*
* Not all characters could be written to the current
* output buffer. Emit the buffer, create a new buffer
* and then output the rest of the string.
*/
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_conbuf_emit();
spin_lock_irqsave(&sclp_con_lock, flags);
message += written;
count -= written;
} while (count > 0);
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
!timer_pending(&sclp_con_timer)) {
init_timer(&sclp_con_timer);
sclp_con_timer.function = sclp_console_timeout;
sclp_con_timer.data = 0UL;
sclp_con_timer.expires = jiffies + HZ/10;
add_timer(&sclp_con_timer);
}
out:
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
static struct tty_driver *
sclp_console_device(struct console *c, int *index)
{
*index = c->index;
return sclp_tty_driver;
}
/*
* Make sure that all buffers will be flushed to the SCLP.
*/
static void
sclp_console_flush(void)
{
sclp_conbuf_emit();
sclp_console_sync_queue();
}
/*
* Resume console: If there are cached messages, emit them.
*/
static void sclp_console_resume(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_con_lock, flags);
sclp_con_suspended = 0;
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_conbuf_emit();
}
/*
* Suspend console: Set suspend flag and flush console
*/
static void sclp_console_suspend(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_con_lock, flags);
sclp_con_suspended = 1;
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_console_flush();
}
static int sclp_console_notify(struct notifier_block *self,
unsigned long event, void *data)
{
sclp_console_flush();
return NOTIFY_OK;
}
static struct notifier_block on_panic_nb = {
.notifier_call = sclp_console_notify,
.priority = SCLP_PANIC_PRIO_CLIENT,
};
static struct notifier_block on_reboot_nb = {
.notifier_call = sclp_console_notify,
.priority = 1,
};
/*
* used to register the SCLP console with the kernel and to
* give printk the necessary information
*/
static struct console sclp_console =
{
.name = sclp_console_name,
.write = sclp_console_write,
.device = sclp_console_device,
.flags = CON_PRINTBUFFER,
.index = 0 /* ttyS0 */
};
/*
* This function is called for SCLP suspend and resume events.
*/
void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event)
{
switch (sclp_pm_event) {
case SCLP_PM_EVENT_FREEZE:
sclp_console_suspend();
break;
case SCLP_PM_EVENT_RESTORE:
case SCLP_PM_EVENT_THAW:
sclp_console_resume();
break;
}
}
/*
* called by console_init() in drivers/char/tty_io.c at boot-time.
*/
static int __init
sclp_console_init(void)
{
void *page;
int i;
int rc;
if (!CONSOLE_IS_SCLP)
return 0;
rc = sclp_rw_init();
if (rc)
return rc;
/* Allocate pages for output buffering */
INIT_LIST_HEAD(&sclp_con_pages);
for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
list_add_tail(page, &sclp_con_pages);
}
INIT_LIST_HEAD(&sclp_con_outqueue);
spin_lock_init(&sclp_con_lock);
sclp_conbuf = NULL;
init_timer(&sclp_con_timer);
/* Set output format */
if (MACHINE_IS_VM)
/*
* save 4 characters for the CPU number
* written at start of each line by VM/CP
*/
sclp_con_columns = 76;
else
sclp_con_columns = 80;
sclp_con_width_htab = 8;
/* enable printk-access to this driver */
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&sclp_console);
return 0;
}
console_initcall(sclp_console_init);

View File

@@ -0,0 +1,90 @@
/*
* drivers/s390/char/sclp_config.c
*
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_config"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/sysdev.h>
#include <linux/workqueue.h>
#include <asm/smp.h>
#include "sclp.h"
struct conf_mgm_data {
u8 reserved;
u8 ev_qualifier;
} __attribute__((packed));
#define EV_QUAL_CPU_CHANGE 1
#define EV_QUAL_CAP_CHANGE 3
static struct work_struct sclp_cpu_capability_work;
static struct work_struct sclp_cpu_change_work;
static void sclp_cpu_capability_notify(struct work_struct *work)
{
int cpu;
struct sys_device *sysdev;
pr_warning("cpu capability changed.\n");
get_online_cpus();
for_each_online_cpu(cpu) {
sysdev = get_cpu_sysdev(cpu);
kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
}
put_online_cpus();
}
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
smp_rescan_cpus();
}
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
{
struct conf_mgm_data *cdata;
cdata = (struct conf_mgm_data *)(evbuf + 1);
switch (cdata->ev_qualifier) {
case EV_QUAL_CPU_CHANGE:
schedule_work(&sclp_cpu_change_work);
break;
case EV_QUAL_CAP_CHANGE:
schedule_work(&sclp_cpu_capability_work);
break;
}
}
static struct sclp_register sclp_conf_register =
{
.receive_mask = EVTYP_CONFMGMDATA_MASK,
.receiver_fn = sclp_conf_receiver_fn,
};
static int __init sclp_conf_init(void)
{
int rc;
INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
rc = sclp_register(&sclp_conf_register);
if (rc)
return rc;
if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
pr_warning("no configuration management.\n");
sclp_unregister(&sclp_conf_register);
rc = -ENOSYS;
}
return rc;
}
__initcall(sclp_conf_init);


@@ -0,0 +1,41 @@
/*
* drivers/s390/char/sclp_cpi.c
* SCLP control program identification
*
* Copyright IBM Corp. 2001, 2007
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Michael Ernst <mernst@de.ibm.com>
*/
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include "sclp_cpi_sys.h"
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Identify this operating system instance "
"to the System z hardware");
MODULE_AUTHOR("Martin Peschke <mpeschke@de.ibm.com>, "
"Michael Ernst <mernst@de.ibm.com>");
static char *system_name = "";
static char *sysplex_name = "";
module_param(system_name, charp, 0);
MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters");
module_param(sysplex_name, charp, 0);
MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters");
static int __init cpi_module_init(void)
{
return sclp_cpi_set_data(system_name, sysplex_name, "LINUX",
LINUX_VERSION_CODE);
}
static void __exit cpi_module_exit(void)
{
}
module_init(cpi_module_init);
module_exit(cpi_module_exit);
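/*
* Illustrative usage note (not part of the original source): the two
* module parameters above are given at load time, for example
*
*   modprobe sclp_cpi system_name=LNXHOST sysplex_name=PLEX1
*
* Both names are limited to 8 characters. cpi_module_init() then hands
* them to sclp_cpi_set_data() together with the fixed system type
* "LINUX" and the kernel version code.
*/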


@@ -0,0 +1,429 @@
/*
* drivers/s390/char/sclp_cpi_sys.c
* SCLP control program identification sysfs interface
*
* Copyright IBM Corp. 2001, 2007
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Michael Ernst <mernst@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_cpi"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_cpi_sys.h"
#define CPI_LENGTH_NAME 8
#define CPI_LENGTH_LEVEL 16
static DEFINE_MUTEX(sclp_cpi_mutex);
struct cpi_evbuf {
struct evbuf_header header;
u8 id_format;
u8 reserved0;
u8 system_type[CPI_LENGTH_NAME];
u64 reserved1;
u8 system_name[CPI_LENGTH_NAME];
u64 reserved2;
u64 system_level;
u64 reserved3;
u8 sysplex_name[CPI_LENGTH_NAME];
u8 reserved4[16];
} __attribute__((packed));
struct cpi_sccb {
struct sccb_header header;
struct cpi_evbuf cpi_evbuf;
} __attribute__((packed));
static struct sclp_register sclp_cpi_event = {
.send_mask = EVTYP_CTLPROGIDENT_MASK,
};
static char system_name[CPI_LENGTH_NAME + 1];
static char sysplex_name[CPI_LENGTH_NAME + 1];
static char system_type[CPI_LENGTH_NAME + 1];
static u64 system_level;
static void set_data(char *field, char *data)
{
memset(field, ' ', CPI_LENGTH_NAME);
memcpy(field, data, strlen(data));
sclp_ascebc_str(field, CPI_LENGTH_NAME);
}
static void cpi_callback(struct sclp_req *req, void *data)
{
struct completion *completion = data;
complete(completion);
}
static struct sclp_req *cpi_prepare_req(void)
{
struct sclp_req *req;
struct cpi_sccb *sccb;
struct cpi_evbuf *evb;
req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
if (!req)
return ERR_PTR(-ENOMEM);
sccb = (struct cpi_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb) {
kfree(req);
return ERR_PTR(-ENOMEM);
}
/* setup SCCB for Control-Program Identification */
sccb->header.length = sizeof(struct cpi_sccb);
sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
sccb->cpi_evbuf.header.type = 0x0b;
evb = &sccb->cpi_evbuf;
/* set system type */
set_data(evb->system_type, system_type);
/* set system name */
set_data(evb->system_name, system_name);
/* set system level */
evb->system_level = system_level;
/* set sysplex name */
set_data(evb->sysplex_name, sysplex_name);
/* prepare request data structure presented to SCLP driver */
req->command = SCLP_CMDW_WRITE_EVENT_DATA;
req->sccb = sccb;
req->status = SCLP_REQ_FILLED;
req->callback = cpi_callback;
return req;
}
static void cpi_free_req(struct sclp_req *req)
{
free_page((unsigned long) req->sccb);
kfree(req);
}
static int cpi_req(void)
{
struct completion completion;
struct sclp_req *req;
int rc;
int response;
rc = sclp_register(&sclp_cpi_event);
if (rc)
goto out;
if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
rc = -EOPNOTSUPP;
goto out_unregister;
}
req = cpi_prepare_req();
if (IS_ERR(req)) {
rc = PTR_ERR(req);
goto out_unregister;
}
init_completion(&completion);
req->callback_data = &completion;
/* Add request to sclp queue */
rc = sclp_add_request(req);
if (rc)
goto out_free_req;
wait_for_completion(&completion);
if (req->status != SCLP_REQ_DONE) {
pr_warning("request failed (status=0x%02x)\n",
req->status);
rc = -EIO;
goto out_free_req;
}
response = ((struct cpi_sccb *) req->sccb)->header.response_code;
if (response != 0x0020) {
pr_warning("request failed with response code 0x%x\n",
response);
rc = -EIO;
}
out_free_req:
cpi_free_req(req);
out_unregister:
sclp_unregister(&sclp_cpi_event);
out:
return rc;
}
static int check_string(const char *attr, const char *str)
{
size_t len;
size_t i;
len = strlen(str);
if ((len > 0) && (str[len - 1] == '\n'))
len--;
if (len > CPI_LENGTH_NAME)
return -EINVAL;
for (i = 0; i < len ; i++) {
if (isalpha(str[i]) || isdigit(str[i]) ||
strchr("$@# ", str[i]))
continue;
return -EINVAL;
}
return 0;
}
static void set_string(char *attr, const char *value)
{
size_t len;
size_t i;
len = strlen(value);
if ((len > 0) && (value[len - 1] == '\n'))
len--;
for (i = 0; i < CPI_LENGTH_NAME; i++) {
if (i < len)
attr[i] = toupper(value[i]);
else
attr[i] = ' ';
}
}
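/*
* Worked example (illustrative, not part of the original source): for
* the input "lnx1\n", check_string() accepts the value (four valid
* characters, the trailing newline is ignored) and set_string() yields
* the 8-byte field "LNX1    ", i.e. the value is upper-cased and padded
* with blanks to CPI_LENGTH_NAME. An input such as "too_long_name" is
* rejected with -EINVAL: it is longer than 8 characters and '_' is not
* in the allowed set "$@# ".
*/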
static ssize_t system_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
static ssize_t system_name_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
int rc;
rc = check_string("system_name", buf);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_name, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute system_name_attr =
__ATTR(system_name, 0644, system_name_show, system_name_store);
static ssize_t sysplex_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
static ssize_t sysplex_name_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
int rc;
rc = check_string("sysplex_name", buf);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(sysplex_name, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute sysplex_name_attr =
__ATTR(sysplex_name, 0644, sysplex_name_show, sysplex_name_store);
static ssize_t system_type_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
static ssize_t system_type_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
int rc;
rc = check_string("system_type", buf);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_type, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute system_type_attr =
__ATTR(system_type, 0644, system_type_show, system_type_store);
static ssize_t system_level_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
unsigned long long level;
mutex_lock(&sclp_cpi_mutex);
level = system_level;
mutex_unlock(&sclp_cpi_mutex);
return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
}
static ssize_t system_level_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
unsigned long long level;
char *endp;
level = simple_strtoull(buf, &endp, 16);
if (endp == buf)
return -EINVAL;
if (*endp == '\n')
endp++;
if (*endp)
return -EINVAL;
mutex_lock(&sclp_cpi_mutex);
system_level = level;
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute system_level_attr =
__ATTR(system_level, 0644, system_level_show, system_level_store);
static ssize_t set_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = cpi_req();
mutex_unlock(&sclp_cpi_mutex);
if (rc)
return rc;
return len;
}
static struct kobj_attribute set_attr = __ATTR(set, 0200, NULL, set_store);
static struct attribute *cpi_attrs[] = {
&system_name_attr.attr,
&sysplex_name_attr.attr,
&system_type_attr.attr,
&system_level_attr.attr,
&set_attr.attr,
NULL,
};
static struct attribute_group cpi_attr_group = {
.attrs = cpi_attrs,
};
static struct kset *cpi_kset;
int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
const u64 level)
{
int rc;
rc = check_string("system_name", system);
if (rc)
return rc;
rc = check_string("sysplex_name", sysplex);
if (rc)
return rc;
rc = check_string("system_type", type);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_name, system);
set_string(sysplex_name, sysplex);
set_string(system_type, type);
system_level = level;
rc = cpi_req();
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
EXPORT_SYMBOL(sclp_cpi_set_data);
static int __init cpi_init(void)
{
int rc;
cpi_kset = kset_create_and_add("cpi", NULL, firmware_kobj);
if (!cpi_kset)
return -ENOMEM;
rc = sysfs_create_group(&cpi_kset->kobj, &cpi_attr_group);
if (rc)
kset_unregister(cpi_kset);
return rc;
}
__initcall(cpi_init);
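/*
* Illustrative sysfs usage sketch (not part of the original source):
* since the kset is added below firmware_kobj, the attributes appear
* under /sys/firmware/cpi/. A typical sequence is
*
*   echo LNXHOST > /sys/firmware/cpi/system_name
*   echo PLEX1 > /sys/firmware/cpi/sysplex_name
*   echo 1 > /sys/firmware/cpi/set
*
* where the write to "set" triggers cpi_req() and sends the collected
* data to the SCLP.
*/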


@@ -0,0 +1,15 @@
/*
* drivers/s390/char/sclp_cpi_sys.h
* SCLP control program identification sysfs interface
*
* Copyright IBM Corp. 2007
* Author(s): Michael Ernst <mernst@de.ibm.com>
*/
#ifndef __SCLP_CPI_SYS_H__
#define __SCLP_CPI_SYS_H__
int sclp_cpi_set_data(const char *system, const char *sysplex,
const char *type, u64 level);
#endif /* __SCLP_CPI_SYS_H__ */


@@ -0,0 +1,85 @@
/*
* drivers/s390/char/sclp_quiesce.c
* signal quiesce handler
*
* (C) Copyright IBM Corp. 1999,2004
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/sigp.h>
#include <asm/smp.h>
#include "sclp.h"
static void (*old_machine_restart)(char *);
static void (*old_machine_halt)(void);
static void (*old_machine_power_off)(void);
/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
static void do_machine_quiesce(void)
{
psw_t quiesce_psw;
smp_send_stop();
quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
quiesce_psw.addr = 0xfff;
__load_psw(quiesce_psw);
}
/* Handler for quiesce event. Start shutdown procedure. */
static void sclp_quiesce_handler(struct evbuf_header *evbuf)
{
if (_machine_restart != (void *) do_machine_quiesce) {
old_machine_restart = _machine_restart;
old_machine_halt = _machine_halt;
old_machine_power_off = _machine_power_off;
_machine_restart = (void *) do_machine_quiesce;
_machine_halt = do_machine_quiesce;
_machine_power_off = do_machine_quiesce;
}
ctrl_alt_del();
}
/* Undo machine restart/halt/power_off modification on resume */
static void sclp_quiesce_pm_event(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event)
{
switch (sclp_pm_event) {
case SCLP_PM_EVENT_RESTORE:
if (old_machine_restart) {
_machine_restart = old_machine_restart;
_machine_halt = old_machine_halt;
_machine_power_off = old_machine_power_off;
old_machine_restart = NULL;
old_machine_halt = NULL;
old_machine_power_off = NULL;
}
break;
case SCLP_PM_EVENT_FREEZE:
case SCLP_PM_EVENT_THAW:
break;
}
}
static struct sclp_register sclp_quiesce_event = {
.receive_mask = EVTYP_SIGQUIESCE_MASK,
.receiver_fn = sclp_quiesce_handler,
.pm_event_fn = sclp_quiesce_pm_event
};
/* Initialize quiesce driver. */
static int __init sclp_quiesce_init(void)
{
return sclp_register(&sclp_quiesce_event);
}
module_init(sclp_quiesce_init);


@@ -0,0 +1,474 @@
/*
* driver: reading from and writing to system console on S/390 via SCLP
*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include "sclp.h"
#include "sclp_rw.h"
/*
* The room for the SCCB (only for writing) is not equal to a page's size
* (as it is specified as the maximum size in the SCLP documentation)
* because of the additional data structure described above.
*/
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
static void sclp_rw_pm_event(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event)
{
sclp_console_pm_event(sclp_pm_event);
}
/* Event type structure for write message and write priority message */
static struct sclp_register sclp_rw_event = {
.send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK,
.pm_event_fn = sclp_rw_pm_event,
};
/*
* Setup a sclp write buffer. Gets a page as input (4K) and returns
* a pointer to a struct sclp_buffer structure that is located at the
* end of the input page. This reduces the buffer space by a few
* bytes but simplifies things.
*/
struct sclp_buffer *
sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
{
struct sclp_buffer *buffer;
struct write_sccb *sccb;
sccb = (struct write_sccb *) page;
/*
* We keep the struct sclp_buffer structure at the end
* of the sccb page.
*/
buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
buffer->sccb = sccb;
buffer->retry_count = 0;
buffer->mto_number = 0;
buffer->mto_char_sum = 0;
buffer->current_line = NULL;
buffer->current_length = 0;
buffer->columns = columns;
buffer->htab = htab;
/* initialize sccb */
memset(sccb, 0, sizeof(struct write_sccb));
sccb->header.length = sizeof(struct write_sccb);
sccb->msg_buf.header.length = sizeof(struct msg_buf);
sccb->msg_buf.header.type = EVTYP_MSG;
sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
sccb->msg_buf.mdb.header.type = 1;
sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */
sccb->msg_buf.mdb.header.revision_code = 1;
sccb->msg_buf.mdb.go.length = sizeof(struct go);
sccb->msg_buf.mdb.go.type = 1;
return buffer;
}
/*
* Return a pointer to the original page that has been used to create
* the buffer.
*/
void *
sclp_unmake_buffer(struct sclp_buffer *buffer)
{
return buffer->sccb;
}
/*
* Initialize a new Message Text Object (MTO) at the end of the provided buffer
* with enough room for max_len characters. Return 0 on success.
*/
static int
sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
{
struct write_sccb *sccb;
struct mto *mto;
int mto_size;
/* max size of new Message Text Object including message text */
mto_size = sizeof(struct mto) + max_len;
/* check if current buffer sccb can contain the mto */
sccb = buffer->sccb;
if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size)
return -ENOMEM;
/* find address of new message text object */
mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
/*
* fill the new Message-Text Object,
* starting behind the former last byte of the SCCB
*/
memset(mto, 0, sizeof(struct mto));
mto->length = sizeof(struct mto);
mto->type = 4; /* message text object */
mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
/* set pointer to first byte after struct mto. */
buffer->current_line = (char *) (mto + 1);
buffer->current_length = 0;
return 0;
}
/*
* Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of
* MTO, enclosing MDB, event buffer and SCCB.
*/
static void
sclp_finalize_mto(struct sclp_buffer *buffer)
{
struct write_sccb *sccb;
struct mto *mto;
int str_len, mto_size;
str_len = buffer->current_length;
buffer->current_line = NULL;
buffer->current_length = 0;
/* real size of new Message Text Object including message text */
mto_size = sizeof(struct mto) + str_len;
/* find address of new message text object */
sccb = buffer->sccb;
mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
/* set size of message text object */
mto->length = mto_size;
/*
* update values of sizes
* (SCCB, Event(Message) Buffer, Message Data Block)
*/
sccb->header.length += mto_size;
sccb->msg_buf.header.length += mto_size;
sccb->msg_buf.mdb.header.length += mto_size;
/*
* count number of buffered messages (= number of Message Text
* Objects) and number of buffered characters
* for the SCCB currently used for buffering, and in total
*/
buffer->mto_number++;
buffer->mto_char_sum += str_len;
}
/*
* processing of a message including escape characters,
* returns number of characters written to the output sccb
* ("processed" means that is not guaranteed that the character have already
* been sent to the SCLP but that it will be done at least next time the SCLP
* is not busy)
*/
int
sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
{
int spaces, i_msg;
int rc;
/*
* parse msg for escape sequences (\t,\v ...) and put the formatted
* msg into an mto (created by sclp_initialize_mto).
*
* We have to do this work ourselves because there is no support for
* these characters on the native machine and only partial support
* under VM (why does VM interpret \n but the native machine doesn't?)
*
* Depending on the i/o-control setting the message is either written
* immediately or held back until a final new line arrives, possibly
* with the next message. Besides that we avoid a buffer overrun by
* writing out the buffer's content.
*
* RESTRICTIONS:
*
* \r and \b work within one line because we are not able to modify
* previous output that has already been accepted by the SCLP.
*
* \t combined with a following \r is not correctly represented because
* \t is expanded to some spaces but \r does not know about a
* previous \t and decreases the current position by one column.
* This is in the interest of a slim and quick implementation.
*/
for (i_msg = 0; i_msg < count; i_msg++) {
switch (msg[i_msg]) {
case '\n': /* new line, line feed (ASCII) */
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer, 0);
if (rc)
return i_msg;
}
sclp_finalize_mto(buffer);
break;
case '\a': /* bell, one for several times */
/* set SCLP sound alarm bit in General Object */
buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
GNRLMSGFLGS_SNDALRM;
break;
case '\t': /* horizontal tabulator */
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
}
/* "go to (next htab-boundary + 1, same line)" */
do {
if (buffer->current_length >= buffer->columns)
break;
/* ok, add a blank */
*buffer->current_line++ = 0x40;
buffer->current_length++;
} while (buffer->current_length % buffer->htab);
break;
case '\f': /* form feed */
case '\v': /* vertical tabulator */
/* "go to (actual column, actual line + 1)" */
/* = new line, leading spaces */
if (buffer->current_line != NULL) {
spaces = buffer->current_length;
sclp_finalize_mto(buffer);
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
memset(buffer->current_line, 0x40, spaces);
buffer->current_line += spaces;
buffer->current_length = spaces;
} else {
/* on an empty line this is the same as \n */
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
sclp_finalize_mto(buffer);
}
break;
case '\b': /* backspace */
/* "go to (actual column - 1, actual line)" */
/* decrement counter indicating position, */
/* do not remove last character */
if (buffer->current_line != NULL &&
buffer->current_length > 0) {
buffer->current_length--;
buffer->current_line--;
}
break;
case 0x00: /* end of string */
/* transfer current line to SCCB */
if (buffer->current_line != NULL)
sclp_finalize_mto(buffer);
/* skip the rest of the message including the 0 byte */
i_msg = count - 1;
break;
default: /* no escape character */
/* do not output unprintable characters */
if (!isprint(msg[i_msg]))
break;
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
}
*buffer->current_line++ = sclp_ascebc(msg[i_msg]);
buffer->current_length++;
break;
}
/* check if current mto is full */
if (buffer->current_line != NULL &&
buffer->current_length >= buffer->columns)
sclp_finalize_mto(buffer);
}
/* return number of processed characters */
return i_msg;
}
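/*
* Worked example (illustrative, not part of the original source): with
* columns = 80 and htab = 8, writing "ab\tc\n" stores 'a' and 'b' in a
* fresh MTO (converted to EBCDIC), then '\t' appends blanks (0x40)
* until the current length of 2 reaches the next tab boundary, leaving
* the length at 8; 'c' is stored next and '\n' finalizes the MTO, which
* updates the MTO, event buffer and SCCB lengths in one go.
*/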
/*
* Return the number of free bytes in the sccb
*/
int
sclp_buffer_space(struct sclp_buffer *buffer)
{
int count;
count = MAX_SCCB_ROOM - buffer->sccb->header.length;
if (buffer->current_line != NULL)
count -= sizeof(struct mto) + buffer->current_length;
return count;
}
/*
* Return number of characters in buffer
*/
int
sclp_chars_in_buffer(struct sclp_buffer *buffer)
{
int count;
count = buffer->mto_char_sum;
if (buffer->current_line != NULL)
count += buffer->current_length;
return count;
}
/*
* sets or provides some values that influence the driver's behaviour
*/
void
sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
{
buffer->columns = columns;
if (buffer->current_line != NULL &&
buffer->current_length > buffer->columns)
sclp_finalize_mto(buffer);
}
void
sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
{
buffer->htab = htab;
}
/*
* called by sclp_console_init and/or sclp_tty_init
*/
int
sclp_rw_init(void)
{
static int init_done = 0;
int rc;
if (init_done)
return 0;
rc = sclp_register(&sclp_rw_event);
if (rc == 0)
init_done = 1;
return rc;
}
#define SCLP_BUFFER_MAX_RETRY 1
/*
* second half of Write Event Data-function that has to be done after
* interruption indicating completion of Service Call.
*/
static void
sclp_writedata_callback(struct sclp_req *request, void *data)
{
int rc;
struct sclp_buffer *buffer;
struct write_sccb *sccb;
buffer = (struct sclp_buffer *) data;
sccb = buffer->sccb;
if (request->status == SCLP_REQ_FAILED) {
if (buffer->callback != NULL)
buffer->callback(buffer, -EIO);
return;
}
/* check SCLP response code and choose suitable action */
switch (sccb->header.response_code) {
case 0x0020 :
/* Normal completion, buffer processed, message(s) sent */
rc = 0;
break;
case 0x0340: /* Contained SCLP equipment check */
if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
rc = -EIO;
break;
}
/* remove processed buffers and requeue rest */
if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
/* not all buffers were processed */
sccb->header.response_code = 0x0000;
buffer->request.status = SCLP_REQ_FILLED;
rc = sclp_add_request(request);
if (rc == 0)
return;
} else
rc = 0;
break;
case 0x0040: /* SCLP equipment check */
case 0x05f0: /* Target resource in improper state */
if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
rc = -EIO;
break;
}
/* retry request */
sccb->header.response_code = 0x0000;
buffer->request.status = SCLP_REQ_FILLED;
rc = sclp_add_request(request);
if (rc == 0)
return;
break;
default:
if (sccb->header.response_code == 0x71f0)
rc = -ENOMEM;
else
rc = -EINVAL;
break;
}
if (buffer->callback != NULL)
buffer->callback(buffer, rc);
}
/*
* Setup the request structure in the struct sclp_buffer to do SCLP Write
* Event Data and pass the request to the core SCLP loop. Return zero on
* success, non-zero otherwise.
*/
int
sclp_emit_buffer(struct sclp_buffer *buffer,
void (*callback)(struct sclp_buffer *, int))
{
struct write_sccb *sccb;
/* add current line if there is one */
if (buffer->current_line != NULL)
sclp_finalize_mto(buffer);
/* Are there messages in the output buffer ? */
if (buffer->mto_number == 0)
return -EIO;
sccb = buffer->sccb;
if (sclp_rw_event.sclp_receive_mask & EVTYP_MSG_MASK)
/* Use normal write message */
sccb->msg_buf.header.type = EVTYP_MSG;
else if (sclp_rw_event.sclp_receive_mask & EVTYP_PMSGCMD_MASK)
/* Use write priority message */
sccb->msg_buf.header.type = EVTYP_PMSGCMD;
else
return -ENOSYS;
buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
buffer->request.status = SCLP_REQ_FILLED;
buffer->request.callback = sclp_writedata_callback;
buffer->request.callback_data = buffer;
buffer->request.sccb = sccb;
buffer->callback = callback;
return sclp_add_request(&buffer->request);
}


@@ -0,0 +1,101 @@
/*
* interface to the SCLP-read/write driver
*
* Copyright IBM Corporation 1999, 2009
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __SCLP_RW_H__
#define __SCLP_RW_H__
#include <linux/list.h>
struct mto {
u16 length;
u16 type;
u16 line_type_flags;
u8 alarm_control;
u8 _reserved[3];
} __attribute__((packed));
struct go {
u16 length;
u16 type;
u32 domid;
u8 hhmmss_time[8];
u8 th_time[3];
u8 reserved_0;
u8 dddyyyy_date[7];
u8 _reserved_1;
u16 general_msg_flags;
u8 _reserved_2[10];
u8 originating_system_name[8];
u8 job_guest_name[8];
} __attribute__((packed));
struct mdb_header {
u16 length;
u16 type;
u32 tag;
u32 revision_code;
} __attribute__((packed));
struct mdb {
struct mdb_header header;
struct go go;
} __attribute__((packed));
struct msg_buf {
struct evbuf_header header;
struct mdb mdb;
} __attribute__((packed));
struct write_sccb {
struct sccb_header header;
struct msg_buf msg_buf;
} __attribute__((packed));
/* The number of empty mto buffers that can be contained in a single sccb. */
#define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
sizeof(struct write_sccb)) / sizeof(struct mto))
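/*
* Illustrative arithmetic (the concrete numbers are an assumption for
* the example, derived from the packed layouts): sizeof(struct mto) is
* 10 bytes, so with a 4 KiB page and roughly 200 bytes taken by struct
* sclp_buffer and struct write_sccb together, one SCCB holds on the
* order of (4096 - 200) / 10, i.e. close to 390 empty MTOs. This is
* the per-page value that sclp_tty_write_room() adds for every free
* page.
*/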
/*
* data structure for information about list of SCCBs (only for writing),
* will be located at the end of a SCCBs page
*/
struct sclp_buffer {
struct list_head list; /* list_head for sccb_info chain */
struct sclp_req request;
struct write_sccb *sccb;
char *current_line;
int current_length;
int retry_count;
/* output format settings */
unsigned short columns;
unsigned short htab;
/* statistics about this buffer */
unsigned int mto_char_sum; /* # chars in sccb */
unsigned int mto_number; /* # mtos in sccb */
/* Callback that is called after reaching final status. */
void (*callback)(struct sclp_buffer *, int);
};
int sclp_rw_init(void);
struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short);
void *sclp_unmake_buffer(struct sclp_buffer *);
int sclp_buffer_space(struct sclp_buffer *);
int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
void sclp_set_columns(struct sclp_buffer *, unsigned short);
void sclp_set_htab(struct sclp_buffer *, unsigned short);
int sclp_chars_in_buffer(struct sclp_buffer *);
#ifdef CONFIG_SCLP_CONSOLE
void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
#else
static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { }
#endif
#endif /* __SCLP_RW_H__ */
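/*
* Illustrative usage sketch (not part of the original header): the
* intended calling sequence for this interface, roughly as the console
* and tty drivers use it. my_callback stands for a hypothetical
* completion handler of type void (*)(struct sclp_buffer *, int).
*
*   void *page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
*   struct sclp_buffer *buf;
*
*   sclp_rw_init();                          (once, at init time)
*   buf = sclp_make_buffer(page, 80, 8);     (80 columns, tab width 8)
*   sclp_write(buf, (const unsigned char *) "hello\n", 6);
*   if (sclp_emit_buffer(buf, my_callback))
*           page = sclp_unmake_buffer(buf);  (emit failed, reclaim page)
*/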


@@ -0,0 +1,257 @@
/*
* Sclp "store data in absolut storage"
*
* Copyright IBM Corp. 2003,2007
* Author(s): Michael Holzheu
*/
#define KMSG_COMPONENT "sclp_sdias"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/sched.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "sclp.h"
#include "sclp_rw.h"
#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
#define SDIAS_RETRIES 300
#define SDIAS_SLEEP_TICKS 50
#define EQ_STORE_DATA 0x0
#define EQ_SIZE 0x1
#define DI_FCP_DUMP 0x0
#define ASA_SIZE_32 0x0
#define ASA_SIZE_64 0x1
#define EVSTATE_ALL_STORED 0x0
#define EVSTATE_NO_DATA 0x3
#define EVSTATE_PART_STORED 0x10
static struct debug_info *sdias_dbf;
static struct sclp_register sclp_sdias_register = {
.send_mask = EVTYP_SDIAS_MASK,
};
struct sdias_evbuf {
struct evbuf_header hdr;
u8 event_qual;
u8 data_id;
u64 reserved2;
u32 event_id;
u16 reserved3;
u8 asa_size;
u8 event_status;
u32 reserved4;
u32 blk_cnt;
u64 asa;
u32 reserved5;
u32 fbn;
u32 reserved6;
u32 lbn;
u16 reserved7;
u16 dbs;
} __attribute__((packed));
struct sdias_sccb {
struct sccb_header hdr;
struct sdias_evbuf evbuf;
} __attribute__((packed));
static struct sdias_sccb sccb __attribute__((aligned(4096)));
static int sclp_req_done;
static wait_queue_head_t sdias_wq;
static DEFINE_MUTEX(sdias_mutex);
static void sdias_callback(struct sclp_req *request, void *data)
{
struct sdias_sccb *cbsccb;
cbsccb = (struct sdias_sccb *) request->sccb;
sclp_req_done = 1;
wake_up(&sdias_wq); /* Inform caller that the request is complete */
TRACE("callback done\n");
}
static int sdias_sclp_send(struct sclp_req *req)
{
int retries;
int rc;
for (retries = SDIAS_RETRIES; retries; retries--) {
sclp_req_done = 0;
TRACE("add request\n");
rc = sclp_add_request(req);
if (rc) {
/* not initiated, wait some time and retry */
set_current_state(TASK_INTERRUPTIBLE);
TRACE("add request failed: rc = %i\n",rc);
schedule_timeout(SDIAS_SLEEP_TICKS);
continue;
}
/* initiated, wait for completion of service call */
wait_event(sdias_wq, (sclp_req_done == 1));
if (req->status == SCLP_REQ_FAILED) {
TRACE("sclp request failed\n");
rc = -EIO;
continue;
}
TRACE("request done\n");
break;
}
return rc;
}
/*
* Get number of blocks (4K) available in the HSA
*/
int sclp_sdias_blk_count(void)
{
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
memset(&sccb, 0, sizeof(sccb));
memset(&request, 0, sizeof(request));
sccb.hdr.length = sizeof(sccb);
sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
sccb.evbuf.hdr.type = EVTYP_SDIAS;
sccb.evbuf.event_qual = EQ_SIZE;
sccb.evbuf.data_id = DI_FCP_DUMP;
sccb.evbuf.event_id = 4712;
sccb.evbuf.dbs = 1;
request.sccb = &sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
rc = sdias_sclp_send(&request);
if (rc) {
pr_err("sclp_send failed for get_nr_blocks\n");
goto out;
}
if (sccb.hdr.response_code != 0x0020) {
TRACE("send failed: %x\n", sccb.hdr.response_code);
rc = -EIO;
goto out;
}
switch (sccb.evbuf.event_status) {
case 0:
rc = sccb.evbuf.blk_cnt;
break;
default:
pr_err("SCLP error: %x\n",
sccb.evbuf.event_status);
rc = -EIO;
goto out;
}
TRACE("%i blocks\n", rc);
out:
mutex_unlock(&sdias_mutex);
return rc;
}
/*
* Copy from HSA to absolute storage (not reentrant):
*
* @dest : Address of buffer where data should be copied
* @start_blk: Start Block (beginning with 1)
* @nr_blks : Number of 4K blocks to copy
*
* Return Value: 0 : Requested 'number' of blocks of data copied
* <0: ERROR - negative event status
*/
int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
{
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
memset(&sccb, 0, sizeof(sccb));
memset(&request, 0, sizeof(request));
sccb.hdr.length = sizeof(sccb);
sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
sccb.evbuf.hdr.type = EVTYP_SDIAS;
sccb.evbuf.hdr.flags = 0;
sccb.evbuf.event_qual = EQ_STORE_DATA;
sccb.evbuf.data_id = DI_FCP_DUMP;
sccb.evbuf.event_id = 4712;
#ifdef __s390x__
sccb.evbuf.asa_size = ASA_SIZE_64;
#else
sccb.evbuf.asa_size = ASA_SIZE_32;
#endif
sccb.evbuf.event_status = 0;
sccb.evbuf.blk_cnt = nr_blks;
sccb.evbuf.asa = (unsigned long)dest;
sccb.evbuf.fbn = start_blk;
sccb.evbuf.lbn = 0;
sccb.evbuf.dbs = 1;
request.sccb = &sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
rc = sdias_sclp_send(&request);
if (rc) {
pr_err("sclp_send failed: %x\n", rc);
goto out;
}
if (sccb.hdr.response_code != 0x0020) {
TRACE("copy failed: %x\n", sccb.hdr.response_code);
rc = -EIO;
goto out;
}
switch (sccb.evbuf.event_status) {
case EVSTATE_ALL_STORED:
TRACE("all stored\n");
case EVSTATE_PART_STORED:
TRACE("part stored: %i\n", sccb.evbuf.blk_cnt);
break;
case EVSTATE_NO_DATA:
TRACE("no data\n");
default:
pr_err("Error from SCLP while copying hsa. "
"Event status = %x\n",
sccb.evbuf.event_status);
rc = -EIO;
}
out:
mutex_unlock(&sdias_mutex);
return rc;
}
int __init sclp_sdias_init(void)
{
int rc;
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return 0;
sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
debug_register_view(sdias_dbf, &debug_sprintf_view);
debug_set_level(sdias_dbf, 6);
rc = sclp_register(&sclp_sdias_register);
if (rc)
return rc;
init_waitqueue_head(&sdias_wq);
TRACE("init done\n");
return 0;
}
void __exit sclp_sdias_exit(void)
{
debug_unregister(sdias_dbf);
sclp_unregister(&sclp_sdias_register);
}
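/*
* Illustrative usage sketch (not part of the original source): a dump
* reader queries the HSA size first and then copies 4K blocks into a
* buffer; block numbering starts with 1 and error handling is elided.
*
*   int blocks = sclp_sdias_blk_count();
*   void *buf = (void *) __get_free_page(GFP_KERNEL);
*
*   if (blocks > 0 && buf)
*           sclp_sdias_copy(buf, 1, 1);   (copy the first HSA block)
*/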


@@ -0,0 +1,621 @@
/*
* drivers/s390/char/sclp_tty.c
* SCLP line mode terminal driver.
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include "ctrlchar.h"
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_tty.h"
/*
* size of a buffer that collects single characters coming in
* via sclp_tty_put_char()
*/
#define SCLP_TTY_BUF_SIZE 512
/*
* There is exactly one SCLP terminal, so we can keep things simple
* and allocate all variables statically.
*/
/* Lock to guard over changes to global variables. */
static spinlock_t sclp_tty_lock;
/* List of free pages that can be used for console output buffering. */
static struct list_head sclp_tty_pages;
/* List of full struct sclp_buffer structures ready for output. */
static struct list_head sclp_tty_outqueue;
/* Counter how many buffers are emitted. */
static int sclp_tty_buffer_count;
/* Pointer to current console buffer. */
static struct sclp_buffer *sclp_ttybuf;
/* Timer for delayed output of console messages. */
static struct timer_list sclp_tty_timer;
static struct tty_struct *sclp_tty;
static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
static unsigned short int sclp_tty_chars_count;
struct tty_driver *sclp_tty_driver;
static int sclp_tty_tolower;
static int sclp_tty_columns = 80;
#define SPACES_PER_TAB 8
#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */
/* This routine is called whenever we try to open a SCLP terminal. */
static int
sclp_tty_open(struct tty_struct *tty, struct file *filp)
{
sclp_tty = tty;
tty->driver_data = NULL;
tty->low_latency = 0;
return 0;
}
/* This routine is called when the SCLP terminal is closed. */
static void
sclp_tty_close(struct tty_struct *tty, struct file *filp)
{
if (tty->count > 1)
return;
sclp_tty = NULL;
}
/*
* This routine returns the number of characters the tty driver
* will accept for queuing to be written. This number is subject
* to change as output buffers get emptied, or if the output flow
* control is applied. This is not an exact number because not every
* character needs the same space in the sccb. The worst case is
* a string of newlines. Every newline creates a new mto, which
* needs sizeof(struct mto) (10) bytes.
*/
static int
sclp_tty_write_room (struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
int count;
spin_lock_irqsave(&sclp_tty_lock, flags);
count = 0;
if (sclp_ttybuf != NULL)
count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto);
list_for_each(l, &sclp_tty_pages)
count += NR_EMPTY_MTO_PER_SCCB;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
return count;
}
static void
sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
{
unsigned long flags;
void *page;
do {
page = sclp_unmake_buffer(buffer);
spin_lock_irqsave(&sclp_tty_lock, flags);
/* Remove buffer from outqueue */
list_del(&buffer->list);
sclp_tty_buffer_count--;
list_add_tail((struct list_head *) page, &sclp_tty_pages);
/* Check if there is a pending buffer on the out queue. */
buffer = NULL;
if (!list_empty(&sclp_tty_outqueue))
buffer = list_entry(sclp_tty_outqueue.next,
struct sclp_buffer, list);
spin_unlock_irqrestore(&sclp_tty_lock, flags);
} while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
/* check if the tty needs a wake up call */
if (sclp_tty != NULL) {
tty_wakeup(sclp_tty);
}
}
static inline void
__sclp_ttybuf_emit(struct sclp_buffer *buffer)
{
unsigned long flags;
int count;
int rc;
spin_lock_irqsave(&sclp_tty_lock, flags);
list_add_tail(&buffer->list, &sclp_tty_outqueue);
count = sclp_tty_buffer_count++;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (count)
return;
rc = sclp_emit_buffer(buffer, sclp_ttybuf_callback);
if (rc)
sclp_ttybuf_callback(buffer, rc);
}
/*
* When this routine is called from the timer then we flush the
* temporary write buffer.
*/
static void
sclp_tty_timeout(unsigned long data)
{
unsigned long flags;
struct sclp_buffer *buf;
spin_lock_irqsave(&sclp_tty_lock, flags);
buf = sclp_ttybuf;
sclp_ttybuf = NULL;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (buf != NULL) {
__sclp_ttybuf_emit(buf);
}
}
/*
* Write a string to the sclp tty.
*/
static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail)
{
unsigned long flags;
void *page;
int written;
int overall_written;
struct sclp_buffer *buf;
if (count <= 0)
return 0;
overall_written = 0;
spin_lock_irqsave(&sclp_tty_lock, flags);
do {
/* Create a sclp output buffer if none exists yet */
if (sclp_ttybuf == NULL) {
while (list_empty(&sclp_tty_pages)) {
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (may_fail)
goto out;
else
sclp_sync_wait();
spin_lock_irqsave(&sclp_tty_lock, flags);
}
page = sclp_tty_pages.next;
list_del((struct list_head *) page);
sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns,
SPACES_PER_TAB);
}
/* try to write the string to the current output buffer */
written = sclp_write(sclp_ttybuf, str, count);
overall_written += written;
if (written == count)
break;
/*
* Not all characters could be written to the current
* output buffer. Emit the buffer, create a new buffer
* and then output the rest of the string.
*/
buf = sclp_ttybuf;
sclp_ttybuf = NULL;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
__sclp_ttybuf_emit(buf);
spin_lock_irqsave(&sclp_tty_lock, flags);
str += written;
count -= written;
} while (count > 0);
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
!timer_pending(&sclp_tty_timer)) {
init_timer(&sclp_tty_timer);
sclp_tty_timer.function = sclp_tty_timeout;
sclp_tty_timer.data = 0UL;
sclp_tty_timer.expires = jiffies + HZ/10;
add_timer(&sclp_tty_timer);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
out:
return overall_written;
}
/*
* This routine is called by the kernel to write a series of characters to the
* tty device. The characters may come from user space or kernel space. This
* routine will return the number of characters actually accepted for writing.
*/
static int
sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
return sclp_tty_write_string(buf, count, 1);
}
/*
* This routine is called by the kernel to write a single character to the tty
* device. If the kernel uses this routine, it must call the flush_chars()
* routine (if defined) when it is done stuffing characters into the driver.
*
* Characters provided to sclp_tty_put_char() are buffered by the SCLP driver.
* If the given character is a '\n' the contents of the SCLP write buffer
* - including previous characters from sclp_tty_put_char() and strings from
* sclp_write() without final '\n' - will be written.
*/
static int
sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
sclp_tty_chars[sclp_tty_chars_count++] = ch;
if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
return 1;
}
/*
* This routine is called by the kernel after it has written a series of
* characters to the tty device using put_char().
*/
static void
sclp_tty_flush_chars(struct tty_struct *tty)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
}
/*
* This routine returns the number of characters in the write buffer of the
* SCLP driver. The provided number includes all characters that are stored
* in the SCCB (will be written next time the SCLP is not busy) as well as
* characters in the write buffer (which will not be written as long as
* the final line feed is missing).
*/
static int
sclp_tty_chars_in_buffer(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
struct sclp_buffer *t;
int count;
spin_lock_irqsave(&sclp_tty_lock, flags);
count = 0;
if (sclp_ttybuf != NULL)
count = sclp_chars_in_buffer(sclp_ttybuf);
list_for_each(l, &sclp_tty_outqueue) {
t = list_entry(l, struct sclp_buffer, list);
count += sclp_chars_in_buffer(t);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
return count;
}
/*
* removes all content from buffers of low level driver
*/
static void
sclp_tty_flush_buffer(struct tty_struct *tty)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
}
/*
* push input to tty
*/
static void
sclp_tty_input(unsigned char* buf, unsigned int count)
{
unsigned int cchar;
/*
* If this tty driver is currently closed
* then throw the received input away.
*/
if (sclp_tty == NULL)
return;
cchar = ctrlchar_handle(buf, count, sclp_tty);
switch (cchar & CTRLCHAR_MASK) {
case CTRLCHAR_SYSRQ:
break;
case CTRLCHAR_CTRL:
tty_insert_flip_char(sclp_tty, cchar, TTY_NORMAL);
tty_flip_buffer_push(sclp_tty);
break;
case CTRLCHAR_NONE:
/* send (normal) input to line discipline */
if (count < 2 ||
(strncmp((const char *) buf + count - 2, "^n", 2) &&
strncmp((const char *) buf + count - 2, "\252n", 2))) {
/* add the auto \n */
tty_insert_flip_string(sclp_tty, buf, count);
tty_insert_flip_char(sclp_tty, '\n', TTY_NORMAL);
} else
tty_insert_flip_string(sclp_tty, buf, count - 2);
tty_flip_buffer_push(sclp_tty);
break;
}
}
/*
* get an EBCDIC string in upper/lower case,
* find the characters in lower/upper case separated by a special character,
* modify the original string,
* return the length of the resulting string
*/
static int sclp_switch_cases(unsigned char *buf, int count)
{
unsigned char *ip, *op;
int toggle;
/* initially changing case is off */
toggle = 0;
ip = op = buf;
while (count-- > 0) {
/* compare with special character */
if (*ip == CASE_DELIMITER) {
/* followed by another special character? */
if (count && ip[1] == CASE_DELIMITER) {
/*
* ... then put a single copy of the special
* character to the output string
*/
*op++ = *ip++;
count--;
} else
/*
* ... special character followed by a normal
* character toggles the case change behaviour
*/
toggle = ~toggle;
/* skip special character */
ip++;
} else
/* not the special character */
if (toggle)
/* but case switching is on */
if (sclp_tty_tolower)
/* switch to uppercase */
*op++ = _ebc_toupper[(int) *ip++];
else
/* switch to lowercase */
*op++ = _ebc_tolower[(int) *ip++];
else
/* no case switching, copy the character */
*op++ = *ip++;
}
/* return length of reformatted string. */
return op - buf;
}
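/*
* Worked example (illustrative, not part of the original source, shown
* in ASCII for readability): with CASE_DELIMITER (0x6c, '%' in EBCDIC)
* and sclp_tty_tolower set, the input "abc%def%ghi" keeps "abc" and
* "ghi" unchanged while "def" is switched to upper case, and "a%%b"
* collapses the doubled delimiter into the literal "a%b" without
* toggling the case mode.
*/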
static void
sclp_get_input(unsigned char *start, unsigned char *end)
{
int count;
count = end - start;
if (sclp_tty_tolower)
EBC_TOLOWER(start, count);
count = sclp_switch_cases(start, count);
/* convert EBCDIC to ASCII (modify original input in SCCB) */
sclp_ebcasc_str(start, count);
/* transfer input to high level driver */
sclp_tty_input(start, count);
}
static inline struct gds_vector *
find_gds_vector(struct gds_vector *start, struct gds_vector *end, u16 id)
{
struct gds_vector *vec;
for (vec = start; vec < end; vec = (void *) vec + vec->length)
if (vec->gds_id == id)
return vec;
return NULL;
}
static inline struct gds_subvector *
find_gds_subvector(struct gds_subvector *start,
struct gds_subvector *end, u8 key)
{
struct gds_subvector *subvec;
for (subvec = start; subvec < end;
subvec = (void *) subvec + subvec->length)
if (subvec->key == key)
return subvec;
return NULL;
}
static inline void
sclp_eval_selfdeftextmsg(struct gds_subvector *start,
struct gds_subvector *end)
{
struct gds_subvector *subvec;
subvec = start;
while (subvec < end) {
subvec = find_gds_subvector(subvec, end, 0x30);
if (!subvec)
break;
sclp_get_input((unsigned char *)(subvec + 1),
(unsigned char *) subvec + subvec->length);
subvec = (void *) subvec + subvec->length;
}
}
static inline void
sclp_eval_textcmd(struct gds_subvector *start,
struct gds_subvector *end)
{
struct gds_subvector *subvec;
subvec = start;
while (subvec < end) {
subvec = find_gds_subvector(subvec, end,
GDS_KEY_SELFDEFTEXTMSG);
if (!subvec)
break;
sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
(void *)subvec + subvec->length);
subvec = (void *) subvec + subvec->length;
}
}
static inline void
sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
{
struct gds_vector *vec;
vec = start;
while (vec < end) {
vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD);
if (!vec)
break;
sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
(void *) vec + vec->length);
vec = (void *) vec + vec->length;
}
}
static inline void
sclp_eval_mdsmu(struct gds_vector *start, void *end)
{
struct gds_vector *vec;
vec = find_gds_vector(start, end, GDS_ID_CPMSU);
if (vec)
sclp_eval_cpmsu(vec + 1, (void *) vec + vec->length);
}
static void
sclp_tty_receiver(struct evbuf_header *evbuf)
{
struct gds_vector *start, *end, *vec;
start = (struct gds_vector *)(evbuf + 1);
end = (void *) evbuf + evbuf->length;
vec = find_gds_vector(start, end, GDS_ID_MDSMU);
if (vec)
sclp_eval_mdsmu(vec + 1, (void *) vec + vec->length);
}
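/*
* Illustrative summary (not part of the original source): operator
* input arrives wrapped in nested GDS vectors, which the receiver above
* unwraps in this order:
*
*   evbuf -> MDSMU -> CPMSU -> TEXTCMD -> SELFDEFTEXTMSG ->
*   subvector key 0x30 (the raw text, handed to sclp_get_input())
*/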
static void
sclp_tty_state_change(struct sclp_register *reg)
{
}
static struct sclp_register sclp_input_event =
{
.receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
.state_change_fn = sclp_tty_state_change,
.receiver_fn = sclp_tty_receiver
};
static const struct tty_operations sclp_ops = {
.open = sclp_tty_open,
.close = sclp_tty_close,
.write = sclp_tty_write,
.put_char = sclp_tty_put_char,
.flush_chars = sclp_tty_flush_chars,
.write_room = sclp_tty_write_room,
.chars_in_buffer = sclp_tty_chars_in_buffer,
.flush_buffer = sclp_tty_flush_buffer,
};
static int __init
sclp_tty_init(void)
{
struct tty_driver *driver;
void *page;
int i;
int rc;
if (!CONSOLE_IS_SCLP)
return 0;
driver = alloc_tty_driver(1);
if (!driver)
return -ENOMEM;
rc = sclp_rw_init();
if (rc) {
put_tty_driver(driver);
return rc;
}
/* Allocate pages for output buffering */
INIT_LIST_HEAD(&sclp_tty_pages);
for (i = 0; i < MAX_KMEM_PAGES; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (page == NULL) {
put_tty_driver(driver);
return -ENOMEM;
}
list_add_tail((struct list_head *) page, &sclp_tty_pages);
}
INIT_LIST_HEAD(&sclp_tty_outqueue);
spin_lock_init(&sclp_tty_lock);
init_timer(&sclp_tty_timer);
sclp_ttybuf = NULL;
sclp_tty_buffer_count = 0;
if (MACHINE_IS_VM) {
/*
* save 4 characters for the CPU number
* written at start of each line by VM/CP
*/
sclp_tty_columns = 76;
/* case input lines to lowercase */
sclp_tty_tolower = 1;
}
sclp_tty_chars_count = 0;
sclp_tty = NULL;
rc = sclp_register(&sclp_input_event);
if (rc) {
put_tty_driver(driver);
return rc;
}
driver->owner = THIS_MODULE;
driver->driver_name = "sclp_line";
driver->name = "sclp_line";
driver->major = TTY_MAJOR;
driver->minor_start = 64;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
driver->init_termios.c_iflag = IGNBRK | IGNPAR;
driver->init_termios.c_oflag = ONLCR | XTABS;
driver->init_termios.c_lflag = ISIG | ECHO;
driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(driver, &sclp_ops);
rc = tty_register_driver(driver);
if (rc) {
put_tty_driver(driver);
return rc;
}
sclp_tty_driver = driver;
return 0;
}
module_init(sclp_tty_init);


@@ -0,0 +1,18 @@
/*
* drivers/s390/char/sclp_tty.h
* interface to the SCLP-read/write driver
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __SCLP_TTY_H__
#define __SCLP_TTY_H__
#include <linux/tty_driver.h>
extern struct tty_driver *sclp_tty_driver;
#endif /* __SCLP_TTY_H__ */


@@ -0,0 +1,826 @@
/*
* SCLP VT220 terminal driver.
*
* Copyright IBM Corp. 2003, 2009
*
* Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <asm/uaccess.h>
#include "sclp.h"
#define SCLP_VT220_MAJOR TTY_MAJOR
#define SCLP_VT220_MINOR 65
#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
#define SCLP_VT220_DEVICE_NAME "ttysclp"
#define SCLP_VT220_CONSOLE_NAME "ttyS"
#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */
#define SCLP_VT220_BUF_SIZE 80
/* Representation of a single write request */
struct sclp_vt220_request {
struct list_head list;
struct sclp_req sclp_req;
int retry_count;
};
/* VT220 SCCB */
struct sclp_vt220_sccb {
struct sccb_header header;
struct evbuf_header evbuf;
};
#define SCLP_VT220_MAX_CHARS_PER_BUFFER (PAGE_SIZE - \
sizeof(struct sclp_vt220_request) - \
sizeof(struct sclp_vt220_sccb))
/* Structures and data needed to register tty driver */
static struct tty_driver *sclp_vt220_driver;
/* The tty_struct that the kernel associated with us */
static struct tty_struct *sclp_vt220_tty;
/* Lock to protect internal data from concurrent access */
static spinlock_t sclp_vt220_lock;
/* List of empty pages to be used as write request buffers */
static struct list_head sclp_vt220_empty;
/* List of pending requests */
static struct list_head sclp_vt220_outqueue;
/* Suspend mode flag */
static int sclp_vt220_suspended;
/* Flag that output queue is currently running */
static int sclp_vt220_queue_running;
/* Timer used for delaying write requests to merge subsequent messages into
* a single buffer */
static struct timer_list sclp_vt220_timer;
/* Pointer to current request buffer which has been partially filled but not
* yet sent */
static struct sclp_vt220_request *sclp_vt220_current_request;
/* Number of characters in current request buffer */
static int sclp_vt220_buffered_chars;
/* Counter controlling core driver initialization. */
static int __initdata sclp_vt220_init_count;
/* Flag indicating that sclp_vt220_current_request should really
* have been already queued but wasn't because the SCLP was processing
* another buffer */
static int sclp_vt220_flush_later;
static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);
/* Registration structure for our interest in SCLP event buffers */
static struct sclp_register sclp_vt220_register = {
.send_mask = EVTYP_VT220MSG_MASK,
.receive_mask = EVTYP_VT220MSG_MASK,
.state_change_fn = NULL,
.receiver_fn = sclp_vt220_receiver_fn,
.pm_event_fn = sclp_vt220_pm_event_fn,
};
/*
* Put provided request buffer back into queue and check emit pending
* buffers if necessary.
*/
static void
sclp_vt220_process_queue(struct sclp_vt220_request *request)
{
unsigned long flags;
void *page;
do {
/* Put buffer back to list of empty buffers */
page = request->sclp_req.sccb;
spin_lock_irqsave(&sclp_vt220_lock, flags);
/* Move request from outqueue to empty queue */
list_del(&request->list);
list_add_tail((struct list_head *) page, &sclp_vt220_empty);
/* Check if there is a pending buffer on the out queue. */
request = NULL;
if (!list_empty(&sclp_vt220_outqueue))
request = list_entry(sclp_vt220_outqueue.next,
struct sclp_vt220_request, list);
if (!request || sclp_vt220_suspended) {
sclp_vt220_queue_running = 0;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
break;
}
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
} while (__sclp_vt220_emit(request));
if (request == NULL && sclp_vt220_flush_later)
sclp_vt220_emit_current();
/* Check if the tty needs a wake up call */
if (sclp_vt220_tty != NULL) {
tty_wakeup(sclp_vt220_tty);
}
}
#define SCLP_BUFFER_MAX_RETRY 1
/*
* Callback through which the result of a write request is reported by the
* SCLP.
*/
static void
sclp_vt220_callback(struct sclp_req *request, void *data)
{
struct sclp_vt220_request *vt220_request;
struct sclp_vt220_sccb *sccb;
vt220_request = (struct sclp_vt220_request *) data;
if (request->status == SCLP_REQ_FAILED) {
sclp_vt220_process_queue(vt220_request);
return;
}
sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
/* Check SCLP response code and choose suitable action */
switch (sccb->header.response_code) {
case 0x0020 :
break;
case 0x05f0: /* Target resource in improper state */
break;
case 0x0340: /* Contained SCLP equipment check */
if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
break;
/* Remove processed buffers and requeue rest */
if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
/* Not all buffers were processed */
sccb->header.response_code = 0x0000;
vt220_request->sclp_req.status = SCLP_REQ_FILLED;
if (sclp_add_request(request) == 0)
return;
}
break;
case 0x0040: /* SCLP equipment check */
if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
break;
sccb->header.response_code = 0x0000;
vt220_request->sclp_req.status = SCLP_REQ_FILLED;
if (sclp_add_request(request) == 0)
return;
break;
default:
break;
}
sclp_vt220_process_queue(vt220_request);
}
/*
* Emit vt220 request buffer to SCLP. Return zero on success, non-zero
* otherwise.
*/
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
if (!(sclp_vt220_register.sclp_receive_mask & EVTYP_VT220MSG_MASK)) {
request->sclp_req.status = SCLP_REQ_FAILED;
return -EIO;
}
request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
request->sclp_req.status = SCLP_REQ_FILLED;
request->sclp_req.callback = sclp_vt220_callback;
request->sclp_req.callback_data = (void *) request;
return sclp_add_request(&request->sclp_req);
}
/*
* Queue and emit current request.
*/
static void
sclp_vt220_emit_current(void)
{
unsigned long flags;
struct sclp_vt220_request *request;
struct sclp_vt220_sccb *sccb;
spin_lock_irqsave(&sclp_vt220_lock, flags);
if (sclp_vt220_current_request) {
sccb = (struct sclp_vt220_sccb *)
sclp_vt220_current_request->sclp_req.sccb;
/* Only emit buffers with content */
if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
list_add_tail(&sclp_vt220_current_request->list,
&sclp_vt220_outqueue);
sclp_vt220_current_request = NULL;
if (timer_pending(&sclp_vt220_timer))
del_timer(&sclp_vt220_timer);
}
sclp_vt220_flush_later = 0;
}
if (sclp_vt220_queue_running || sclp_vt220_suspended)
goto out_unlock;
if (list_empty(&sclp_vt220_outqueue))
goto out_unlock;
request = list_first_entry(&sclp_vt220_outqueue,
struct sclp_vt220_request, list);
sclp_vt220_queue_running = 1;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
if (__sclp_vt220_emit(request))
sclp_vt220_process_queue(request);
return;
out_unlock:
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
#define SCLP_NORMAL_WRITE 0x00
/*
* Helper function to initialize a page with the sclp request structure.
*/
static struct sclp_vt220_request *
sclp_vt220_initialize_page(void *page)
{
struct sclp_vt220_request *request;
struct sclp_vt220_sccb *sccb;
/* Place request structure at end of page */
request = ((struct sclp_vt220_request *)
((addr_t) page + PAGE_SIZE)) - 1;
request->retry_count = 0;
request->sclp_req.sccb = page;
/* SCCB goes at start of page */
sccb = (struct sclp_vt220_sccb *) page;
memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
sccb->header.length = sizeof(struct sclp_vt220_sccb);
sccb->header.function_code = SCLP_NORMAL_WRITE;
sccb->header.response_code = 0x0000;
sccb->evbuf.type = EVTYP_VT220MSG;
sccb->evbuf.length = sizeof(struct evbuf_header);
return request;
}
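/*
 * Resulting page layout (illustrative): the SCCB sits at the start of
 * the page and grows upward as message data is added, while the request
 * structure occupies the very end of the page. The writable area is
 * therefore PAGE_SIZE - sizeof(struct sclp_vt220_request) -
 * sccb->header.length bytes, which is exactly what
 * sclp_vt220_space_left() below computes.
 *
 *	page ---------------> +---------------------------+
 *	                      | struct sclp_vt220_sccb    |
 *	                      | message data, grows up... |
 *	                      |                           |
 *	page + PAGE_SIZE      +---------------------------+
 *	  - sizeof(request)   | struct sclp_vt220_request |
 *	page + PAGE_SIZE ---> +---------------------------+
 */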
static inline unsigned int
sclp_vt220_space_left(struct sclp_vt220_request *request)
{
struct sclp_vt220_sccb *sccb;
sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
sccb->header.length;
}
static inline unsigned int
sclp_vt220_chars_stored(struct sclp_vt220_request *request)
{
struct sclp_vt220_sccb *sccb;
sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
return sccb->evbuf.length - sizeof(struct evbuf_header);
}
/*
* Add msg to buffer associated with request. Return the number of characters
* added.
*/
static int
sclp_vt220_add_msg(struct sclp_vt220_request *request,
const unsigned char *msg, int count, int convertlf)
{
struct sclp_vt220_sccb *sccb;
void *buffer;
unsigned char c;
int from;
int to;
if (count > sclp_vt220_space_left(request))
count = sclp_vt220_space_left(request);
if (count <= 0)
return 0;
sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
buffer = (void *) ((addr_t) sccb + sccb->header.length);
if (convertlf) {
/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
for (from=0, to=0;
(from < count) && (to < sclp_vt220_space_left(request));
from++) {
/* Retrieve character */
c = msg[from];
/* Perform conversion */
if (c == 0x0a) {
if (to + 1 < sclp_vt220_space_left(request)) {
((unsigned char *) buffer)[to++] = c;
((unsigned char *) buffer)[to++] = 0x0d;
} else
break;
} else
((unsigned char *) buffer)[to++] = c;
}
sccb->header.length += to;
sccb->evbuf.length += to;
return from;
} else {
memcpy(buffer, (const void *) msg, count);
sccb->header.length += count;
sccb->evbuf.length += count;
return count;
}
}
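/*
 * Worked example (illustrative): with convertlf set, the four source
 * bytes "ab\nc" (0x61 0x62 0x0a 0x63) are stored as the five bytes
 * 0x61 0x62 0x0a 0x0d 0x63 - every linefeed is followed by a carriage
 * return - and the function returns 4, the number of source bytes
 * consumed, not the number of bytes stored.
 */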
/*
* Emit buffer after having waited long enough for more data to arrive.
*/
static void
sclp_vt220_timeout(unsigned long data)
{
sclp_vt220_emit_current();
}
#define BUFFER_MAX_DELAY (HZ/20)
/*
* Internal implementation of the write function. Write COUNT bytes of data
* from memory at BUF
* to the SCLP interface. In case that the data does not fit into the current
* write buffer, emit the current one and allocate a new one. If there are no
* more empty buffers available, wait until one gets emptied. If DO_SCHEDULE
* is non-zero, the buffer will be scheduled for emitting after a timeout -
* otherwise the user has to explicitly call the flush function.
* A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
* buffer should be converted to 0x0a 0x0d. After completion, return the number
* of bytes written.
*/
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
int convertlf, int may_fail)
{
unsigned long flags;
void *page;
int written;
int overall_written;
if (count <= 0)
return 0;
overall_written = 0;
spin_lock_irqsave(&sclp_vt220_lock, flags);
do {
/* Create an sclp output buffer if none exists yet */
if (sclp_vt220_current_request == NULL) {
while (list_empty(&sclp_vt220_empty)) {
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
if (may_fail || sclp_vt220_suspended)
goto out;
else
sclp_sync_wait();
spin_lock_irqsave(&sclp_vt220_lock, flags);
}
page = (void *) sclp_vt220_empty.next;
list_del((struct list_head *) page);
sclp_vt220_current_request =
sclp_vt220_initialize_page(page);
}
/* Try to write the string to the current request buffer */
written = sclp_vt220_add_msg(sclp_vt220_current_request,
buf, count, convertlf);
overall_written += written;
if (written == count)
break;
/*
* Not all characters could be written to the current
* output buffer. Emit the buffer, create a new buffer
* and then output the rest of the string.
*/
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_vt220_emit_current();
spin_lock_irqsave(&sclp_vt220_lock, flags);
buf += written;
count -= written;
} while (count > 0);
/* Setup timer to output current console buffer after some time */
if (sclp_vt220_current_request != NULL &&
!timer_pending(&sclp_vt220_timer) && do_schedule) {
sclp_vt220_timer.function = sclp_vt220_timeout;
sclp_vt220_timer.data = 0UL;
sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
add_timer(&sclp_vt220_timer);
}
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
out:
return overall_written;
}
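/*
 * For reference, the parameter combinations used by the callers in this
 * file: sclp_vt220_write() passes do_schedule=1, convertlf=0,
 * may_fail=1; sclp_vt220_put_char() passes 0, 0, 1; the console's
 * sclp_vt220_con_write() passes 1, 1, 0, so console output is never
 * dropped and linefeeds are expanded.
 */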
/*
* This routine is called by the kernel to write a series of
* characters to the tty device. The characters may come from
* user space or kernel space. This routine will return the
* number of characters actually accepted for writing.
*/
static int
sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
return __sclp_vt220_write(buf, count, 1, 0, 1);
}
#define SCLP_VT220_SESSION_ENDED 0x01
#define SCLP_VT220_SESSION_STARTED 0x80
#define SCLP_VT220_SESSION_DATA 0x00
/*
* Called by the SCLP to report incoming event buffers.
*/
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
char *buffer;
unsigned int count;
/* Ignore input if device is not open */
if (sclp_vt220_tty == NULL)
return;
buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
count = evbuf->length - sizeof(struct evbuf_header);
switch (*buffer) {
case SCLP_VT220_SESSION_ENDED:
case SCLP_VT220_SESSION_STARTED:
break;
case SCLP_VT220_SESSION_DATA:
/* Send input to line discipline */
buffer++;
count--;
tty_insert_flip_string(sclp_vt220_tty, buffer, count);
tty_flip_buffer_push(sclp_vt220_tty);
break;
}
}
/*
* This routine is called when a particular tty device is opened.
*/
static int
sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
if (tty->count == 1) {
sclp_vt220_tty = tty;
tty->driver_data = kmalloc(SCLP_VT220_BUF_SIZE, GFP_KERNEL);
if (tty->driver_data == NULL)
return -ENOMEM;
tty->low_latency = 0;
}
return 0;
}
/*
* This routine is called when a particular tty device is closed.
*/
static void
sclp_vt220_close(struct tty_struct *tty, struct file *filp)
{
if (tty->count == 1) {
sclp_vt220_tty = NULL;
kfree(tty->driver_data);
tty->driver_data = NULL;
}
}
/*
* This routine is called by the kernel to write a single
* character to the tty device. If the kernel uses this routine,
* it must call the flush_chars() routine (if defined) when it is
* done stuffing characters into the driver.
*/
static int
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{
return __sclp_vt220_write(&ch, 1, 0, 0, 1);
}
/*
* This routine is called by the kernel after it has written a
* series of characters to the tty device using put_char().
*/
static void
sclp_vt220_flush_chars(struct tty_struct *tty)
{
if (!sclp_vt220_queue_running)
sclp_vt220_emit_current();
else
sclp_vt220_flush_later = 1;
}
/*
* This routine returns the number of characters the tty driver
* will accept for queuing to be written. This number is subject
* to change as output buffers get emptied, or if the output flow
* control is acted on.
*/
static int
sclp_vt220_write_room(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
int count;
spin_lock_irqsave(&sclp_vt220_lock, flags);
count = 0;
if (sclp_vt220_current_request != NULL)
count = sclp_vt220_space_left(sclp_vt220_current_request);
list_for_each(l, &sclp_vt220_empty)
count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
return count;
}
/*
* Return number of buffered chars.
*/
static int
sclp_vt220_chars_in_buffer(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
struct sclp_vt220_request *r;
int count;
spin_lock_irqsave(&sclp_vt220_lock, flags);
count = 0;
if (sclp_vt220_current_request != NULL)
count = sclp_vt220_chars_stored(sclp_vt220_current_request);
list_for_each(l, &sclp_vt220_outqueue) {
r = list_entry(l, struct sclp_vt220_request, list);
count += sclp_vt220_chars_stored(r);
}
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
return count;
}
/*
* Pass on all buffers to the hardware. Return only when there are no more
* buffers pending.
*/
static void
sclp_vt220_flush_buffer(struct tty_struct *tty)
{
sclp_vt220_emit_current();
}
/* Release allocated pages. */
static void __init __sclp_vt220_free_pages(void)
{
struct list_head *page, *p;
list_for_each_safe(page, p, &sclp_vt220_empty) {
list_del(page);
free_page((unsigned long) page);
}
}
/* Release memory and unregister from sclp core. Controlled by init counting -
* only the last invoker will actually perform these actions. */
static void __init __sclp_vt220_cleanup(void)
{
sclp_vt220_init_count--;
if (sclp_vt220_init_count != 0)
return;
sclp_unregister(&sclp_vt220_register);
__sclp_vt220_free_pages();
}
/* Allocate buffer pages and register with sclp core. Controlled by init
* counting - only the first invoker will actually perform these actions. */
static int __init __sclp_vt220_init(int num_pages)
{
void *page;
int i;
int rc;
sclp_vt220_init_count++;
if (sclp_vt220_init_count != 1)
return 0;
spin_lock_init(&sclp_vt220_lock);
INIT_LIST_HEAD(&sclp_vt220_empty);
INIT_LIST_HEAD(&sclp_vt220_outqueue);
init_timer(&sclp_vt220_timer);
sclp_vt220_current_request = NULL;
sclp_vt220_buffered_chars = 0;
sclp_vt220_tty = NULL;
sclp_vt220_flush_later = 0;
/* Allocate pages for output buffering */
rc = -ENOMEM;
for (i = 0; i < num_pages; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!page)
goto out;
list_add_tail(page, &sclp_vt220_empty);
}
rc = sclp_register(&sclp_vt220_register);
out:
if (rc) {
__sclp_vt220_free_pages();
sclp_vt220_init_count--;
}
return rc;
}
static const struct tty_operations sclp_vt220_ops = {
.open = sclp_vt220_open,
.close = sclp_vt220_close,
.write = sclp_vt220_write,
.put_char = sclp_vt220_put_char,
.flush_chars = sclp_vt220_flush_chars,
.write_room = sclp_vt220_write_room,
.chars_in_buffer = sclp_vt220_chars_in_buffer,
.flush_buffer = sclp_vt220_flush_buffer,
};
/*
* Register driver with SCLP and Linux and initialize internal tty structures.
*/
static int __init sclp_vt220_tty_init(void)
{
struct tty_driver *driver;
int rc;
/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
* symmetry between VM and LPAR systems regarding ttyS1. */
driver = alloc_tty_driver(1);
if (!driver)
return -ENOMEM;
rc = __sclp_vt220_init(MAX_KMEM_PAGES);
if (rc)
goto out_driver;
driver->owner = THIS_MODULE;
driver->driver_name = SCLP_VT220_DRIVER_NAME;
driver->name = SCLP_VT220_DEVICE_NAME;
driver->major = SCLP_VT220_MAJOR;
driver->minor_start = SCLP_VT220_MINOR;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(driver, &sclp_vt220_ops);
rc = tty_register_driver(driver);
if (rc)
goto out_init;
sclp_vt220_driver = driver;
return 0;
out_init:
__sclp_vt220_cleanup();
out_driver:
put_tty_driver(driver);
return rc;
}
__initcall(sclp_vt220_tty_init);
static void __sclp_vt220_flush_buffer(void)
{
unsigned long flags;
sclp_vt220_emit_current();
spin_lock_irqsave(&sclp_vt220_lock, flags);
if (timer_pending(&sclp_vt220_timer))
del_timer(&sclp_vt220_timer);
while (sclp_vt220_queue_running) {
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_vt220_lock, flags);
}
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
/*
* Resume console: If there are cached messages, emit them.
*/
static void sclp_vt220_resume(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_vt220_lock, flags);
sclp_vt220_suspended = 0;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_vt220_emit_current();
}
/*
* Suspend console: Set suspend flag and flush console
*/
static void sclp_vt220_suspend(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_vt220_lock, flags);
sclp_vt220_suspended = 1;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
__sclp_vt220_flush_buffer();
}
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event)
{
switch (sclp_pm_event) {
case SCLP_PM_EVENT_FREEZE:
sclp_vt220_suspend();
break;
case SCLP_PM_EVENT_RESTORE:
case SCLP_PM_EVENT_THAW:
sclp_vt220_resume();
break;
}
}
#ifdef CONFIG_SCLP_VT220_CONSOLE
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}
static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
*index = 0;
return sclp_vt220_driver;
}
static int
sclp_vt220_notify(struct notifier_block *self,
unsigned long event, void *data)
{
__sclp_vt220_flush_buffer();
return NOTIFY_OK;
}
static struct notifier_block on_panic_nb = {
.notifier_call = sclp_vt220_notify,
.priority = 1,
};
static struct notifier_block on_reboot_nb = {
.notifier_call = sclp_vt220_notify,
.priority = 1,
};
/* Structure needed to register with printk */
static struct console sclp_vt220_console =
{
.name = SCLP_VT220_CONSOLE_NAME,
.write = sclp_vt220_con_write,
.device = sclp_vt220_con_device,
.flags = CON_PRINTBUFFER,
.index = SCLP_VT220_CONSOLE_INDEX
};
static int __init
sclp_vt220_con_init(void)
{
int rc;
if (!CONSOLE_IS_SCLP)
return 0;
rc = __sclp_vt220_init(MAX_CONSOLE_PAGES);
if (rc)
return rc;
/* Attach linux console */
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&sclp_vt220_console);
return 0;
}
console_initcall(sclp_vt220_con_init);
#endif /* CONFIG_SCLP_VT220_CONSOLE */

View File

@@ -0,0 +1,403 @@
/*
* drivers/s390/char/tape.h
* tape device driver for 3480/3490E/3590 tapes.
*
* S390 and zSeries version
* Copyright IBM Corp. 2001, 2009
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Stefan Bader <shbader@de.ibm.com>
*/
#ifndef _TAPE_H
#define _TAPE_H
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtio.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
struct gendisk;
/*
* Define DBF_LIKE_HELL for lots of messages in the debug feature.
*/
#define DBF_LIKE_HELL
#ifdef DBF_LIKE_HELL
#define DBF_LH(level, str, ...) \
do { \
debug_sprintf_event(TAPE_DBF_AREA, level, str, ## __VA_ARGS__); \
} while (0)
#else
#define DBF_LH(level, str, ...) do {} while(0)
#endif
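/*
 * Example (illustrative): DBF_LH(6, "open(%p)\n", device) records a
 * level-6 event in the tape debug feature when DBF_LIKE_HELL is
 * defined and compiles to nothing otherwise.
 */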
/*
* macros s390 debug feature (dbf)
*/
#define DBF_EVENT(d_level, d_str...) \
do { \
debug_sprintf_event(TAPE_DBF_AREA, d_level, d_str); \
} while (0)
#define DBF_EXCEPTION(d_level, d_str...) \
do { \
debug_sprintf_exception(TAPE_DBF_AREA, d_level, d_str); \
} while (0)
#define TAPE_VERSION_MAJOR 2
#define TAPE_VERSION_MINOR 0
#define TAPE_MAGIC "tape"
#define TAPE_MINORS_PER_DEV 2 /* two minors per device */
#define TAPEBLOCK_HSEC_SIZE 2048
#define TAPEBLOCK_HSEC_S2B 2
#define TAPEBLOCK_RETRIES 5
enum tape_medium_state {
MS_UNKNOWN,
MS_LOADED,
MS_UNLOADED,
MS_SIZE
};
enum tape_state {
TS_UNUSED=0,
TS_IN_USE,
TS_BLKUSE,
TS_INIT,
TS_NOT_OPER,
TS_SIZE
};
enum tape_op {
TO_BLOCK, /* Block read */
TO_BSB, /* Backward space block */
TO_BSF, /* Backward space filemark */
TO_DSE, /* Data security erase */
TO_FSB, /* Forward space block */
TO_FSF, /* Forward space filemark */
TO_LBL, /* Locate block label */
TO_NOP, /* No operation */
TO_RBA, /* Read backward */
TO_RBI, /* Read block information */
TO_RFO, /* Read forward */
TO_REW, /* Rewind tape */
TO_RUN, /* Rewind and unload tape */
TO_WRI, /* Write block */
TO_WTM, /* Write tape mark */
TO_MSEN, /* Medium sense */
TO_LOAD, /* Load tape */
TO_READ_CONFIG, /* Read configuration data */
TO_READ_ATTMSG, /* Read attention message */
TO_DIS, /* Tape display */
TO_ASSIGN, /* Assign tape to channel path */
TO_UNASSIGN, /* Unassign tape from channel path */
TO_CRYPT_ON, /* Enable encryption */
TO_CRYPT_OFF, /* Disable encryption */
TO_KEKL_SET, /* Set KEK label */
TO_KEKL_QUERY, /* Query KEK label */
TO_RDC, /* Read device characteristics */
TO_SIZE, /* #entries in tape_op_t */
};
/* Forward declaration */
struct tape_device;
/* tape_request->status can be: */
enum tape_request_status {
TAPE_REQUEST_INIT, /* request is ready to be processed */
TAPE_REQUEST_QUEUED, /* request is queued to be processed */
TAPE_REQUEST_IN_IO, /* request is currently in IO */
TAPE_REQUEST_DONE, /* request is completed. */
TAPE_REQUEST_CANCEL, /* request should be canceled. */
TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */
};
/* Tape CCW request */
struct tape_request {
struct list_head list; /* list head for request queueing. */
struct tape_device *device; /* tape device of this request */
struct ccw1 *cpaddr; /* address of the channel program. */
void *cpdata; /* pointer to ccw data. */
enum tape_request_status status;/* status of this request */
int options; /* options for execution. */
int retries; /* retry counter for error recovery. */
int rescnt; /* residual count from devstat. */
/* Callback for delivering final status. */
void (*callback)(struct tape_request *, void *);
void *callback_data;
enum tape_op op;
int rc;
};
/* Function type for magnetic tape commands */
typedef int (*tape_mtop_fn)(struct tape_device *, int);
/* Size of the array containing the mtops for a discipline */
#define TAPE_NR_MTOPS (MTMKPART+1)
/* Tape Discipline */
struct tape_discipline {
struct module *owner;
int (*setup_device)(struct tape_device *);
void (*cleanup_device)(struct tape_device *);
int (*irq)(struct tape_device *, struct tape_request *, struct irb *);
struct tape_request *(*read_block)(struct tape_device *, size_t);
struct tape_request *(*write_block)(struct tape_device *, size_t);
void (*process_eov)(struct tape_device*);
#ifdef CONFIG_S390_TAPE_BLOCK
/* Block device stuff. */
struct tape_request *(*bread)(struct tape_device *, struct request *);
void (*check_locate)(struct tape_device *, struct tape_request *);
void (*free_bread)(struct tape_request *);
#endif
/* ioctl function for additional ioctls. */
int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
/* Array of tape commands with TAPE_NR_MTOPS entries */
tape_mtop_fn *mtop_array;
};
/*
* The discipline irq function either returns an error code (<0) which
* means that the request has failed with an error or one of the following:
*/
#define TAPE_IO_SUCCESS 0 /* request successful */
#define TAPE_IO_PENDING 1 /* request still running */
#define TAPE_IO_RETRY 2 /* retry to current request */
#define TAPE_IO_STOP 3 /* stop the running request */
#define TAPE_IO_LONG_BUSY 4 /* delay the running request */
/* Char Frontend Data */
struct tape_char_data {
struct idal_buffer *idal_buf; /* idal buffer for user char data */
int block_size; /* of size block_size. */
};
#ifdef CONFIG_S390_TAPE_BLOCK
/* Block Frontend Data */
struct tape_blk_data
{
struct tape_device * device;
/* Block device request queue. */
struct request_queue * request_queue;
spinlock_t request_queue_lock;
/* Task to move entries from block request to CCS request queue. */
struct work_struct requeue_task;
atomic_t requeue_scheduled;
/* Current position on the tape. */
long block_position;
int medium_changed;
struct gendisk * disk;
};
#endif
/* Tape Info */
struct tape_device {
/* entry in tape_device_list */
struct list_head node;
int cdev_id;
struct ccw_device * cdev;
struct tape_class_device * nt;
struct tape_class_device * rt;
/* Device discipline information. */
struct tape_discipline * discipline;
void * discdata;
/* Generic status flags */
long tape_generic_status;
/* Device state information. */
wait_queue_head_t state_change_wq;
enum tape_state tape_state;
enum tape_medium_state medium_state;
unsigned char * modeset_byte;
/* Reference count. */
atomic_t ref_count;
/* Request queue. */
struct list_head req_queue;
/* Request wait queue. */
wait_queue_head_t wait_queue;
/* Each tape device has (currently) two minor numbers. */
int first_minor;
/* Number of tapemarks required for correct termination. */
int required_tapemarks;
/* Block ID of the BOF */
unsigned int bof;
/* Character device frontend data */
struct tape_char_data char_data;
#ifdef CONFIG_S390_TAPE_BLOCK
/* Block dev frontend data */
struct tape_blk_data blk_data;
#endif
/* Function to start or stop the next request later. */
struct delayed_work tape_dnr;
/* Timer for long busy */
struct timer_list lb_timeout;
};
/* Externals from tape_core.c */
extern struct tape_request *tape_alloc_request(int cplength, int datasize);
extern void tape_free_request(struct tape_request *);
extern int tape_do_io(struct tape_device *, struct tape_request *);
extern int tape_do_io_async(struct tape_device *, struct tape_request *);
extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
extern int tape_cancel_io(struct tape_device *, struct tape_request *);
void tape_hotplug_event(struct tape_device *, int major, int action);
static inline int
tape_do_io_free(struct tape_device *device, struct tape_request *request)
{
int rc;
rc = tape_do_io(device, request);
tape_free_request(request);
return rc;
}
extern int tape_oper_handler(int irq, int status);
extern void tape_noper_handler(int irq, int status);
extern int tape_open(struct tape_device *);
extern int tape_release(struct tape_device *);
extern int tape_mtop(struct tape_device *, int, int);
extern void tape_state_set(struct tape_device *, enum tape_state);
extern int tape_generic_online(struct tape_device *, struct tape_discipline *);
extern int tape_generic_offline(struct ccw_device *);
extern int tape_generic_pm_suspend(struct ccw_device *);
/* Externals from tape_devmap.c */
extern int tape_generic_probe(struct ccw_device *);
extern void tape_generic_remove(struct ccw_device *);
extern struct tape_device *tape_get_device(int devindex);
extern struct tape_device *tape_get_device_reference(struct tape_device *);
extern struct tape_device *tape_put_device(struct tape_device *);
/* Externals from tape_char.c */
extern int tapechar_init(void);
extern void tapechar_exit(void);
extern int tapechar_setup_device(struct tape_device *);
extern void tapechar_cleanup_device(struct tape_device *);
/* Externals from tape_block.c */
#ifdef CONFIG_S390_TAPE_BLOCK
extern int tapeblock_init (void);
extern void tapeblock_exit(void);
extern int tapeblock_setup_device(struct tape_device *);
extern void tapeblock_cleanup_device(struct tape_device *);
#else
static inline int tapeblock_init (void) {return 0;}
static inline void tapeblock_exit (void) {;}
static inline int tapeblock_setup_device(struct tape_device *t) {return 0;}
static inline void tapeblock_cleanup_device (struct tape_device *t) {;}
#endif
/* tape initialisation functions */
#ifdef CONFIG_PROC_FS
extern void tape_proc_init (void);
extern void tape_proc_cleanup (void);
#else
static inline void tape_proc_init (void) {;}
static inline void tape_proc_cleanup (void) {;}
#endif
/* a function for dumping device sense info */
extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
struct irb *);
/* functions for handling the status of a device */
extern void tape_med_state_set(struct tape_device *, enum tape_medium_state);
/* The debug area */
extern debug_info_t *TAPE_DBF_AREA;
/* functions for building ccws */
static inline struct ccw1 *
tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
{
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = memsize;
ccw->cda = (__u32)(addr_t) cda;
return ccw + 1;
}
static inline struct ccw1 *
tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
{
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = memsize;
ccw->cda = (__u32)(addr_t) cda;
return ccw + 1;
}
static inline struct ccw1 *
tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
{
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = 0;
ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
return ccw + 1;
}
static inline struct ccw1 *
tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
{
while (count-- > 0) {
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = 0;
ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
ccw++;
}
return ccw;
}
static inline struct ccw1 *
tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
{
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
idal_buffer_set_cda(idal, ccw);
return ccw + 1;
}
static inline struct ccw1 *
tape_ccw_end_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
{
ccw->cmd_code = cmd_code;
ccw->flags = 0;
idal_buffer_set_cda(idal, ccw);
return ccw + 1;
}
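#if 0
/*
 * Illustrative sketch (kept out of the build): how a discipline builds
 * a small channel program with the helpers above. The ASSIGN/NOP
 * opcodes and the 2 CCW / 11 data byte sizes mirror tape_std_assign()
 * in tape_std.c; treat everything else here as hypothetical.
 */
static struct tape_request *example_build_assign(void)
{
	struct tape_request *request;

	request = tape_alloc_request(2, 11);	/* 2 CCWs, 11 bytes cpdata */
	if (IS_ERR(request))
		return request;
	request->op = TO_ASSIGN;
	/* The first CCW has CCW_FLAG_CC set and chains to the next one... */
	tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
	/* ...the final CCW clears the flag and ends the chain. */
	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
	return request;
}
#endif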
/* Global vars */
extern const char *tape_state_verbose[];
extern const char *tape_op_verbose[];
#endif /* _TAPE_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,175 @@
/*
* drivers/s390/char/tape_3590.h
* tape device discipline for 3590 tapes.
*
* Copyright IBM Corp. 2001,2006
* Author(s): Stefan Bader <shbader@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _TAPE_3590_H
#define _TAPE_3590_H
#define MEDIUM_SENSE 0xc2
#define READ_PREVIOUS 0x0a
#define MODE_SENSE 0xcf
#define PERFORM_SS_FUNC 0x77
#define READ_SS_DATA 0x3e
#define PREP_RD_SS_DATA 0x18
#define RD_ATTMSG 0x3
#define SENSE_BRA_PER 0
#define SENSE_BRA_CONT 1
#define SENSE_BRA_RE 2
#define SENSE_BRA_DRE 3
#define SENSE_FMT_LIBRARY 0x23
#define SENSE_FMT_UNSOLICITED 0x40
#define SENSE_FMT_COMMAND_REJ 0x41
#define SENSE_FMT_COMMAND_EXEC0 0x50
#define SENSE_FMT_COMMAND_EXEC1 0x51
#define SENSE_FMT_EVENT0 0x60
#define SENSE_FMT_EVENT1 0x61
#define SENSE_FMT_MIM 0x70
#define SENSE_FMT_SIM 0x71
#define MSENSE_UNASSOCIATED 0x00
#define MSENSE_ASSOCIATED_MOUNT 0x01
#define MSENSE_ASSOCIATED_UMOUNT 0x02
#define MSENSE_CRYPT_MASK 0x00000010
#define TAPE_3590_MAX_MSG 0xb0
/* Datatypes */
struct tape_3590_disc_data {
struct tape390_crypt_info crypt_info;
int read_back_op;
};
#define TAPE_3590_CRYPT_INFO(device) \
((struct tape_3590_disc_data*)(device->discdata))->crypt_info
#define TAPE_3590_READ_BACK_OP(device) \
((struct tape_3590_disc_data*)(device->discdata))->read_back_op
struct tape_3590_sense {
unsigned int command_rej:1;
unsigned int interv_req:1;
unsigned int bus_out_check:1;
unsigned int eq_check:1;
unsigned int data_check:1;
unsigned int overrun:1;
unsigned int def_unit_check:1;
unsigned int assgnd_elsew:1;
unsigned int locate_fail:1;
unsigned int inst_online:1;
unsigned int reserved:1;
unsigned int blk_seq_err:1;
unsigned int begin_part:1;
unsigned int wr_mode:1;
unsigned int wr_prot:1;
unsigned int not_cap:1;
unsigned int bra:2;
unsigned int lc:3;
unsigned int vlf_active:1;
unsigned int stm:1;
unsigned int med_pos:1;
unsigned int rac:8;
unsigned int rc_rqc:16;
unsigned int mc:8;
unsigned int sense_fmt:8;
union {
struct {
unsigned int emc:4;
unsigned int smc:4;
unsigned int sev:2;
unsigned int reserved:6;
unsigned int md:8;
unsigned int refcode:8;
unsigned int mid:16;
unsigned int mp:16;
unsigned char volid[6];
unsigned int fid:8;
} f70;
struct {
unsigned int emc:4;
unsigned int smc:4;
unsigned int sev:2;
unsigned int reserved1:5;
unsigned int mdf:1;
unsigned char md[3];
unsigned int simid:8;
unsigned int uid:16;
unsigned int refcode1:16;
unsigned int refcode2:16;
unsigned int refcode3:16;
unsigned int reserved2:8;
} f71;
unsigned char data[14];
} fmt;
unsigned char pad[10];
} __attribute__ ((packed));
struct tape_3590_med_sense {
unsigned int macst:4;
unsigned int masst:4;
char pad1[7];
unsigned int flags;
char pad2[116];
} __attribute__ ((packed));
struct tape_3590_rdc_data {
char data[64];
} __attribute__ ((packed));
/* Datastructures for 3592 encryption support */
struct tape3592_kekl {
__u8 flags;
char label[64];
} __attribute__ ((packed));
struct tape3592_kekl_pair {
__u8 count;
struct tape3592_kekl kekl[2];
} __attribute__ ((packed));
struct tape3592_kekl_query_data {
__u16 len;
__u8 fmt;
__u8 mc;
__u32 id;
__u8 flags;
struct tape3592_kekl_pair kekls;
char reserved[116];
} __attribute__ ((packed));
struct tape3592_kekl_query_order {
__u8 code;
__u8 flags;
char reserved1[2];
__u8 max_count;
char reserved2[35];
} __attribute__ ((packed));
struct tape3592_kekl_set_order {
__u8 code;
__u8 flags;
char reserved1[2];
__u8 op;
struct tape3592_kekl_pair kekls;
char reserved2[120];
} __attribute__ ((packed));
#endif /* _TAPE_3590_H */

View File

@@ -0,0 +1,477 @@
/*
* drivers/s390/char/tape_block.c
* block device frontend for tape device driver
*
* S390 and zSeries version
* Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Stefan Bader <shbader@de.ibm.com>
*/
#define KMSG_COMPONENT "tape"
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>
#include <asm/debug.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
#define TAPEBLOCK_MAX_SEC 100
#define TAPEBLOCK_MIN_REQUEUE 3
/*
* 2003/11/25 Stefan Bader <shbader@de.ibm.com>
*
* In 2.5/2.6 the block device request function is very likely to be called
* with disabled interrupts (e.g. generic_unplug_device). So the driver can't
* just call any function that tries to allocate CCW requests from that con-
* text since it might sleep. There are two choices to work around this:
* a) do not allocate with kmalloc but use its own memory pool
* b) take requests from the queue outside that context, knowing that
* allocation might sleep
*/
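/*
 * This driver implements option b): tapeblock_request_fn() merely
 * triggers requeue_task via schedule_work(), and tapeblock_requeue()
 * then moves requests over to the CCW queue from process context,
 * where allocation may sleep.
 */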
/*
* file operation structure for tape block frontend
*/
static int tapeblock_open(struct block_device *, fmode_t);
static int tapeblock_release(struct gendisk *, fmode_t);
static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
unsigned long);
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);
static const struct block_device_operations tapeblock_fops = {
.owner = THIS_MODULE,
.open = tapeblock_open,
.release = tapeblock_release,
.locked_ioctl = tapeblock_ioctl,
.media_changed = tapeblock_medium_changed,
.revalidate_disk = tapeblock_revalidate_disk,
};
static int tapeblock_major = 0;
static void
tapeblock_trigger_requeue(struct tape_device *device)
{
/* Protect against rescheduling. */
if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
return;
schedule_work(&device->blk_data.requeue_task);
}
/*
* Post finished request.
*/
static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
struct tape_device *device;
struct request *req;
DBF_LH(6, "__tapeblock_end_request()\n");
device = ccw_req->device;
req = (struct request *) data;
blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
if (ccw_req->rc == 0)
/* Update position. */
device->blk_data.block_position =
(blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
else
/* We lost the position information due to an error. */
device->blk_data.block_position = -1;
device->discipline->free_bread(ccw_req);
if (!list_empty(&device->req_queue) ||
blk_peek_request(device->blk_data.request_queue))
tapeblock_trigger_requeue(device);
}
/*
* Feed the tape device CCW queue with requests supplied in a list.
*/
static int
tapeblock_start_request(struct tape_device *device, struct request *req)
{
struct tape_request * ccw_req;
int rc;
DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);
ccw_req = device->discipline->bread(device, req);
if (IS_ERR(ccw_req)) {
DBF_EVENT(1, "TBLOCK: bread failed\n");
blk_end_request_all(req, -EIO);
return PTR_ERR(ccw_req);
}
ccw_req->callback = __tapeblock_end_request;
ccw_req->callback_data = (void *) req;
ccw_req->retries = TAPEBLOCK_RETRIES;
rc = tape_do_io_async(device, ccw_req);
if (rc) {
/*
* Start/enqueueing failed. No retries in
* this case.
*/
blk_end_request_all(req, -EIO);
device->discipline->free_bread(ccw_req);
}
return rc;
}
/*
* Move requests from the block device request queue to the tape device ccw
* queue.
*/
static void
tapeblock_requeue(struct work_struct *work)
{
struct tape_blk_data * blkdat;
struct tape_device * device;
struct request_queue * queue;
int nr_queued;
struct request * req;
struct list_head * l;
int rc;
blkdat = container_of(work, struct tape_blk_data, requeue_task);
device = blkdat->device;
if (!device)
return;
spin_lock_irq(get_ccwdev_lock(device->cdev));
queue = device->blk_data.request_queue;
/* Count number of requests on ccw queue. */
nr_queued = 0;
list_for_each(l, &device->req_queue)
nr_queued++;
spin_unlock(get_ccwdev_lock(device->cdev));
spin_lock_irq(&device->blk_data.request_queue_lock);
while (
!blk_queue_plugged(queue) &&
blk_peek_request(queue) &&
nr_queued < TAPEBLOCK_MIN_REQUEUE
) {
req = blk_fetch_request(queue);
if (rq_data_dir(req) == WRITE) {
DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
spin_unlock_irq(&device->blk_data.request_queue_lock);
blk_end_request_all(req, -EIO);
spin_lock_irq(&device->blk_data.request_queue_lock);
continue;
}
nr_queued++;
spin_unlock_irq(&device->blk_data.request_queue_lock);
rc = tapeblock_start_request(device, req);
spin_lock_irq(&device->blk_data.request_queue_lock);
}
spin_unlock_irq(&device->blk_data.request_queue_lock);
atomic_set(&device->blk_data.requeue_scheduled, 0);
}
/*
* Tape request queue function. Called from ll_rw_blk.c
*/
static void
tapeblock_request_fn(struct request_queue *queue)
{
struct tape_device *device;
device = (struct tape_device *) queue->queuedata;
DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
BUG_ON(device == NULL);
tapeblock_trigger_requeue(device);
}
/*
* This function is called for every new tapedevice
*/
int
tapeblock_setup_device(struct tape_device * device)
{
struct tape_blk_data * blkdat;
struct gendisk * disk;
int rc;
blkdat = &device->blk_data;
blkdat->device = device;
spin_lock_init(&blkdat->request_queue_lock);
atomic_set(&blkdat->requeue_scheduled, 0);
blkdat->request_queue = blk_init_queue(
tapeblock_request_fn,
&blkdat->request_queue_lock
);
if (!blkdat->request_queue)
return -ENOMEM;
elevator_exit(blkdat->request_queue->elevator);
rc = elevator_init(blkdat->request_queue, "noop");
if (rc)
goto cleanup_queue;
blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
blk_queue_max_phys_segments(blkdat->request_queue, -1L);
blk_queue_max_hw_segments(blkdat->request_queue, -1L);
blk_queue_max_segment_size(blkdat->request_queue, -1L);
blk_queue_segment_boundary(blkdat->request_queue, -1L);
disk = alloc_disk(1);
if (!disk) {
rc = -ENOMEM;
goto cleanup_queue;
}
disk->major = tapeblock_major;
disk->first_minor = device->first_minor;
disk->fops = &tapeblock_fops;
disk->private_data = tape_get_device_reference(device);
disk->queue = blkdat->request_queue;
set_capacity(disk, 0);
sprintf(disk->disk_name, "btibm%d",
device->first_minor / TAPE_MINORS_PER_DEV);
blkdat->disk = disk;
blkdat->medium_changed = 1;
blkdat->request_queue->queuedata = tape_get_device_reference(device);
add_disk(disk);
tape_get_device_reference(device);
INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
return 0;
cleanup_queue:
blk_cleanup_queue(blkdat->request_queue);
blkdat->request_queue = NULL;
return rc;
}
void
tapeblock_cleanup_device(struct tape_device *device)
{
flush_scheduled_work();
tape_put_device(device);
if (!device->blk_data.disk) {
goto cleanup_queue;
}
del_gendisk(device->blk_data.disk);
device->blk_data.disk->private_data =
tape_put_device(device->blk_data.disk->private_data);
put_disk(device->blk_data.disk);
device->blk_data.disk = NULL;
cleanup_queue:
device->blk_data.request_queue->queuedata = tape_put_device(device);
blk_cleanup_queue(device->blk_data.request_queue);
device->blk_data.request_queue = NULL;
}
/*
* Detect number of blocks of the tape.
* FIXME: can we extend this to detect the block size as well?
*/
static int
tapeblock_revalidate_disk(struct gendisk *disk)
{
struct tape_device * device;
unsigned int nr_of_blks;
int rc;
device = (struct tape_device *) disk->private_data;
BUG_ON(!device);
if (!device->blk_data.medium_changed)
return 0;
rc = tape_mtop(device, MTFSFM, 1);
if (rc)
return rc;
rc = tape_mtop(device, MTTELL, 1);
if (rc < 0)
return rc;
pr_info("%s: Determining the size of the recorded area...\n",
dev_name(&device->cdev->dev));
DBF_LH(3, "Image file ends at %d\n", rc);
nr_of_blks = rc;
/* This will fail for the first file. Catch the error by checking the
* position. */
tape_mtop(device, MTBSF, 1);
rc = tape_mtop(device, MTTELL, 1);
if (rc < 0)
return rc;
if (rc > nr_of_blks)
return -EINVAL;
DBF_LH(3, "Image file starts at %d\n", rc);
device->bof = rc;
nr_of_blks -= rc;
pr_info("%s: The size of the recorded area is %i blocks\n",
dev_name(&device->cdev->dev), nr_of_blks);
set_capacity(device->blk_data.disk,
nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
device->blk_data.block_position = 0;
device->blk_data.medium_changed = 0;
return 0;
}
static int
tapeblock_medium_changed(struct gendisk *disk)
{
struct tape_device *device;
device = (struct tape_device *) disk->private_data;
DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
device, device->blk_data.medium_changed);
return device->blk_data.medium_changed;
}
/*
* Block frontend tape device open function.
*/
static int
tapeblock_open(struct block_device *bdev, fmode_t mode)
{
struct gendisk * disk = bdev->bd_disk;
struct tape_device * device;
int rc;
device = tape_get_device_reference(disk->private_data);
if (device->required_tapemarks) {
DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
pr_warning("%s: Opening the tape failed because of missing "
"end-of-file marks\n", dev_name(&device->cdev->dev));
rc = -EPERM;
goto put_device;
}
rc = tape_open(device);
if (rc)
goto put_device;
rc = tapeblock_revalidate_disk(disk);
if (rc)
goto release;
/*
* Note: The reference to <device> is held until the release function
* is called.
*/
tape_state_set(device, TS_BLKUSE);
return 0;
release:
tape_release(device);
put_device:
tape_put_device(device);
return rc;
}
/*
* Block frontend tape device release function.
*
* Note: One reference to the tape device was made by the open function. So
* we just get the pointer here and release the reference.
*/
static int
tapeblock_release(struct gendisk *disk, fmode_t mode)
{
struct tape_device *device = disk->private_data;
tape_state_set(device, TS_IN_USE);
tape_release(device);
tape_put_device(device);
return 0;
}
/*
* Support of some generic block device IOCTLs.
*/
static int
tapeblock_ioctl(
struct block_device * bdev,
fmode_t mode,
unsigned int command,
unsigned long arg
) {
int rc;
int minor;
struct gendisk *disk = bdev->bd_disk;
struct tape_device *device;
rc = 0;
BUG_ON(!disk);
device = disk->private_data;
BUG_ON(!device);
minor = MINOR(bdev->bd_dev);
DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
switch (command) {
/* Refuse some IOCTL calls without complaining (mount). */
case 0x5310: /* CDROMMULTISESSION */
rc = -EINVAL;
break;
default:
rc = -EINVAL;
}
return rc;
}
/*
* Initialize block device frontend.
*/
int
tapeblock_init(void)
{
int rc;
/* Register the tape major number to the kernel */
rc = register_blkdev(tapeblock_major, "tBLK");
if (rc < 0)
return rc;
if (tapeblock_major == 0)
tapeblock_major = rc;
return 0;
}
/*
* Deregister major for block device frontend
*/
void
tapeblock_exit(void)
{
unregister_blkdev(tapeblock_major, "tBLK");
}

View File

@@ -0,0 +1,495 @@
/*
* drivers/s390/char/tape_char.c
* character device frontend for tape device driver
*
* S390 and zSeries version
* Copyright IBM Corp. 2001,2006
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/mtio.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
#include "tape_std.h"
#include "tape_class.h"
#define TAPECHAR_MAJOR 0 /* get dynamic major */
/*
* file operation structure for tape character frontend
*/
static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
static int tapechar_open(struct inode *,struct file *);
static int tapechar_release(struct inode *,struct file *);
static int tapechar_ioctl(struct inode *, struct file *, unsigned int,
unsigned long);
static long tapechar_compat_ioctl(struct file *, unsigned int,
unsigned long);
static const struct file_operations tape_fops =
{
.owner = THIS_MODULE,
.read = tapechar_read,
.write = tapechar_write,
.ioctl = tapechar_ioctl,
.compat_ioctl = tapechar_compat_ioctl,
.open = tapechar_open,
.release = tapechar_release,
};
static int tapechar_major = TAPECHAR_MAJOR;
/*
* This function is called for every new tapedevice
*/
int
tapechar_setup_device(struct tape_device * device)
{
char device_name[20];
sprintf(device_name, "ntibm%i", device->first_minor / 2);
device->nt = register_tape_dev(
&device->cdev->dev,
MKDEV(tapechar_major, device->first_minor),
&tape_fops,
device_name,
"non-rewinding"
);
device_name[0] = 'r';
device->rt = register_tape_dev(
&device->cdev->dev,
MKDEV(tapechar_major, device->first_minor + 1),
&tape_fops,
device_name,
"rewinding"
);
return 0;
}
void
tapechar_cleanup_device(struct tape_device *device)
{
unregister_tape_dev(&device->cdev->dev, device->rt);
device->rt = NULL;
unregister_tape_dev(&device->cdev->dev, device->nt);
device->nt = NULL;
}
static int
tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
{
struct idal_buffer *new;
if (device->char_data.idal_buf != NULL &&
device->char_data.idal_buf->size == block_size)
return 0;
if (block_size > MAX_BLOCKSIZE) {
DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
block_size, MAX_BLOCKSIZE);
return -EINVAL;
}
/* The current idal buffer is not correct. Allocate a new one. */
new = idal_buffer_alloc(block_size, 0);
if (IS_ERR(new))
return -ENOMEM;
if (device->char_data.idal_buf != NULL)
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = new;
return 0;
}
/*
* Tape device read function
*/
static ssize_t
tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
{
struct tape_device *device;
struct tape_request *request;
size_t block_size;
int rc;
DBF_EVENT(6, "TCHAR:read\n");
device = (struct tape_device *) filp->private_data;
/*
* If the tape isn't terminated yet, do it now. And since we then
* are at the end of the tape, there wouldn't be anything to read
* anyway, so we return immediately.
*/
if(device->required_tapemarks) {
return tape_std_terminate_write(device);
}
/* Find out block size to use */
if (device->char_data.block_size != 0) {
if (count < device->char_data.block_size) {
DBF_EVENT(3, "TCHAR:read smaller than block "
"size was requested\n");
return -EINVAL;
}
block_size = device->char_data.block_size;
} else {
block_size = count;
}
rc = tapechar_check_idalbuffer(device, block_size);
if (rc)
return rc;
#ifdef CONFIG_S390_TAPE_BLOCK
/* Changes position. */
device->blk_data.medium_changed = 1;
#endif
DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
/* Let the discipline build the ccw chain. */
request = device->discipline->read_block(device, block_size);
if (IS_ERR(request))
return PTR_ERR(request);
/* Execute it. */
rc = tape_do_io(device, request);
if (rc == 0) {
rc = block_size - request->rescnt;
DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
filp->f_pos += rc;
/* Copy data from idal buffer to user space. */
if (idal_buffer_to_user(device->char_data.idal_buf,
data, rc) != 0)
rc = -EFAULT;
}
tape_free_request(request);
return rc;
}
/*
* Tape device write function
*/
static ssize_t
tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
{
struct tape_device *device;
struct tape_request *request;
size_t block_size;
size_t written;
int nblocks;
int i, rc;
DBF_EVENT(6, "TCHAR:write\n");
device = (struct tape_device *) filp->private_data;
/* Find out block size and number of blocks */
if (device->char_data.block_size != 0) {
if (count < device->char_data.block_size) {
DBF_EVENT(3, "TCHAR:write smaller than block "
"size was requested\n");
return -EINVAL;
}
block_size = device->char_data.block_size;
nblocks = count / block_size;
} else {
block_size = count;
nblocks = 1;
}
rc = tapechar_check_idalbuffer(device, block_size);
if (rc)
return rc;
#ifdef CONFIG_S390_TAPE_BLOCK
/* Changes position. */
device->blk_data.medium_changed = 1;
#endif
DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
/* Let the discipline build the ccw chain. */
request = device->discipline->write_block(device, block_size);
if (IS_ERR(request))
return PTR_ERR(request);
rc = 0;
written = 0;
for (i = 0; i < nblocks; i++) {
/* Copy data from user space to idal buffer. */
if (idal_buffer_from_user(device->char_data.idal_buf,
data, block_size)) {
rc = -EFAULT;
break;
}
rc = tape_do_io(device, request);
if (rc)
break;
DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
block_size - request->rescnt);
filp->f_pos += block_size - request->rescnt;
written += block_size - request->rescnt;
if (request->rescnt != 0)
break;
data += block_size;
}
tape_free_request(request);
if (rc == -ENOSPC) {
/*
* Ok, the device has no more space. It has NOT written
* the block.
*/
if (device->discipline->process_eov)
device->discipline->process_eov(device);
if (written > 0)
rc = 0;
}
/*
* After doing a write we always need two tapemarks to correctly
* terminate the tape (one to terminate the file, the second to
* flag the end of recorded data).
* Since process_eov positions the tape in front of the written
* tapemark it doesn't hurt to write two marks again.
*/
if (!rc)
device->required_tapemarks = 2;
return rc ? rc : written;
}
/*
* Character frontend tape device open function.
*/
static int
tapechar_open (struct inode *inode, struct file *filp)
{
struct tape_device *device;
int minor, rc;
DBF_EVENT(6, "TCHAR:open: %i:%i\n",
imajor(filp->f_path.dentry->d_inode),
iminor(filp->f_path.dentry->d_inode));
if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
return -ENODEV;
lock_kernel();
minor = iminor(filp->f_path.dentry->d_inode);
device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
if (IS_ERR(device)) {
DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n");
rc = PTR_ERR(device);
goto out;
}
rc = tape_open(device);
if (rc == 0) {
filp->private_data = device;
rc = nonseekable_open(inode, filp);
}
else
tape_put_device(device);
out:
unlock_kernel();
return rc;
}
/*
* Character frontend tape device release function.
*/
static int
tapechar_release(struct inode *inode, struct file *filp)
{
struct tape_device *device;
DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode));
device = (struct tape_device *) filp->private_data;
/*
* If this is the rewinding tape minor then rewind. In that case we
* write all required tapemarks. Otherwise only one to terminate the
* file.
*/
if ((iminor(inode) & 1) != 0) {
if (device->required_tapemarks)
tape_std_terminate_write(device);
tape_mtop(device, MTREW, 1);
} else {
if (device->required_tapemarks > 1) {
if (tape_mtop(device, MTWEOF, 1) == 0)
device->required_tapemarks--;
}
}
if (device->char_data.idal_buf != NULL) {
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = NULL;
}
tape_release(device);
filp->private_data = tape_put_device(device);
return 0;
}
/*
* Tape device io controls.
*/
static int
tapechar_ioctl(struct inode *inp, struct file *filp,
unsigned int no, unsigned long data)
{
struct tape_device *device;
int rc;
DBF_EVENT(6, "TCHAR:ioct\n");
device = (struct tape_device *) filp->private_data;
if (no == MTIOCTOP) {
struct mtop op;
if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
return -EFAULT;
if (op.mt_count < 0)
return -EINVAL;
/*
* Operations that change tape position should write final
* tapemarks.
*/
switch (op.mt_op) {
case MTFSF:
case MTBSF:
case MTFSR:
case MTBSR:
case MTREW:
case MTOFFL:
case MTEOM:
case MTRETEN:
case MTBSFM:
case MTFSFM:
case MTSEEK:
#ifdef CONFIG_S390_TAPE_BLOCK
device->blk_data.medium_changed = 1;
#endif
if (device->required_tapemarks)
tape_std_terminate_write(device);
default:
;
}
rc = tape_mtop(device, op.mt_op, op.mt_count);
if (op.mt_op == MTWEOF && rc == 0) {
if (op.mt_count > device->required_tapemarks)
device->required_tapemarks = 0;
else
device->required_tapemarks -= op.mt_count;
}
return rc;
}
if (no == MTIOCPOS) {
/* MTIOCPOS: query the tape position. */
struct mtpos pos;
rc = tape_mtop(device, MTTELL, 1);
if (rc < 0)
return rc;
pos.mt_blkno = rc;
if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
return -EFAULT;
return 0;
}
if (no == MTIOCGET) {
/* MTIOCGET: query the tape drive status. */
struct mtget get;
memset(&get, 0, sizeof(get));
get.mt_type = MT_ISUNKNOWN;
get.mt_resid = 0 /* device->devstat.rescnt */;
get.mt_dsreg = device->tape_state;
/* FIXME: mt_gstat, mt_erreg, mt_fileno */
get.mt_gstat = 0;
get.mt_erreg = 0;
get.mt_fileno = 0;
get.mt_gstat = device->tape_generic_status;
if (device->medium_state == MS_LOADED) {
rc = tape_mtop(device, MTTELL, 1);
if (rc < 0)
return rc;
if (rc == 0)
get.mt_gstat |= GMT_BOT(~0);
get.mt_blkno = rc;
}
if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
return -EFAULT;
return 0;
}
/* Try the discipline ioctl function. */
if (device->discipline->ioctl_fn == NULL)
return -EINVAL;
return device->discipline->ioctl_fn(device, no, data);
}
static long
tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device = filp->private_data;
int rval = -ENOIOCTLCMD;
if (device->discipline->ioctl_fn) {
lock_kernel();
rval = device->discipline->ioctl_fn(device, no, data);
unlock_kernel();
if (rval == -EINVAL)
rval = -ENOIOCTLCMD;
}
return rval;
}
/*
* Initialize character device frontend.
*/
int
tapechar_init (void)
{
dev_t dev;
if (alloc_chrdev_region(&dev, 0, 256, "tape") != 0)
return -1;
tapechar_major = MAJOR(dev);
return 0;
}
/*
* cleanup
*/
void
tapechar_exit(void)
{
unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
}

View File

@@ -0,0 +1,127 @@
/*
* (C) Copyright IBM Corp. 2004
* tape_class.c
*
* Tape class device support
*
* Author: Stefan Bader <shbader@de.ibm.com>
* Based on simple class device code by Greg K-H
*/
#include "tape_class.h"
MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
MODULE_DESCRIPTION(
"(C) Copyright IBM Corp. 2004 All Rights Reserved.\n"
"tape_class.c"
);
MODULE_LICENSE("GPL");
static struct class *tape_class;
/*
* Register a tape device and return a pointer to the cdev structure.
*
* device
* The pointer to the struct device of the physical (base) device.
* dev
* The intended major/minor number. The major number may be 0 to
* get a dynamic major number.
* fops
* The pointer to the driver's file operations for the tape device.
* device_name
* The pointer to the name of the character device.
* mode_name
* The pointer to the name of the tape mode; used for the sysfs link
* from the physical device to the class device.
*/
struct tape_class_device *register_tape_dev(
struct device * device,
dev_t dev,
const struct file_operations *fops,
char * device_name,
char * mode_name)
{
struct tape_class_device * tcd;
int rc;
char * s;
tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL);
if (!tcd)
return ERR_PTR(-ENOMEM);
strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
*s = '!';
strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
*s = '!';
tcd->char_device = cdev_alloc();
if (!tcd->char_device) {
rc = -ENOMEM;
goto fail_with_tcd;
}
tcd->char_device->owner = fops->owner;
tcd->char_device->ops = fops;
tcd->char_device->dev = dev;
rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
if (rc)
goto fail_with_cdev;
tcd->class_device = device_create(tape_class, device,
tcd->char_device->dev, NULL,
"%s", tcd->device_name);
rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0;
if (rc)
goto fail_with_cdev;
rc = sysfs_create_link(
&device->kobj,
&tcd->class_device->kobj,
tcd->mode_name
);
if (rc)
goto fail_with_class_device;
return tcd;
fail_with_class_device:
device_destroy(tape_class, tcd->char_device->dev);
fail_with_cdev:
cdev_del(tcd->char_device);
fail_with_tcd:
kfree(tcd);
return ERR_PTR(rc);
}
EXPORT_SYMBOL(register_tape_dev);
void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
{
if (tcd != NULL && !IS_ERR(tcd)) {
sysfs_remove_link(&device->kobj, tcd->mode_name);
device_destroy(tape_class, tcd->char_device->dev);
cdev_del(tcd->char_device);
kfree(tcd);
}
}
EXPORT_SYMBOL(unregister_tape_dev);
static int __init tape_init(void)
{
tape_class = class_create(THIS_MODULE, "tape390");
return 0;
}
static void __exit tape_exit(void)
{
class_destroy(tape_class);
tape_class = NULL;
}
postcore_initcall(tape_init);
module_exit(tape_exit);

View File

@@ -0,0 +1,61 @@
/*
* (C) Copyright IBM Corp. 2004 All Rights Reserved.
* tape_class.h
*
* Tape class device support
*
* Author: Stefan Bader <shbader@de.ibm.com>
* Based on simple class device code by Greg K-H
*/
#ifndef __TAPE_CLASS_H__
#define __TAPE_CLASS_H__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#define TAPECLASS_NAME_LEN 32
struct tape_class_device {
struct cdev *char_device;
struct device *class_device;
char device_name[TAPECLASS_NAME_LEN];
char mode_name[TAPECLASS_NAME_LEN];
};
/*
* Register a tape device and return a pointer to the tape class device
* created by the call.
*
* device
* The pointer to the struct device of the physical (base) device.
* dev
* The intended major/minor number. The major number may be 0 to
* get a dynamic major number.
* fops
* The pointer to the drivers file operations for the tape device.
* device_name
* Pointer to the logical device name (will also be used as kobject name
* of the cdev). This can also be called the name of the tape class
* device.
* mode_name
* Points to the name of the tape mode. This creates a link with that
* name from the physical device to the logical device (class).
*/
struct tape_class_device *register_tape_dev(
struct device * device,
dev_t dev,
const struct file_operations *fops,
char * device_name,
char * mode_name
);
void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
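/*
 * Typical use (illustrative sketch; the "ntibm0"/"non-rewinding" names
 * follow tape_char.c, everything else here is hypothetical):
 *
 *	tcd = register_tape_dev(&cdev->dev, MKDEV(major, minor),
 *				&my_fops, "ntibm0", "non-rewinding");
 *	if (IS_ERR(tcd))
 *		return PTR_ERR(tcd);
 *	...
 *	unregister_tape_dev(&cdev->dev, tcd);
 */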
#endif /* __TAPE_CLASS_H__ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,141 @@
/*
* drivers/s390/char/tape.c
* tape device driver for S/390 and zSeries tapes.
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
* PROCFS Functions
*/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
static const char *tape_med_st_verbose[MS_SIZE] =
{
[MS_UNKNOWN] = "UNKNOWN ",
[MS_LOADED] = "LOADED ",
[MS_UNLOADED] = "UNLOADED"
};
/* our proc tapedevices entry */
static struct proc_dir_entry *tape_proc_devices;
/*
* Show function for /proc/tapedevices
*/
static int tape_proc_show(struct seq_file *m, void *v)
{
struct tape_device *device;
struct tape_request *request;
const char *str;
unsigned long n;
n = (unsigned long) v - 1;
if (!n) {
seq_printf(m, "TapeNo\tBusID CuType/Model\t"
"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
}
device = tape_get_device(n);
if (IS_ERR(device))
return 0;
spin_lock_irq(get_ccwdev_lock(device->cdev));
seq_printf(m, "%d\t", (int) n);
seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev));
seq_printf(m, "%04X/", device->cdev->id.cu_type);
seq_printf(m, "%02X\t", device->cdev->id.cu_model);
seq_printf(m, "%04X/", device->cdev->id.dev_type);
seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
if (device->char_data.block_size == 0)
seq_printf(m, "auto\t");
else
seq_printf(m, "%i\t", device->char_data.block_size);
if (device->tape_state >= 0 &&
device->tape_state < TS_SIZE)
str = tape_state_verbose[device->tape_state];
else
str = "UNKNOWN";
seq_printf(m, "%s\t", str);
if (!list_empty(&device->req_queue)) {
request = list_entry(device->req_queue.next,
struct tape_request, list);
str = tape_op_verbose[request->op];
} else
str = "---";
seq_printf(m, "%s\t", str);
seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
tape_put_device(device);
return 0;
}
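/*
* Illustrative /proc/tapedevices output (the values are made up; the state
* and medium-state columns come from the verbose string tables):
*
* TapeNo	BusID      CuType/Model	DevType/Model	BlkSize	State	Op	MedState
* 0	0.0.0171   3480/01	3480/04		auto	IN_USE	---	LOADED
*/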
static void *tape_proc_start(struct seq_file *m, loff_t *pos)
{
if (*pos >= 256 / TAPE_MINORS_PER_DEV)
return NULL;
return (void *)((unsigned long) *pos + 1);
}
static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return tape_proc_start(m, pos);
}
static void tape_proc_stop(struct seq_file *m, void *v)
{
}
static const struct seq_operations tape_proc_seq = {
.start = tape_proc_start,
.next = tape_proc_next,
.stop = tape_proc_stop,
.show = tape_proc_show,
};
static int tape_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &tape_proc_seq);
}
static const struct file_operations tape_proc_ops =
{
.owner = THIS_MODULE,
.open = tape_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* Initialize procfs stuff on startup
*/
void
tape_proc_init(void)
{
tape_proc_devices =
proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
&tape_proc_ops);
if (tape_proc_devices == NULL) {
return;
}
}
/*
* Cleanup all stuff registered to the procfs
*/
void
tape_proc_cleanup(void)
{
if (tape_proc_devices != NULL)
remove_proc_entry ("tapedevices", NULL);
}

View File

@@ -0,0 +1,748 @@
/*
* drivers/s390/char/tape_std.c
* standard tape device functions for ibm tapes.
*
* S390 and zSeries version
* Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Stefan Bader <shbader@de.ibm.com>
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/timer.h>
#include <asm/types.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/tape390.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
#include "tape_std.h"
/*
* tape_std_assign
*/
static void
tape_std_assign_timeout(unsigned long data)
{
struct tape_request * request;
struct tape_device * device;
int rc;
request = (struct tape_request *) data;
device = request->device;
BUG_ON(!device);
DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
device->cdev_id);
rc = tape_cancel_io(device, request);
if(rc)
DBF_EVENT(3, "(%s): Assign timeout: Cancel failed with rc = %i\n",
dev_name(&device->cdev->dev), rc);
}
int
tape_std_assign(struct tape_device *device)
{
int rc;
struct timer_list timeout;
struct tape_request *request;
request = tape_alloc_request(2, 11);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_ASSIGN;
tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/*
* The assign command sometimes blocks if the device is assigned
* to another host (actually this shouldn't happen but it does).
* So we set up a timeout for this call.
*/
init_timer_on_stack(&timeout);
timeout.function = tape_std_assign_timeout;
timeout.data = (unsigned long) request;
timeout.expires = jiffies + 2 * HZ;
add_timer(&timeout);
rc = tape_do_io_interruptible(device, request);
del_timer(&timeout);
if (rc != 0) {
DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
device->cdev_id);
} else {
DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id);
}
tape_free_request(request);
return rc;
}
/*
* tape_std_unassign
*/
int
tape_std_unassign (struct tape_device *device)
{
int rc;
struct tape_request *request;
if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(3, "(%08x): Can't unassign device\n",
device->cdev_id);
return -EIO;
}
request = tape_alloc_request(2, 11);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_UNASSIGN;
tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
if ((rc = tape_do_io(device, request)) != 0) {
DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
} else {
DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
}
tape_free_request(request);
return rc;
}
/*
* TAPE390_DISPLAY: Show a string on the tape display.
*/
int
tape_std_display(struct tape_device *device, struct display_struct *disp)
{
struct tape_request *request;
int rc;
request = tape_alloc_request(2, 17);
if (IS_ERR(request)) {
DBF_EVENT(3, "TAPE: load display failed\n");
return PTR_ERR(request);
}
request->op = TO_DIS;
*(unsigned char *) request->cpdata = disp->cntrl;
DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
rc = tape_do_io_interruptible(device, request);
tape_free_request(request);
return rc;
}
/*
* Read block id.
*/
int
tape_std_read_block_id(struct tape_device *device, __u64 *id)
{
struct tape_request *request;
int rc;
request = tape_alloc_request(3, 8);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_RBI;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0)
/* Get result from read buffer. */
*id = *(__u64 *) request->cpdata;
tape_free_request(request);
return rc;
}
int
tape_std_terminate_write(struct tape_device *device)
{
int rc;
if(device->required_tapemarks == 0)
return 0;
DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor,
device->required_tapemarks);
rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
if (rc)
return rc;
device->required_tapemarks = 0;
return tape_mtop(device, MTBSR, 1);
}
/*
* MTLOAD: Loads the tape.
* The default implementation just waits until the tape medium state changes
* to MS_LOADED.
*/
int
tape_std_mtload(struct tape_device *device, int count)
{
return wait_event_interruptible(device->state_change_wq,
(device->medium_state == MS_LOADED));
}
/*
* MTSETBLK: Set block size.
*/
int
tape_std_mtsetblk(struct tape_device *device, int count)
{
struct idal_buffer *new;
DBF_LH(6, "tape_std_mtsetblk(%d)\n", count);
if (count <= 0) {
/*
* Just set block_size to 0. tapechar_read/tapechar_write
* will realloc the idal buffer if a bigger one than the
* current is needed.
*/
device->char_data.block_size = 0;
return 0;
}
if (device->char_data.idal_buf != NULL &&
device->char_data.idal_buf->size == count)
/* We already have an idal buffer of that size. */
return 0;
if (count > MAX_BLOCKSIZE) {
DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
count, MAX_BLOCKSIZE);
return -EINVAL;
}
/* Allocate a new idal buffer. */
new = idal_buffer_alloc(count, 0);
if (IS_ERR(new))
return -ENOMEM;
if (device->char_data.idal_buf != NULL)
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = new;
device->char_data.block_size = count;
DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size);
return 0;
}
/*
* MTRESET: Set block size to 0.
*/
int
tape_std_mtreset(struct tape_device *device, int count)
{
DBF_EVENT(6, "TCHAR:devreset:\n");
device->char_data.block_size = 0;
return 0;
}
/*
* MTFSF: Forward space over 'count' file marks. The tape is positioned
* at the EOT (End of Tape) side of the file mark.
*/
int
tape_std_mtfsf(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTFSR: Forward space over 'count' tape blocks (blocksize is set
* via MTSETBLK).
*/
int
tape_std_mtfsr(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSB;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0 && request->rescnt > 0) {
DBF_LH(3, "FSR over tapemark\n");
rc = 1;
}
tape_free_request(request);
return rc;
}
/*
* MTBSR: Backward space over 'count' tape blocks.
* (blocksize is set via MTSETBLK).
*/
int
tape_std_mtbsr(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSB;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0 && request->rescnt > 0) {
DBF_LH(3, "BSR over tapemark\n");
rc = 1;
}
tape_free_request(request);
return rc;
}
/*
* MTWEOF: Write 'count' file marks at the current position.
*/
int
tape_std_mtweof(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_WTM;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTBSFM: Backward space over 'count' file marks.
* The tape is positioned at the BOT (Begin Of Tape) side of the
* last skipped file mark.
*/
int
tape_std_mtbsfm(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTBSF: Backward space over 'count' file marks. The tape is positioned at
* the EOT (End of Tape) side of the last skipped file mark.
*/
int
tape_std_mtbsf(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io_free(device, request);
if (rc == 0) {
rc = tape_mtop(device, MTFSR, 1);
if (rc > 0)
rc = 0;
}
return rc;
}
/*
* MTFSFM: Forward space over 'count' file marks.
* The tape is positioned at the BOT (Begin Of Tape) side
* of the last skipped file mark.
*/
int
tape_std_mtfsfm(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io_free(device, request);
if (rc == 0) {
rc = tape_mtop(device, MTBSR, 1);
if (rc > 0)
rc = 0;
}
return rc;
}
/*
* MTREW: Rewind the tape.
*/
int
tape_std_mtrew(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(3, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_REW;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTOFFL: Rewind the tape and put the drive off-line.
* Implement 'rewind unload'
*/
int
tape_std_mtoffl(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(3, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_RUN;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTNOP: 'No operation'.
*/
int
tape_std_mtnop(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_NOP;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTEOM: positions at the end of the portion of the tape already used
* for recording data. MTEOM positions after the last file mark, ready for
* appending another file.
*/
int
tape_std_mteom(struct tape_device *device, int mt_count)
{
int rc;
/*
* Seek to the beginning of the tape (rewind).
*/
if ((rc = tape_mtop(device, MTREW, 1)) < 0)
return rc;
/*
* The logical end of volume is given by two sequential tapemarks.
* Look for this by skipping to the next file (over one tapemark)
* and then test for another one (fsr returns 1 if a tapemark was
* encountered).
*/
do {
if ((rc = tape_mtop(device, MTFSF, 1)) < 0)
return rc;
if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
return rc;
} while (rc == 0);
return tape_mtop(device, MTBSR, 1);
}
/*
* MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
*/
int
tape_std_mtreten(struct tape_device *device, int mt_count)
{
struct tape_request *request;
int rc;
request = tape_alloc_request(4, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL);
tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
/* execute it, MTRETEN rc gets ignored */
rc = tape_do_io_interruptible(device, request);
tape_free_request(request);
return tape_mtop(device, MTREW, 1);
}
/*
* MTERASE: erases the tape.
*/
int
tape_std_mterase(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(6, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_DSE;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL);
tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTUNLOAD: Rewind the tape and unload it.
*/
int
tape_std_mtunload(struct tape_device *device, int mt_count)
{
return tape_mtop(device, MTOFFL, mt_count);
}
/*
* MTCOMPRESSION: used to enable compression.
* Sets the IDRC on/off.
*/
int
tape_std_mtcompression(struct tape_device *device, int mt_count)
{
struct tape_request *request;
if (mt_count < 0 || mt_count > 1) {
DBF_EXCEPTION(6, "xcom parm\n");
return -EINVAL;
}
request = tape_alloc_request(2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_NOP;
/* setup ccws */
if (mt_count == 0)
*device->modeset_byte &= ~0x08;
else
*device->modeset_byte |= 0x08;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* Read Block
*/
struct tape_request *
tape_std_read_block(struct tape_device *device, size_t count)
{
struct tape_request *request;
/*
* We have to alloc 4 ccws in order to be able to transform request
* into a read backward request in error case.
*/
request = tape_alloc_request(4, 0);
if (IS_ERR(request)) {
DBF_EXCEPTION(6, "xrbl fail");
return request;
}
request->op = TO_RFO;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
device->char_data.idal_buf);
DBF_EVENT(6, "xrbl ccwg\n");
return request;
}
/*
* Read Block backward transformation function.
*/
void
tape_std_read_backward(struct tape_device *device, struct tape_request *request)
{
/*
* We have allocated 4 ccws in tape_std_read, so we can now
* transform the request to a read backward, followed by a
* forward space block.
*/
request->op = TO_RBA;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
device->char_data.idal_buf);
tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
DBF_EVENT(6, "xrop ccwg");}
/*
* Write Block
*/
struct tape_request *
tape_std_write_block(struct tape_device *device, size_t count)
{
struct tape_request *request;
request = tape_alloc_request(2, 0);
if (IS_ERR(request)) {
DBF_EXCEPTION(6, "xwbl fail\n");
return request;
}
request->op = TO_WRI;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
device->char_data.idal_buf);
DBF_EVENT(6, "xwbl ccwg\n");
return request;
}
/*
* This routine is called by frontend after an ENOSP on write
*/
void
tape_std_process_eov(struct tape_device *device)
{
/*
* End of volume: We have to backspace the last written record, then
* we TRY to write a tapemark and then backspace over the written TM
*/
if (tape_mtop(device, MTBSR, 1) == 0 &&
tape_mtop(device, MTWEOF, 1) == 0) {
tape_mtop(device, MTBSR, 1);
}
}
EXPORT_SYMBOL(tape_std_assign);
EXPORT_SYMBOL(tape_std_unassign);
EXPORT_SYMBOL(tape_std_display);
EXPORT_SYMBOL(tape_std_read_block_id);
EXPORT_SYMBOL(tape_std_mtload);
EXPORT_SYMBOL(tape_std_mtsetblk);
EXPORT_SYMBOL(tape_std_mtreset);
EXPORT_SYMBOL(tape_std_mtfsf);
EXPORT_SYMBOL(tape_std_mtfsr);
EXPORT_SYMBOL(tape_std_mtbsr);
EXPORT_SYMBOL(tape_std_mtweof);
EXPORT_SYMBOL(tape_std_mtbsfm);
EXPORT_SYMBOL(tape_std_mtbsf);
EXPORT_SYMBOL(tape_std_mtfsfm);
EXPORT_SYMBOL(tape_std_mtrew);
EXPORT_SYMBOL(tape_std_mtoffl);
EXPORT_SYMBOL(tape_std_mtnop);
EXPORT_SYMBOL(tape_std_mteom);
EXPORT_SYMBOL(tape_std_mtreten);
EXPORT_SYMBOL(tape_std_mterase);
EXPORT_SYMBOL(tape_std_mtunload);
EXPORT_SYMBOL(tape_std_mtcompression);
EXPORT_SYMBOL(tape_std_read_block);
EXPORT_SYMBOL(tape_std_read_backward);
EXPORT_SYMBOL(tape_std_write_block);
EXPORT_SYMBOL(tape_std_process_eov);

View File

@@ -0,0 +1,159 @@
/*
* drivers/s390/char/tape_std.h
* standard tape device functions for ibm tapes.
*
* Copyright (C) IBM Corp. 2001,2006
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _TAPE_STD_H
#define _TAPE_STD_H
#include <asm/tape390.h>
/*
* Biggest block size to handle. Currently 64K because we only build
* channel programs without data chaining.
*/
#define MAX_BLOCKSIZE 65535
/*
* The CCW commands for the Tape type of command.
*/
#define INVALID_00 0x00 /* Invalid cmd */
#define BACKSPACEBLOCK 0x27 /* Back Space block */
#define BACKSPACEFILE 0x2f /* Back Space file */
#define DATA_SEC_ERASE 0x97 /* Data security erase */
#define ERASE_GAP 0x17 /* Erase Gap */
#define FORSPACEBLOCK 0x37 /* Forward space block */
#define FORSPACEFILE 0x3F /* Forward Space file */
#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */
#define NOP 0x03 /* No operation */
#define READ_FORWARD 0x02 /* Read forward */
#define REWIND 0x07 /* Rewind */
#define REWIND_UNLOAD 0x0F /* Rewind and Unload */
#define SENSE 0x04 /* Sense */
#define NEW_MODE_SET 0xEB /* Guess it is Mode set */
#define WRITE_CMD 0x01 /* Write */
#define WRITETAPEMARK 0x1F /* Write Tape Mark */
#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */
#define CONTROL_ACCESS 0xE3 /* Set high speed */
#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT */
#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */
#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */
#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */
#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */
#define MODE_SET_C3 0xC3 /* for 3420 */
#define MODE_SET_CB 0xCB /* for 3420 */
#define MODE_SET_D3 0xD3 /* for 3420 */
#define READ_BACKWARD 0x0C /* */
#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */
#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */
#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */
#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT */
#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT */
#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT */
#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */
#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */
#define READ_DEV_CHAR 0x64 /* Read device characteristics */
#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT */
#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */
#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */
#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */
#define SYNC 0x43 /* Synchronize (flush buffer) */
#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */
#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */
#define READ_CONFIG_DATA 0xFA /* 3490 CMD */
#define READ_MESSAGE_ID 0x4E /* 3490 CMD */
#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */
#define SET_INTERFACE_ID 0x73 /* 3490 CMD */
#define SENSE_COMMAND_REJECT 0x80
#define SENSE_INTERVENTION_REQUIRED 0x40
#define SENSE_BUS_OUT_CHECK 0x20
#define SENSE_EQUIPMENT_CHECK 0x10
#define SENSE_DATA_CHECK 0x08
#define SENSE_OVERRUN 0x04
#define SENSE_DEFERRED_UNIT_CHECK 0x02
#define SENSE_ASSIGNED_ELSEWHERE 0x01
#define SENSE_LOCATE_FAILURE 0x80
#define SENSE_DRIVE_ONLINE 0x40
#define SENSE_RESERVED 0x20
#define SENSE_RECORD_SEQUENCE_ERR 0x10
#define SENSE_BEGINNING_OF_TAPE 0x08
#define SENSE_WRITE_MODE 0x04
#define SENSE_WRITE_PROTECT 0x02
#define SENSE_NOT_CAPABLE 0x01
#define SENSE_CHANNEL_ADAPTER_CODE 0xE0
#define SENSE_CHANNEL_ADAPTER_LOC 0x10
#define SENSE_REPORTING_CU 0x08
#define SENSE_AUTOMATIC_LOADER 0x04
#define SENSE_TAPE_SYNC_MODE 0x02
#define SENSE_TAPE_POSITIONING 0x01
/* discipline functions */
struct tape_request *tape_std_read_block(struct tape_device *, size_t);
void tape_std_read_backward(struct tape_device *device,
struct tape_request *request);
struct tape_request *tape_std_write_block(struct tape_device *, size_t);
struct tape_request *tape_std_bread(struct tape_device *, struct request *);
void tape_std_free_bread(struct tape_request *);
void tape_std_check_locate(struct tape_device *, struct tape_request *);
struct tape_request *tape_std_bwrite(struct request *,
struct tape_device *, int);
/* Some non-mtop commands. */
int tape_std_assign(struct tape_device *);
int tape_std_unassign(struct tape_device *);
int tape_std_read_block_id(struct tape_device *device, __u64 *id);
int tape_std_display(struct tape_device *, struct display_struct *disp);
int tape_std_terminate_write(struct tape_device *);
/* Standard magnetic tape commands. */
int tape_std_mtbsf(struct tape_device *, int);
int tape_std_mtbsfm(struct tape_device *, int);
int tape_std_mtbsr(struct tape_device *, int);
int tape_std_mtcompression(struct tape_device *, int);
int tape_std_mteom(struct tape_device *, int);
int tape_std_mterase(struct tape_device *, int);
int tape_std_mtfsf(struct tape_device *, int);
int tape_std_mtfsfm(struct tape_device *, int);
int tape_std_mtfsr(struct tape_device *, int);
int tape_std_mtload(struct tape_device *, int);
int tape_std_mtnop(struct tape_device *, int);
int tape_std_mtoffl(struct tape_device *, int);
int tape_std_mtreset(struct tape_device *, int);
int tape_std_mtreten(struct tape_device *, int);
int tape_std_mtrew(struct tape_device *, int);
int tape_std_mtsetblk(struct tape_device *, int);
int tape_std_mtunload(struct tape_device *, int);
int tape_std_mtweof(struct tape_device *, int);
/* Event handlers */
void tape_std_default_handler(struct tape_device *);
void tape_std_unexpect_uchk_handler(struct tape_device *);
void tape_std_irq(struct tape_device *);
void tape_std_process_eov(struct tape_device *);
// the error recovery stuff:
void tape_std_error_recovery(struct tape_device *);
void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
void tape_std_error_recovery_succeded(struct tape_device *);
void tape_std_error_recovery_do_retry(struct tape_device *);
void tape_std_error_recovery_read_opposite(struct tape_device *);
void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
/* S390 tape types */
enum s390_tape_type {
tape_3480,
tape_3490,
tape_3590,
tape_3592,
};
#endif // _TAPE_STD_H

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,16 @@
/*
* drivers/s390/char/tty3270.h
*
* Copyright IBM Corp. 2007
*
*/
#ifndef __DRIVERS_S390_CHAR_TTY3270_H
#define __DRIVERS_S390_CHAR_TTY3270_H
#include <linux/tty.h>
#include <linux/tty_driver.h>
extern struct tty_driver *tty3270_driver;
#endif /* __DRIVERS_S390_CHAR_TTY3270_H */

View File

@@ -0,0 +1,225 @@
/*
* Copyright IBM Corp. 2004,2007
* Interface implementation for communication with the z/VM control program
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
*
* z/VMs CP offers the possibility to issue commands via the diagnose code 8
* this driver implements a character device that issues these commands and
* returns the answer of CP.
* The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
*/
#define KMSG_COMPONENT "vmcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
#include "vmcp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>");
MODULE_DESCRIPTION("z/VM CP interface");
static debug_info_t *vmcp_debug;
static int vmcp_open(struct inode *inode, struct file *file)
{
struct vmcp_session *session;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
session = kmalloc(sizeof(*session), GFP_KERNEL);
if (!session)
return -ENOMEM;
session->bufsize = PAGE_SIZE;
session->response = NULL;
session->resp_size = 0;
mutex_init(&session->mutex);
file->private_data = session;
return nonseekable_open(inode, file);
}
static int vmcp_release(struct inode *inode, struct file *file)
{
struct vmcp_session *session;
session = (struct vmcp_session *)file->private_data;
file->private_data = NULL;
free_pages((unsigned long)session->response, get_order(session->bufsize));
kfree(session);
return 0;
}
static ssize_t
vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
{
ssize_t ret;
size_t size;
struct vmcp_session *session;
session = file->private_data;
if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
if (!session->response) {
mutex_unlock(&session->mutex);
return 0;
}
size = min_t(size_t, session->resp_size, session->bufsize);
ret = simple_read_from_buffer(buff, count, ppos,
session->response, size);
mutex_unlock(&session->mutex);
return ret;
}
static ssize_t
vmcp_write(struct file *file, const char __user *buff, size_t count,
loff_t *ppos)
{
char *cmd;
struct vmcp_session *session;
if (count > 240)
return -EINVAL;
cmd = kmalloc(count + 1, GFP_KERNEL);
if (!cmd)
return -ENOMEM;
if (copy_from_user(cmd, buff, count)) {
kfree(cmd);
return -EFAULT;
}
cmd[count] = '\0';
session = (struct vmcp_session *)file->private_data;
if (mutex_lock_interruptible(&session->mutex)) {
kfree(cmd);
return -ERESTARTSYS;
}
if (!session->response)
session->response = (char *)__get_free_pages(GFP_KERNEL
| __GFP_REPEAT | GFP_DMA,
get_order(session->bufsize));
if (!session->response) {
mutex_unlock(&session->mutex);
kfree(cmd);
return -ENOMEM;
}
debug_text_event(vmcp_debug, 1, cmd);
session->resp_size = cpcmd(cmd, session->response, session->bufsize,
&session->resp_code);
mutex_unlock(&session->mutex);
kfree(cmd);
*ppos = 0; /* reset the file pointer after a command */
return count;
}
/*
* These ioctls are available, as the semantics of the diagnose 8 call
* do not fit very well into a Linux call. Diagnose X'08' is described in
* CP Programming Services SC24-6084-00
*
* VMCP_GETCODE: gives the CP return code back to user space
* VMCP_SETBUF: sets the response buffer for the next write call. diagnose 8
* expects adjacent pages in real storage and to make matters worse, we
* don't know the size of the response. Therefore we default to PAGE_SIZE and
* let userspace change the response size, if it expects a bigger
* response.
*/
static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct vmcp_session *session;
int temp;
session = (struct vmcp_session *)file->private_data;
if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
switch (cmd) {
case VMCP_GETCODE:
temp = session->resp_code;
mutex_unlock(&session->mutex);
return put_user(temp, (int __user *)arg);
case VMCP_SETBUF:
free_pages((unsigned long)session->response,
get_order(session->bufsize));
session->response=NULL;
temp = get_user(session->bufsize, (int __user *)arg);
if (get_order(session->bufsize) > 8) {
session->bufsize = PAGE_SIZE;
temp = -EINVAL;
}
mutex_unlock(&session->mutex);
return temp;
case VMCP_GETSIZE:
temp = session->resp_size;
mutex_unlock(&session->mutex);
return put_user(temp, (int __user *)arg);
default:
mutex_unlock(&session->mutex);
return -ENOIOCTLCMD;
}
}
static const struct file_operations vmcp_fops = {
.owner = THIS_MODULE,
.open = vmcp_open,
.release = vmcp_release,
.read = vmcp_read,
.write = vmcp_write,
.unlocked_ioctl = vmcp_ioctl,
.compat_ioctl = vmcp_ioctl,
};
static struct miscdevice vmcp_dev = {
.name = "vmcp",
.minor = MISC_DYNAMIC_MINOR,
.fops = &vmcp_fops,
};
static int __init vmcp_init(void)
{
int ret;
if (!MACHINE_IS_VM) {
pr_warning("The z/VM CP interface device driver cannot be "
"loaded without z/VM\n");
return -ENODEV;
}
vmcp_debug = debug_register("vmcp", 1, 1, 240);
if (!vmcp_debug)
return -ENOMEM;
ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
if (ret) {
debug_unregister(vmcp_debug);
return ret;
}
ret = misc_register(&vmcp_dev);
if (ret) {
debug_unregister(vmcp_debug);
return ret;
}
return 0;
}
static void __exit vmcp_exit(void)
{
misc_deregister(&vmcp_dev);
debug_unregister(vmcp_debug);
}
module_init(vmcp_init);
module_exit(vmcp_exit);

View File

@@ -0,0 +1,30 @@
/*
* Copyright (C) 2004, 2005 IBM Corporation
* Interface implementation for communication with the z/VM control program
* Version 1.0
* Author(s): Christian Borntraeger <cborntra@de.ibm.com>
*
*
* z/VMs CP offers the possibility to issue commands via the diagnose code 8
* this driver implements a character device that issues these commands and
* returns the answer of CP.
*
* The idea of this driver is based on cpint from Neale Ferguson
*/
#include <linux/ioctl.h>
#include <linux/mutex.h>
#define VMCP_GETCODE _IOR(0x10, 1, int)
#define VMCP_SETBUF _IOW(0x10, 2, int)
#define VMCP_GETSIZE _IOR(0x10, 3, int)
struct vmcp_session {
unsigned int bufsize;
char *response;
int resp_size;
int resp_code;
/* As we use copy_from/to_user, which might
* sleep, we cannot use a spinlock */
struct mutex mutex;
};
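/*
* Illustrative userspace sketch (hypothetical, not shipped with this
* header): issue a CP command through /dev/vmcp and collect the response.
*
*	int fd = open("/dev/vmcp", O_RDWR);
*	char buf[4096];
*	int code, size;
*
*	write(fd, "QUERY USERID", 12);		// run the CP command
*	ioctl(fd, VMCP_GETCODE, &code);		// CP return code
*	ioctl(fd, VMCP_GETSIZE, &size);		// size of the response
*	read(fd, buf, sizeof(buf));		// response text
*/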

View File

@@ -0,0 +1,902 @@
/*
* drivers/s390/char/vmlogrdr.c
* character device driver for reading z/VM system service records
*
*
* Copyright IBM Corp. 2004, 2009
* character device driver for reading z/VM system service records,
* Version 1.0
* Author(s): Xenia Tkatschow <xenia@us.ibm.com>
* Stefan Weinhuber <wein@de.ibm.com>
*
*/
#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
MODULE_AUTHOR
("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
" Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
"system service records.");
MODULE_LICENSE("GPL");
/*
* The size of the buffer for iucv data transfer is one page,
* but in addition to the data we read from iucv we also
* place an integer and some characters into that buffer,
* so the maximum size for record data is a little less than
* one page.
*/
#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
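/*
* Resulting layout of the per-device buffer for a completely received
* record (a sketch derived from vmlogrdr_receive_data() below):
*
*	+--------------------+---------------------------+------------+
*	| int: record length | record data from IUCV     | FENCE      |
*	| (data + FENCE)     | (at most NET_BUFFER_SIZE) | "EOR" + \0 |
*	+--------------------+---------------------------+------------+
*/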
/*
* The elements that are concurrently accessed by bottom halves are
* connection_established, iucv_path_severed, local_interrupt_buffer
* and receive_ready. The first three can be protected by
* priv_lock. receive_ready is atomic, so it can be incremented and
* decremented without holding a lock.
* The variable dev_in_use needs to be protected by the lock, since
* it's a flag used by open to make sure that the device is opened only
* by one user at the same time.
*/
struct vmlogrdr_priv_t {
char system_service[8];
char internal_name[8];
char recording_name[8];
struct iucv_path *path;
int connection_established;
int iucv_path_severed;
struct iucv_message local_interrupt_buffer;
atomic_t receive_ready;
int minor_num;
char * buffer;
char * current_position;
int remaining;
ulong residual_length;
int buffer_free;
int dev_in_use; /* 1: already opened, 0: not opened */
spinlock_t priv_lock;
struct device *device;
struct device *class_device;
int autorecording;
int autopurge;
};
/*
* File operation structure for vmlogrdr devices
*/
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
size_t count, loff_t * ppos);
static const struct file_operations vmlogrdr_fops = {
.owner = THIS_MODULE,
.open = vmlogrdr_open,
.release = vmlogrdr_release,
.read = vmlogrdr_read,
};
static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
struct iucv_message *);
static struct iucv_handler vmlogrdr_iucv_handler = {
.path_complete = vmlogrdr_iucv_path_complete,
.path_severed = vmlogrdr_iucv_path_severed,
.message_pending = vmlogrdr_iucv_message_pending,
};
static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
/*
* pointer to system service private structure
* minor number 0 --> logrec
* minor number 1 --> account
* minor number 2 --> symptom
*/
static struct vmlogrdr_priv_t sys_ser[] = {
{ .system_service = "*LOGREC ",
.internal_name = "logrec",
.recording_name = "EREP",
.minor_num = 0,
.buffer_free = 1,
.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
.autorecording = 1,
.autopurge = 1,
},
{ .system_service = "*ACCOUNT",
.internal_name = "account",
.recording_name = "ACCOUNT",
.minor_num = 1,
.buffer_free = 1,
.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
.autorecording = 1,
.autopurge = 1,
},
{ .system_service = "*SYMPTOM",
.internal_name = "symptom",
.recording_name = "SYMPTOM",
.minor_num = 2,
.buffer_free = 1,
.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
.autorecording = 1,
.autopurge = 1,
}
};
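/*
* Illustrative usage sketch (the /dev/logrec node name is an assumption
* that depends on udev naming): records are consumed with plain blocking
* reads; the data returned is the length-prefixed record followed by the
* FENCE string.
*
*	int fd = open("/dev/logrec", O_RDONLY);	// minor 0 -> *LOGREC
*	char rec[4096];
*	ssize_t n = read(fd, rec, sizeof(rec));
*/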
#define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev *vmlogrdr_cdev = NULL;
static int recording_class_AB;
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
struct vmlogrdr_priv_t * logptr = path->private;
spin_lock(&logptr->priv_lock);
logptr->connection_established = 1;
spin_unlock(&logptr->priv_lock);
wake_up(&conn_wait_queue);
}
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
struct vmlogrdr_priv_t * logptr = path->private;
u8 reason = (u8) ipuser[8];
pr_err("vmlogrdr: connection severed with reason %i\n", reason);
iucv_path_sever(path, NULL);
kfree(path);
logptr->path = NULL;
spin_lock(&logptr->priv_lock);
logptr->connection_established = 0;
logptr->iucv_path_severed = 1;
spin_unlock(&logptr->priv_lock);
wake_up(&conn_wait_queue);
/* just in case we're sleeping waiting for a record */
wake_up_interruptible(&read_wait_queue);
}
static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
struct iucv_message *msg)
{
struct vmlogrdr_priv_t * logptr = path->private;
/*
* This function is the bottom half so it should be quick.
* Copy the external interrupt data into our local eib and increment
* the usage count
*/
spin_lock(&logptr->priv_lock);
memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
atomic_inc(&logptr->receive_ready);
spin_unlock(&logptr->priv_lock);
wake_up_interruptible(&read_wait_queue);
}
static int vmlogrdr_get_recording_class_AB(void)
{
char cp_command[]="QUERY COMMAND RECORDING ";
char cp_response[80];
char *tail;
int len,i;
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
len = strnlen(cp_response,sizeof(cp_response));
// now the parsing
tail=strnchr(cp_response,len,'=');
if (!tail)
return 0;
tail++;
if (!strncmp("ANY",tail,3))
return 1;
if (!strncmp("NONE",tail,4))
return 0;
/*
* expect comma separated list of classes here, if one of them
* is A or B return 1 otherwise 0
*/
for (i=tail-cp_response; i<len; i++)
if ( cp_response[i]=='A' || cp_response[i]=='B' )
return 1;
return 0;
}
static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
int action, int purge)
{
char cp_command[80];
char cp_response[160];
char *onoff, *qid_string;
memset(cp_command, 0x00, sizeof(cp_command));
memset(cp_response, 0x00, sizeof(cp_response));
onoff = ((action == 1) ? "ON" : "OFF");
qid_string = ((recording_class_AB == 1) ? " QID * " : "");
/*
* The recording command needs to be called with option QID
* for guests that have privilege classes A or B.
* Purging has to be done as separate step, because recording
* can't be switched on as long as records are on the queue.
* Doing both at the same time doesn't work.
*/
if (purge) {
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE %s",
logptr->recording_name,
qid_string);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
}
memset(cp_command, 0x00, sizeof(cp_command));
memset(cp_response, 0x00, sizeof(cp_response));
snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
logptr->recording_name,
onoff,
qid_string);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
/* The recording command will usually answer with 'Command complete'
* on success, but when the specific service was never connected
* before, there might be an additional informational message
* 'HCPCRC8072I Recording entry not found' before the
* 'Command complete'. So I use strstr rather than strncmp.
*/
if (strstr(cp_response,"Command complete"))
return 0;
else
return -EIO;
}
static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
int dev_num = 0;
struct vmlogrdr_priv_t * logptr = NULL;
int connect_rc = 0;
int ret;
dev_num = iminor(inode);
if (dev_num >= MAXMINOR)
return -ENODEV;
logptr = &sys_ser[dev_num];
/*
* only allow the device to be opened for blocking reads
*/
if (filp->f_flags & O_NONBLOCK)
return -ENOSYS;
/* Be sure this device hasn't already been opened */
lock_kernel();
spin_lock_bh(&logptr->priv_lock);
if (logptr->dev_in_use) {
spin_unlock_bh(&logptr->priv_lock);
unlock_kernel();
return -EBUSY;
}
logptr->dev_in_use = 1;
logptr->connection_established = 0;
logptr->iucv_path_severed = 0;
atomic_set(&logptr->receive_ready, 0);
logptr->buffer_free = 1;
spin_unlock_bh(&logptr->priv_lock);
/* set the file options */
filp->private_data = logptr;
filp->f_op = &vmlogrdr_fops;
/* start recording for this service*/
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
if (ret)
pr_warning("vmlogrdr: failed to start "
"recording automatically\n");
}
/* create connection to the system service */
logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
if (!logptr->path)
goto out_dev;
connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
logptr->system_service, NULL, NULL,
logptr);
if (connect_rc) {
pr_err("vmlogrdr: iucv connection to %s "
"failed with rc %i \n",
logptr->system_service, connect_rc);
goto out_path;
}
/* We've issued the connect and now we must wait for a
* ConnectionComplete or ConnectionSevered interrupt
* before we can continue to process.
*/
wait_event(conn_wait_queue, (logptr->connection_established)
|| (logptr->iucv_path_severed));
if (logptr->iucv_path_severed)
goto out_record;
ret = nonseekable_open(inode, filp);
unlock_kernel();
return ret;
out_record:
if (logptr->autorecording)
vmlogrdr_recording(logptr,0,logptr->autopurge);
out_path:
kfree(logptr->path); /* kfree(NULL) is ok. */
logptr->path = NULL;
out_dev:
logptr->dev_in_use = 0;
unlock_kernel();
return -EIO;
}
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
int ret;
struct vmlogrdr_priv_t * logptr = filp->private_data;
iucv_path_sever(logptr->path, NULL);
kfree(logptr->path);
logptr->path = NULL;
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
if (ret)
pr_warning("vmlogrdr: failed to stop "
"recording automatically\n");
}
logptr->dev_in_use = 0;
return 0;
}
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
int rc, *temp;
/* we need to keep track of two data sizes here:
* The number of bytes we need to receive from iucv and
* the total number of bytes we actually write into the buffer.
*/
int user_data_count, iucv_data_count;
char * buffer;
if (atomic_read(&priv->receive_ready)) {
spin_lock_bh(&priv->priv_lock);
if (priv->residual_length){
/* receive second half of a record */
iucv_data_count = priv->residual_length;
user_data_count = 0;
buffer = priv->buffer;
} else {
/* receive a new record:
* We need to return the total length of the record
* + size of FENCE in the first 4 bytes of the buffer.
*/
iucv_data_count = priv->local_interrupt_buffer.length;
user_data_count = sizeof(int);
temp = (int*)priv->buffer;
*temp= iucv_data_count + sizeof(FENCE);
buffer = priv->buffer + sizeof(int);
}
/*
* If the record is bigger than our buffer, we receive only
* a part of it. We can get the rest later.
*/
if (iucv_data_count > NET_BUFFER_SIZE)
iucv_data_count = NET_BUFFER_SIZE;
rc = iucv_message_receive(priv->path,
&priv->local_interrupt_buffer,
0, buffer, iucv_data_count,
&priv->residual_length);
spin_unlock_bh(&priv->priv_lock);
/* An rc of 5 indicates that the record was bigger than
* the buffer, which is OK for us. A 9 indicates that the
* record was purged before we could receive it.
*/
if (rc == 5)
rc = 0;
if (rc == 9)
atomic_set(&priv->receive_ready, 0);
} else {
rc = 1;
}
if (!rc) {
priv->buffer_free = 0;
user_data_count += iucv_data_count;
priv->current_position = priv->buffer;
if (priv->residual_length == 0){
/* the whole record has been captured,
* now add the fence */
atomic_dec(&priv->receive_ready);
buffer = priv->buffer + user_data_count;
memcpy(buffer, FENCE, sizeof(FENCE));
user_data_count += sizeof(FENCE);
}
priv->remaining = user_data_count;
}
return rc;
}
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
size_t count, loff_t * ppos)
{
int rc;
struct vmlogrdr_priv_t * priv = filp->private_data;
while (priv->buffer_free) {
rc = vmlogrdr_receive_data(priv);
if (rc) {
rc = wait_event_interruptible(read_wait_queue,
atomic_read(&priv->receive_ready));
if (rc)
return rc;
}
}
/* copy only up to end of record */
if (count > priv->remaining)
count = priv->remaining;
if (copy_to_user(data, priv->current_position, count))
return -EFAULT;
*ppos += count;
priv->current_position += count;
priv->remaining -= count;
/* if all data has been transferred, set buffer free */
if (priv->remaining == 0)
priv->buffer_free = 1;
return count;
}
static ssize_t vmlogrdr_autopurge_store(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret = count;
switch (buf[0]) {
case '0':
priv->autopurge=0;
break;
case '1':
priv->autopurge=1;
break;
default:
ret = -EINVAL;
}
return ret;
}
static ssize_t vmlogrdr_autopurge_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", priv->autopurge);
}
static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
vmlogrdr_autopurge_store);
static ssize_t vmlogrdr_purge_store(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
char cp_command[80];
char cp_response[80];
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
if (buf[0] != '1')
return -EINVAL;
memset(cp_command, 0x00, sizeof(cp_command));
memset(cp_response, 0x00, sizeof(cp_response));
/*
* The recording command needs to be called with option QID
* for guests that have privilege classes A or B.
* Other guests will not recognize the command and we have to
* issue the same command without the QID parameter.
*/
if (recording_class_AB)
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE QID * ",
priv->recording_name);
else
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE ",
priv->recording_name);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
return count;
}
static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
static ssize_t vmlogrdr_autorecording_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret = count;
switch (buf[0]) {
case '0':
priv->autorecording=0;
break;
case '1':
priv->autorecording=1;
break;
default:
ret = -EINVAL;
}
return ret;
}
static ssize_t vmlogrdr_autorecording_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", priv->autorecording);
}
static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
vmlogrdr_autorecording_store);
static ssize_t vmlogrdr_recording_store(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret;
switch (buf[0]) {
case '0':
ret = vmlogrdr_recording(priv,0,0);
break;
case '1':
ret = vmlogrdr_recording(priv,1,0);
break;
default:
ret = -EINVAL;
}
if (ret)
return ret;
else
return count;
}
static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
char *buf)
{
char cp_command[] = "QUERY RECORDING ";
int len;
cpcmd(cp_command, buf, 4096, NULL);
len = strlen(buf);
return len;
}
static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
NULL);
static struct attribute *vmlogrdr_attrs[] = {
&dev_attr_autopurge.attr,
&dev_attr_purge.attr,
&dev_attr_autorecording.attr,
&dev_attr_recording.attr,
NULL,
};
static int vmlogrdr_pm_prepare(struct device *dev)
{
int rc;
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
rc = 0;
if (priv) {
spin_lock_bh(&priv->priv_lock);
if (priv->dev_in_use)
rc = -EBUSY;
spin_unlock_bh(&priv->priv_lock);
}
if (rc)
pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
dev_name(dev));
return rc;
}
static struct dev_pm_ops vmlogrdr_pm_ops = {
.prepare = vmlogrdr_pm_prepare,
};
static struct attribute_group vmlogrdr_attr_group = {
.attrs = vmlogrdr_attrs,
};
static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
.name = "vmlogrdr",
.bus = &iucv_bus,
.pm = &vmlogrdr_pm_ops,
};
static int vmlogrdr_register_driver(void)
{
int ret;
/* Register with iucv driver */
ret = iucv_register(&vmlogrdr_iucv_handler, 1);
if (ret)
goto out;
ret = driver_register(&vmlogrdr_driver);
if (ret)
goto out_iucv;
ret = driver_create_file(&vmlogrdr_driver,
&driver_attr_recording_status);
if (ret)
goto out_driver;
vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
if (IS_ERR(vmlogrdr_class)) {
ret = PTR_ERR(vmlogrdr_class);
vmlogrdr_class = NULL;
goto out_attr;
}
return 0;
out_attr:
driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
out_driver:
driver_unregister(&vmlogrdr_driver);
out_iucv:
iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
return ret;
}
static void vmlogrdr_unregister_driver(void)
{
class_destroy(vmlogrdr_class);
vmlogrdr_class = NULL;
driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
driver_unregister(&vmlogrdr_driver);
iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
struct device *dev;
int ret;
dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (dev) {
dev_set_name(dev, priv->internal_name);
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
dev_set_drvdata(dev, priv);
/*
* The release function could be called after the
* module has been unloaded. Its _only_ task is to
* free the struct. Therefore, we specify kfree()
* directly here. (Probably a little bit obfuscating
* but legitimate ...).
*/
dev->release = (void (*)(struct device *))kfree;
} else
return -ENOMEM;
ret = device_register(dev);
if (ret) {
put_device(dev);
return ret;
}
ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
if (ret) {
device_unregister(dev);
return ret;
}
priv->class_device = device_create(vmlogrdr_class, dev,
MKDEV(vmlogrdr_major,
priv->minor_num),
priv, "%s", dev_name(dev));
if (IS_ERR(priv->class_device)) {
ret = PTR_ERR(priv->class_device);
priv->class_device=NULL;
sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
device_unregister(dev);
return ret;
}
priv->device = dev;
return 0;
}
static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
if (priv->device != NULL) {
sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
device_unregister(priv->device);
priv->device=NULL;
}
return 0;
}
static int vmlogrdr_register_cdev(dev_t dev)
{
int rc = 0;
vmlogrdr_cdev = cdev_alloc();
if (!vmlogrdr_cdev) {
return -ENOMEM;
}
vmlogrdr_cdev->owner = THIS_MODULE;
vmlogrdr_cdev->ops = &vmlogrdr_fops;
vmlogrdr_cdev->dev = dev;
rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
if (!rc)
return 0;
// cleanup: cdev is not fully registered, no cdev_del here!
kobject_put(&vmlogrdr_cdev->kobj);
vmlogrdr_cdev=NULL;
return rc;
}
static void vmlogrdr_cleanup(void)
{
int i;
if (vmlogrdr_cdev) {
cdev_del(vmlogrdr_cdev);
vmlogrdr_cdev=NULL;
}
for (i=0; i < MAXMINOR; ++i ) {
vmlogrdr_unregister_device(&sys_ser[i]);
free_page((unsigned long)sys_ser[i].buffer);
}
vmlogrdr_unregister_driver();
if (vmlogrdr_major) {
unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
vmlogrdr_major=0;
}
}
static int __init vmlogrdr_init(void)
{
int rc;
int i;
dev_t dev;
if (! MACHINE_IS_VM) {
pr_err("not running under VM, driver not loaded.\n");
return -ENODEV;
}
recording_class_AB = vmlogrdr_get_recording_class_AB();
rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
if (rc)
return rc;
vmlogrdr_major = MAJOR(dev);
rc=vmlogrdr_register_driver();
if (rc)
goto cleanup;
for (i=0; i < MAXMINOR; ++i ) {
sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
if (!sys_ser[i].buffer) {
rc = -ENOMEM;
break;
}
sys_ser[i].current_position = sys_ser[i].buffer;
rc=vmlogrdr_register_device(&sys_ser[i]);
if (rc)
break;
}
if (rc)
goto cleanup;
rc = vmlogrdr_register_cdev(dev);
if (rc)
goto cleanup;
return 0;
cleanup:
vmlogrdr_cleanup();
return rc;
}
static void __exit vmlogrdr_exit(void)
{
vmlogrdr_cleanup();
return;
}
module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,110 @@
/*
* Linux driver for System z and s390 unit record devices
* (z/VM virtual punch, reader, printer)
*
* Copyright IBM Corp. 2001, 2007
* Authors: Malcolm Beattie <beattiem@uk.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Frank Munzert <munzert@de.ibm.com>
*/
#ifndef _VMUR_H_
#define _VMUR_H_
#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
/*
* we only support z/VM's default unit record devices:
* both in SPOOL directory control statement and in CP DEFINE statement
* RDR defaults to 2540 reader
* PUN defaults to 2540 punch
* PRT defaults to 1403 printer
*/
#define READER_PUNCH_DEVTYPE 0x2540
#define PRINTER_DEVTYPE 0x1403
/* z/VM spool file control block SFBLOK */
struct file_control_block {
char reserved_1[8];
char user_owner[8];
char user_orig[8];
__s32 data_recs;
__s16 rec_len;
__s16 file_num;
__u8 file_stat;
__u8 dev_type;
char reserved_2[6];
char file_name[12];
char file_type[12];
char create_date[8];
char create_time[8];
char reserved_3[6];
__u8 file_class;
__u8 sfb_lok;
__u64 distr_code;
__u32 reserved_4;
__u8 current_starting_copy_number;
__u8 sfblock_cntrl_flags;
__u8 reserved_5;
__u8 more_status_flags;
char rest[200];
} __attribute__ ((packed));
#define FLG_SYSTEM_HOLD 0x04
#define FLG_CP_DUMP 0x10
#define FLG_USER_HOLD 0x20
#define FLG_IN_USE 0x80
/*
* A struct urdev is created for each ur device that is made available
* via the ccw_device driver model.
*/
struct urdev {
struct ccw_device *cdev; /* Backpointer to ccw device */
struct mutex io_mutex; /* Serialises device IO */
struct completion *io_done; /* do_ur_io waits; irq completes */
struct device *device;
struct cdev *char_device;
struct ccw_dev_id dev_id; /* device id */
size_t reclen; /* Record length for *write* CCWs */
int class; /* VM device class */
int io_request_rc; /* return code from I/O request */
atomic_t ref_count; /* reference counter */
wait_queue_head_t wait; /* wait queue to serialize open */
int open_flag; /* "urdev is open" flag */
spinlock_t open_lock; /* serialize critical sections */
};
/*
* A struct urfile is allocated at open() time for each device and
* freed on release().
*/
struct urfile {
struct urdev *urd;
unsigned int flags;
size_t dev_reclen;
__u16 file_reclen;
};
/*
* Device major/minor definitions.
*/
#define UR_MAJOR 0 /* get dynamic major */
/*
* We map minor numbers directly to device numbers (0-FFFF) for simplicity.
* This avoids having to allocate (and manage) slot numbers.
*/
#define NUM_MINORS 65536
/* Limiting each I/O to 511 records limits chan prog to 4KB (511 r/w + 1 NOP) */
#define MAX_RECS_PER_IO 511
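/*
* Worked example for the limit above: 511 read/write CCWs plus the
* trailing NOP make 512 CCWs; at 8 bytes per CCW that is 512 * 8 = 4096
* bytes, so the channel program fits exactly into one 4KB page.
*/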
#define WRITE_CCW_CMD 0x01
#define TRACE(x...) debug_sprintf_event(vmur_dbf, 1, x)
#define CCWDEV_CU_DI(cutype, di) \
CCW_DEVICE(cutype, 0x00), .driver_info = (di)
#define FILE_RECLEN_OFFSET 4064 /* reclen offset in spool data block */
#endif /* _VMUR_H_ */

View File

@@ -0,0 +1,326 @@
/*
* Watchdog implementation based on z/VM Watchdog Timer API
*
* Copyright IBM Corp. 2004,2009
*
* The user space watchdog daemon can use this driver as
* /dev/vmwatchdog to have z/VM execute the specified CP
* command when the timeout expires. The default command is
* "IPL", which which cause an immediate reboot.
*/
#define KMSG_COMPONENT "vmwatchdog"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/suspend.h>
#include <linux/watchdog.h>
#include <linux/smp_lock.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#define MAX_CMDLEN 240
#define MIN_INTERVAL 15
static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
static int vmwdt_conceal;
static int vmwdt_nowayout = WATCHDOG_NOWAYOUT;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_DESCRIPTION("z/VM Watchdog Timer");
module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644);
MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers");
module_param_named(conceal, vmwdt_conceal, bool, 0644);
MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog "
" is active");
module_param_named(nowayout, vmwdt_nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
" (default=CONFIG_WATCHDOG_NOWAYOUT)");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
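/*
* Illustrative userspace sketch (system_is_healthy() is a hypothetical
* placeholder): arm the watchdog by opening the device, ping it while all
* is well, and write the magic character 'V' to allow a clean stop.
*
*	int fd = open("/dev/vmwatchdog", O_WRONLY);
*	int timeout = 60;
*
*	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
*	while (system_is_healthy()) {
*		ioctl(fd, WDIOC_KEEPALIVE, 0);
*		sleep(30);
*	}
*	write(fd, "V", 1);	// magic close
*	close(fd);
*/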
static unsigned int vmwdt_interval = 60;
static unsigned long vmwdt_is_open;
static int vmwdt_expect_close;
#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */
#define VMWDT_RUNNING 1 /* The watchdog is armed */
enum vmwdt_func {
/* function codes */
wdt_init = 0,
wdt_change = 1,
wdt_cancel = 2,
/* flags */
wdt_conceal = 0x80000000,
};
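/*
 * The helper below wraps the z/VM DIAG X'288' call: general registers
 * 2-5 carry the function code, the timeout in seconds, the real address
 * of the EBCDIC command string and its length. A specification
 * exception (e.g. when not running under z/VM) is caught through the
 * exception table, leaving the preset -EINVAL in place.
 */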
static int __diag288(enum vmwdt_func func, unsigned int timeout,
char *cmd, size_t len)
{
register unsigned long __func asm("2") = func;
register unsigned long __timeout asm("3") = timeout;
register unsigned long __cmdp asm("4") = virt_to_phys(cmd);
register unsigned long __cmdl asm("5") = len;
int err;
err = -EINVAL;
asm volatile(
" diag %1,%3,0x288\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (err) : "d"(__func), "d"(__timeout),
"d"(__cmdp), "d"(__cmdl) : "1", "cc");
return err;
}
static int vmwdt_keepalive(void)
{
/* we allocate new memory every time to avoid having
* to track the state. static allocation is not an
* option since that might not be contiguous in real
* storage in case of a modular build */
static char *ebc_cmd;
size_t len;
int ret;
unsigned int func;
ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
if (!ebc_cmd)
return -ENOMEM;
len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN);
ASCEBC(ebc_cmd, MAX_CMDLEN);
EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
set_bit(VMWDT_RUNNING, &vmwdt_is_open);
ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
WARN_ON(ret != 0);
kfree(ebc_cmd);
return ret;
}
static int vmwdt_disable(void)
{
int ret = __diag288(wdt_cancel, 0, "", 0);
WARN_ON(ret != 0);
clear_bit(VMWDT_RUNNING, &vmwdt_is_open);
return ret;
}
static int __init vmwdt_probe(void)
{
/* there is no real way to see if the watchdog is supported,
* so we try initializing it with a NOP command ("BEGIN")
* that won't cause any harm even if the following disable
* fails for some reason */
static char __initdata ebc_begin[] = {
194, 197, 199, 201, 213
};
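	/* the bytes above spell "BEGIN" in EBCDIC (0xC2 0xC5 0xC7 0xC9 0xD5) */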
if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0)
return -EINVAL;
return vmwdt_disable();
}
static int vmwdt_open(struct inode *i, struct file *f)
{
int ret;
lock_kernel();
if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
unlock_kernel();
return -EBUSY;
}
ret = vmwdt_keepalive();
if (ret)
clear_bit(VMWDT_OPEN, &vmwdt_is_open);
unlock_kernel();
return ret ? ret : nonseekable_open(i, f);
}
static int vmwdt_close(struct inode *i, struct file *f)
{
if (vmwdt_expect_close == 42)
vmwdt_disable();
vmwdt_expect_close = 0;
clear_bit(VMWDT_OPEN, &vmwdt_is_open);
return 0;
}
static struct watchdog_info vmwdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.firmware_version = 0,
.identity = "z/VM Watchdog Timer",
};
static int vmwdt_ioctl(struct inode *i, struct file *f,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user((void __user *)arg, &vmwdt_info,
sizeof(vmwdt_info)))
return -EFAULT;
return 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, (int __user *)arg);
case WDIOC_GETTEMP:
return -EINVAL;
case WDIOC_SETOPTIONS:
{
int options, ret;
if (get_user(options, (int __user *)arg))
return -EFAULT;
ret = -EINVAL;
if (options & WDIOS_DISABLECARD) {
ret = vmwdt_disable();
if (ret)
return ret;
}
if (options & WDIOS_ENABLECARD) {
ret = vmwdt_keepalive();
}
return ret;
}
case WDIOC_GETTIMEOUT:
return put_user(vmwdt_interval, (int __user *)arg);
case WDIOC_SETTIMEOUT:
{
int interval;
if (get_user(interval, (int __user *)arg))
return -EFAULT;
if (interval < MIN_INTERVAL)
return -EINVAL;
vmwdt_interval = interval;
}
return vmwdt_keepalive();
case WDIOC_KEEPALIVE:
return vmwdt_keepalive();
}
return -EINVAL;
}
static ssize_t vmwdt_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
if (count) {
if (!vmwdt_nowayout) {
size_t i;
/* note: just in case someone wrote the magic character
* five months ago... */
vmwdt_expect_close = 0;
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf+i))
return -EFAULT;
if (c == 'V')
vmwdt_expect_close = 42;
}
}
/* someone wrote to us, we should restart timer */
vmwdt_keepalive();
}
return count;
}
static int vmwdt_resume(void)
{
clear_bit(VMWDT_OPEN, &vmwdt_is_open);
return NOTIFY_DONE;
}
/*
* It makes no sense to go into suspend while the watchdog is running.
* Depending on the memory size, the watchdog might trigger, while we
* are still saving the memory.
* We reuse the open flag to ensure that suspend and watchdog open are
* exclusive operations
*/
static int vmwdt_suspend(void)
{
if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
pr_err("The system cannot be suspended while the watchdog"
" is in use\n");
return NOTIFY_BAD;
}
if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
clear_bit(VMWDT_OPEN, &vmwdt_is_open);
pr_err("The system cannot be suspended while the watchdog"
" is running\n");
return NOTIFY_BAD;
}
return NOTIFY_DONE;
}
/*
* This function is called for suspend and resume.
*/
static int vmwdt_power_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
switch (event) {
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
return vmwdt_resume();
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
return vmwdt_suspend();
default:
return NOTIFY_DONE;
}
}
static struct notifier_block vmwdt_power_notifier = {
.notifier_call = vmwdt_power_event,
};
static const struct file_operations vmwdt_fops = {
.open = &vmwdt_open,
.release = &vmwdt_close,
.ioctl = &vmwdt_ioctl,
.write = &vmwdt_write,
.owner = THIS_MODULE,
};
static struct miscdevice vmwdt_dev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &vmwdt_fops,
};
static int __init vmwdt_init(void)
{
int ret;
ret = vmwdt_probe();
if (ret)
return ret;
ret = register_pm_notifier(&vmwdt_power_notifier);
if (ret)
return ret;
ret = misc_register(&vmwdt_dev);
if (ret) {
unregister_pm_notifier(&vmwdt_power_notifier);
return ret;
}
return 0;
}
module_init(vmwdt_init);
static void __exit vmwdt_exit(void)
{
unregister_pm_notifier(&vmwdt_power_notifier);
misc_deregister(&vmwdt_dev);
}
module_exit(vmwdt_exit);
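/*
 * Illustrative userspace sketch (not part of this driver); it assumes
 * only the standard watchdog character device ABI implemented above.
 * service_is_healthy() is a hypothetical application check.
 *
 *	int fd = open("/dev/vmwatchdog", O_WRONLY);
 *	int timeout = 60;
 *	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
 *	while (service_is_healthy()) {
 *		ioctl(fd, WDIOC_KEEPALIVE, 0);
 *		sleep(timeout / 2);
 *	}
 *	write(fd, "V", 1);	- magic close, allows clean shutdown
 *	close(fd);		- vmwdt_close() then cancels the timer
 */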

View File

@@ -0,0 +1,819 @@
/*
* zcore module to export memory content and register sets for creating system
* dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
* dump format as s390 standalone dumps.
*
* For more information please refer to Documentation/s390/zfcpdump.txt
*
* Copyright IBM Corp. 2003,2008
* Author(s): Michael Holzheu
*/
#define KMSG_COMPONENT "zdump"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
#define TO_USER 0
#define TO_KERNEL 1
#define CHUNK_INFO_SIZE 34 /* 2 16-byte char, each followed by blank */
enum arch_id {
ARCH_S390 = 0,
ARCH_S390X = 1,
};
/* dump system info */
struct sys_info {
enum arch_id arch;
unsigned long sa_base;
u32 sa_size;
int cpu_map[NR_CPUS];
unsigned long mem_size;
union save_area lc_mask;
};
struct ipib_info {
unsigned long ipib;
u32 checksum;
} __attribute__((packed));
static struct sys_info sys_info;
static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_file;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct ipl_parameter_block *ipl_block;
/*
* Copy memory from HSA to kernel or user memory (not reentrant):
*
* @dest: Kernel or user buffer where memory should be copied to
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
* @mode: Either TO_KERNEL or TO_USER
*/
static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
{
int offs, blk_num;
static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
if (count == 0)
return 0;
/* copy first block */
offs = 0;
if ((src % PAGE_SIZE) != 0) {
blk_num = src / PAGE_SIZE + 2;
if (sclp_sdias_copy(buf, blk_num, 1)) {
TRACE("sclp_sdias_copy() failed\n");
return -EIO;
}
offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
if (mode == TO_USER) {
if (copy_to_user((__force __user void*) dest,
buf + (src % PAGE_SIZE), offs))
return -EFAULT;
} else
memcpy(dest, buf + (src % PAGE_SIZE), offs);
}
if (offs == count)
goto out;
/* copy middle */
for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
blk_num = (src + offs) / PAGE_SIZE + 2;
if (sclp_sdias_copy(buf, blk_num, 1)) {
TRACE("sclp_sdias_copy() failed\n");
return -EIO;
}
if (mode == TO_USER) {
if (copy_to_user((__force __user void*) dest + offs,
buf, PAGE_SIZE))
return -EFAULT;
} else
memcpy(dest + offs, buf, PAGE_SIZE);
}
if (offs == count)
goto out;
/* copy last block */
blk_num = (src + offs) / PAGE_SIZE + 2;
if (sclp_sdias_copy(buf, blk_num, 1)) {
TRACE("sclp_sdias_copy() failed\n");
return -EIO;
}
if (mode == TO_USER) {
if (copy_to_user((__force __user void*) dest + offs, buf,
count - offs))
return -EFAULT;
} else
memcpy(dest + offs, buf, count - offs);
out:
return 0;
}
static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
{
return memcpy_hsa((void __force *) dest, src, count, TO_USER);
}
static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
{
return memcpy_hsa(dest, src, count, TO_KERNEL);
}
static int memcpy_real(void *dest, unsigned long src, size_t count)
{
unsigned long flags;
int rc = -EFAULT;
register unsigned long _dest asm("2") = (unsigned long) dest;
register unsigned long _len1 asm("3") = (unsigned long) count;
register unsigned long _src asm("4") = src;
register unsigned long _len2 asm("5") = (unsigned long) count;
if (count == 0)
return 0;
flags = __raw_local_irq_stnsm(0xf8UL); /* switch to real mode */
asm volatile (
"0: mvcle %1,%2,0x0\n"
"1: jo 0b\n"
" lhi %0,0x0\n"
"2:\n"
EX_TABLE(1b,2b)
: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
"+d" (_len2), "=m" (*((long*)dest))
: "m" (*((long*)src))
: "cc", "memory");
__raw_local_irq_ssm(flags);
return rc;
}
static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
{
static char buf[4096];
int offs = 0, size;
while (offs < count) {
size = min(sizeof(buf), count - offs);
if (memcpy_real(buf, src + offs, size))
return -EFAULT;
if (copy_to_user(dest + offs, buf, size))
return -EFAULT;
offs += size;
}
return 0;
}
#ifdef __s390x__
/*
* Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
*/
static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
int cpu)
{
int i;
for (i = 0; i < 16; i++) {
out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
out->s390.acc_regs[i] = in->s390x.acc_regs[i];
out->s390.ctrl_regs[i] =
in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
}
/* locore for 31 bit has only space for fpregs 0,2,4,6 */
out->s390.fp_regs[0] = in->s390x.fp_regs[0];
out->s390.fp_regs[1] = in->s390x.fp_regs[2];
out->s390.fp_regs[2] = in->s390x.fp_regs[4];
out->s390.fp_regs[3] = in->s390x.fp_regs[6];
memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
out->s390.psw[1] |= 0x8; /* set bit 12 */
memcpy(&(out->s390.psw[4]), &(in->s390x.psw[12]), 4);
out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
out->s390.pref_reg = in->s390x.pref_reg;
out->s390.timer = in->s390x.timer;
out->s390.clk_cmp = in->s390x.clk_cmp;
}
static void __init s390x_to_s390_save_areas(void)
{
int i = 1;
static union save_area tmp;
while (zfcpdump_save_areas[i]) {
s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
i++;
}
}
#endif /* __s390x__ */
static int __init init_cpu_info(enum arch_id arch)
{
union save_area *sa;
/* get info for boot cpu from lowcore, stored in the HSA */
sa = kmalloc(sizeof(*sa), GFP_KERNEL);
if (!sa)
return -ENOMEM;
if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
TRACE("could not copy from HSA\n");
kfree(sa);
return -EIO;
}
zfcpdump_save_areas[0] = sa;
#ifdef __s390x__
/* convert s390x regs to s390, if we are dumping an s390 Linux */
if (arch == ARCH_S390)
s390x_to_s390_save_areas();
#endif
return 0;
}
static DEFINE_MUTEX(zcore_mutex);
#define DUMP_VERSION 0x3
#define DUMP_MAGIC 0xa8190173618f23fdULL
#define DUMP_ARCH_S390X 2
#define DUMP_ARCH_S390 1
#define HEADER_SIZE 4096
/* dump header dumped according to s390 crash dump format */
struct zcore_header {
u64 magic;
u32 version;
u32 header_size;
u32 dump_level;
u32 page_size;
u64 mem_size;
u64 mem_start;
u64 mem_end;
u32 num_pages;
u32 pad1;
u64 tod;
struct cpuid cpu_id;
u32 arch_id;
u32 volnr;
u32 build_arch;
u64 rmem_size;
char pad2[4016];
} __attribute__((packed,__aligned__(16)));
static struct zcore_header zcore_header = {
.magic = DUMP_MAGIC,
.version = DUMP_VERSION,
.header_size = 4096,
.dump_level = 0,
.page_size = PAGE_SIZE,
.mem_start = 0,
#ifdef __s390x__
.build_arch = DUMP_ARCH_S390X,
#else
.build_arch = DUMP_ARCH_S390,
#endif
};
/*
* Copy lowcore info to buffer. Use map in order to copy only register parts.
*
* @buf: User buffer
* @sa: Pointer to save area
* @sa_off: Offset in save area to copy
* @len: Number of bytes to copy
*/
static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
{
int i;
char *lc_mask = (char*)&sys_info.lc_mask;
for (i = 0; i < len; i++) {
if (!lc_mask[i + sa_off])
continue;
if (copy_to_user(buf + i, sa + sa_off + i, 1))
return -EFAULT;
}
return 0;
}
/*
* Copy lowcores info to memory, if necessary
*
* @buf: User buffer
* @start: Start address of buffer in dump memory
* @count: Size of buffer
*/
static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
{
unsigned long end;
int i = 0;
if (count == 0)
return 0;
end = start + count;
while (zfcpdump_save_areas[i]) {
unsigned long cp_start, cp_end; /* copy range */
unsigned long sa_start, sa_end; /* save area range */
unsigned long prefix;
unsigned long sa_off, len, buf_off;
if (sys_info.arch == ARCH_S390)
prefix = zfcpdump_save_areas[i]->s390.pref_reg;
else
prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
sa_start = prefix + sys_info.sa_base;
sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
if ((end < sa_start) || (start > sa_end))
goto next;
cp_start = max(start, sa_start);
cp_end = min(end, sa_end);
buf_off = cp_start - start;
sa_off = cp_start - sa_start;
len = cp_end - cp_start;
TRACE("copy_lc for: %lx\n", start);
if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
return -EFAULT;
next:
i++;
}
return 0;
}
/*
* Read routine for zcore character device
* First 4K are dump header
* Next 32MB are HSA Memory
* Rest is read from absolute Memory
*/
static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
unsigned long mem_start; /* Start address in memory */
size_t mem_offs; /* Offset in dump memory */
size_t hdr_count; /* Size of header part of output buffer */
size_t size;
int rc;
mutex_lock(&zcore_mutex);
if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
rc = -EINVAL;
goto fail;
}
count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));
/* Copy dump header */
if (*ppos < HEADER_SIZE) {
size = min(count, (size_t) (HEADER_SIZE - *ppos));
if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) {
rc = -EFAULT;
goto fail;
}
hdr_count = size;
mem_start = 0;
} else {
hdr_count = 0;
mem_start = *ppos - HEADER_SIZE;
}
mem_offs = 0;
/* Copy from HSA data */
if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
- mem_start));
rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
if (rc)
goto fail;
mem_offs += size;
}
/* Copy from real mem */
size = count - mem_offs - hdr_count;
rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
size);
if (rc)
goto fail;
/*
* Since s390 dump analysis tools like lcrash or crash
* expect register sets in the prefix pages of the cpus,
* we copy them into the read buffer, if necessary.
* buf + hdr_count: Start of memory part of output buffer
* mem_start: Start memory address to copy from
* count - hdr_count: Size of memory area to copy
*/
if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
rc = -EFAULT;
goto fail;
}
*ppos += count;
fail:
mutex_unlock(&zcore_mutex);
return (rc < 0) ? rc : count;
}
static int zcore_open(struct inode *inode, struct file *filp)
{
if (!hsa_available)
return -ENODATA;
else
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
static int zcore_release(struct inode *inode, struct file *filep)
{
diag308(DIAG308_REL_HSA, NULL);
hsa_available = 0;
return 0;
}
static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
{
loff_t rc;
mutex_lock(&zcore_mutex);
switch (orig) {
case 0:
file->f_pos = offset;
rc = file->f_pos;
break;
case 1:
file->f_pos += offset;
rc = file->f_pos;
break;
default:
rc = -EINVAL;
}
mutex_unlock(&zcore_mutex);
return rc;
}
static const struct file_operations zcore_fops = {
.owner = THIS_MODULE,
.llseek = zcore_lseek,
.read = zcore_read,
.open = zcore_open,
.release = zcore_release,
};
static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, filp->private_data,
MEMORY_CHUNKS * CHUNK_INFO_SIZE);
}
static int zcore_memmap_open(struct inode *inode, struct file *filp)
{
int i;
char *buf;
struct mem_chunk *chunk_array;
chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
GFP_KERNEL);
if (!chunk_array)
return -ENOMEM;
detect_memory_layout(chunk_array);
buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
if (!buf) {
kfree(chunk_array);
return -ENOMEM;
}
for (i = 0; i < MEMORY_CHUNKS; i++) {
sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ",
(unsigned long long) chunk_array[i].addr,
(unsigned long long) chunk_array[i].size);
if (chunk_array[i].size == 0)
break;
}
kfree(chunk_array);
filp->private_data = buf;
return 0;
}
static int zcore_memmap_release(struct inode *inode, struct file *filp)
{
kfree(filp->private_data);
return 0;
}
static const struct file_operations zcore_memmap_fops = {
.owner = THIS_MODULE,
.read = zcore_memmap_read,
.open = zcore_memmap_open,
.release = zcore_memmap_release,
};
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
if (ipl_block) {
diag308(DIAG308_SET, ipl_block);
diag308(DIAG308_IPL, NULL);
}
return count;
}
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
return 0;
}
static int zcore_reipl_release(struct inode *inode, struct file *filp)
{
return 0;
}
static const struct file_operations zcore_reipl_fops = {
.owner = THIS_MODULE,
.write = zcore_reipl_write,
.open = zcore_reipl_open,
.release = zcore_reipl_release,
};
static void __init set_s390_lc_mask(union save_area *map)
{
memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save));
memset(&map->s390.timer, 0xff, sizeof(map->s390.timer));
memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp));
memset(&map->s390.psw, 0xff, sizeof(map->s390.psw));
memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg));
memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs));
memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs));
memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs));
memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs));
}
static void __init set_s390x_lc_mask(union save_area *map)
{
memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs));
memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs));
memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw));
memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg));
memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg));
memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg));
memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer));
memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp));
memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs));
memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs));
}
/*
* Initialize dump globals for a given architecture
*/
static int __init sys_info_init(enum arch_id arch)
{
int rc;
switch (arch) {
case ARCH_S390X:
pr_alert("DETECTED 'S390X (64 bit) OS'\n");
sys_info.sa_base = SAVE_AREA_BASE_S390X;
sys_info.sa_size = sizeof(struct save_area_s390x);
set_s390x_lc_mask(&sys_info.lc_mask);
break;
case ARCH_S390:
pr_alert("DETECTED 'S390 (32 bit) OS'\n");
sys_info.sa_base = SAVE_AREA_BASE_S390;
sys_info.sa_size = sizeof(struct save_area_s390);
set_s390_lc_mask(&sys_info.lc_mask);
break;
default:
pr_alert("0x%x is an unknown architecture.\n",arch);
return -EINVAL;
}
sys_info.arch = arch;
rc = init_cpu_info(arch);
if (rc)
return rc;
sys_info.mem_size = real_memory_size;
return 0;
}
static int __init check_sdias(void)
{
int rc, act_hsa_size;
rc = sclp_sdias_blk_count();
if (rc < 0) {
TRACE("Could not determine HSA size\n");
return rc;
}
act_hsa_size = (rc - 1) * PAGE_SIZE;
if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
TRACE("HSA size too small: %i\n", act_hsa_size);
return -EINVAL;
}
return 0;
}
static int __init get_mem_size(unsigned long *mem)
{
int i;
struct mem_chunk *chunk_array;
chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
GFP_KERNEL);
if (!chunk_array)
return -ENOMEM;
detect_memory_layout(chunk_array);
for (i = 0; i < MEMORY_CHUNKS; i++) {
if (chunk_array[i].size == 0)
break;
*mem += chunk_array[i].size;
}
kfree(chunk_array);
return 0;
}
static int __init zcore_header_init(int arch, struct zcore_header *hdr)
{
int rc;
unsigned long memory = 0;
if (arch == ARCH_S390X)
hdr->arch_id = DUMP_ARCH_S390X;
else
hdr->arch_id = DUMP_ARCH_S390;
rc = get_mem_size(&memory);
if (rc)
return rc;
hdr->mem_size = memory;
hdr->rmem_size = memory;
hdr->mem_end = sys_info.mem_size;
hdr->num_pages = memory / PAGE_SIZE;
hdr->tod = get_clock();
get_cpu_id(&hdr->cpu_id);
return 0;
}
/*
* Provide IPL parameter information block from either HSA or memory
* for future reipl
*/
static int __init zcore_reipl_init(void)
{
struct ipib_info ipib_info;
int rc;
rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
if (rc)
return rc;
if (ipib_info.ipib == 0)
return 0;
ipl_block = (void *) __get_free_page(GFP_KERNEL);
if (!ipl_block)
return -ENOMEM;
if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE)
rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
else
rc = memcpy_real(ipl_block, ipib_info.ipib, PAGE_SIZE);
if (rc) {
free_page((unsigned long) ipl_block);
return rc;
}
if (csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
ipib_info.checksum) {
TRACE("Checksum does not match\n");
free_page((unsigned long) ipl_block);
ipl_block = NULL;
}
return 0;
}
static int __init zcore_init(void)
{
unsigned char arch;
int rc;
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return -ENODATA;
zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
debug_register_view(zcore_dbf, &debug_sprintf_view);
debug_set_level(zcore_dbf, 6);
TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
rc = sclp_sdias_init();
if (rc)
goto fail;
rc = check_sdias();
if (rc)
goto fail;
rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
if (rc)
goto fail;
#ifndef __s390x__
if (arch == ARCH_S390X) {
pr_alert("The 32-bit dump tool cannot be used for a "
"64-bit system\n");
rc = -EINVAL;
goto fail;
}
#endif
rc = sys_info_init(arch);
if (rc)
goto fail;
rc = zcore_header_init(arch, &zcore_header);
if (rc)
goto fail;
rc = zcore_reipl_init();
if (rc)
goto fail;
zcore_dir = debugfs_create_dir("zcore" , NULL);
if (!zcore_dir) {
rc = -ENOMEM;
goto fail;
}
zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
&zcore_fops);
if (!zcore_file) {
rc = -ENOMEM;
goto fail_dir;
}
zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
NULL, &zcore_memmap_fops);
if (!zcore_memmap_file) {
rc = -ENOMEM;
goto fail_file;
}
zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
NULL, &zcore_reipl_fops);
if (!zcore_reipl_file) {
rc = -ENOMEM;
goto fail_memmap_file;
}
hsa_available = 1;
return 0;
fail_memmap_file:
debugfs_remove(zcore_memmap_file);
fail_file:
debugfs_remove(zcore_file);
fail_dir:
debugfs_remove(zcore_dir);
fail:
diag308(DIAG308_REL_HSA, NULL);
return rc;
}
static void __exit zcore_exit(void)
{
debug_unregister(zcore_dbf);
sclp_sdias_exit();
free_page((unsigned long) ipl_block);
debugfs_remove(zcore_reipl_file);
debugfs_remove(zcore_memmap_file);
debugfs_remove(zcore_file);
debugfs_remove(zcore_dir);
diag308(DIAG308_REL_HSA, NULL);
}
MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
MODULE_DESCRIPTION("zcore module for zfcpdump support");
MODULE_LICENSE("GPL");
subsys_initcall(zcore_init);
module_exit(zcore_exit);
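/*
 * Illustrative usage from the zfcpdump environment (sketch; mount
 * points and paths are examples):
 *
 *	mount -t debugfs none /sys/kernel/debug
 *	dd if=/sys/kernel/debug/zcore/mem of=/mnt/dump.s390 bs=4096
 *
 * Reads walk through the 4K header, then the HSA part, then real
 * memory, exactly as laid out in zcore_read() above.
 */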

View File

@@ -0,0 +1,14 @@
#
# Makefile for the S/390 common i/o drivers
#
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
fcx.o itcw.o crw.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o

View File

@@ -0,0 +1,151 @@
/*
* drivers/s390/cio/airq.c
* Support for adapter interruptions
*
* Copyright IBM Corp. 1999,2007
* Author(s): Ingo Adlung <adlung@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Arnd Bergmann <arndb@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include "cio.h"
#include "cio_debug.h"
#define NR_AIRQS 32
#define NR_AIRQS_PER_WORD sizeof(unsigned long)
#define NR_AIRQ_WORDS (NR_AIRQS / NR_AIRQS_PER_WORD)
union indicator_t {
unsigned long word[NR_AIRQ_WORDS];
unsigned char byte[NR_AIRQS];
} __attribute__((packed));
struct airq_t {
adapter_int_handler_t handler;
void *drv_data;
};
static union indicator_t indicators[MAX_ISC+1];
static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS];
static int register_airq(struct airq_t *airq, u8 isc)
{
int i;
for (i = 0; i < NR_AIRQS; i++)
if (!cmpxchg(&airqs[isc][i], NULL, airq))
return i;
return -ENOMEM;
}
/**
* s390_register_adapter_interrupt() - register adapter interrupt handler
* @handler: adapter handler to be registered
* @drv_data: driver data passed with each call to the handler
* @isc: isc for which the handler should be called
*
* Returns:
* Pointer to the indicator to be used on success
* ERR_PTR() if registration failed
*/
void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
void *drv_data, u8 isc)
{
struct airq_t *airq;
char dbf_txt[16];
int ret;
if (isc > MAX_ISC)
return ERR_PTR(-EINVAL);
airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
if (!airq) {
ret = -ENOMEM;
goto out;
}
airq->handler = handler;
airq->drv_data = drv_data;
ret = register_airq(airq, isc);
out:
snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
CIO_TRACE_EVENT(4, dbf_txt);
if (ret < 0) {
kfree(airq);
return ERR_PTR(ret);
} else
return &indicators[isc].byte[ret];
}
EXPORT_SYMBOL(s390_register_adapter_interrupt);
/**
* s390_unregister_adapter_interrupt - unregister adapter interrupt handler
* @ind: indicator for which the handler is to be unregistered
* @isc: interruption subclass
*/
void s390_unregister_adapter_interrupt(void *ind, u8 isc)
{
struct airq_t *airq;
char dbf_txt[16];
int i;
i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
CIO_TRACE_EVENT(4, dbf_txt);
indicators[isc].byte[i] = 0;
airq = xchg(&airqs[isc][i], NULL);
/*
* Allow interrupts to complete. This will ensure that the airq handle
* is no longer referenced by any interrupt handler.
*/
synchronize_sched();
kfree(airq);
}
EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
#define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
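/*
 * INDICATOR_MASK selects the leftmost byte of a word. The scan loop
 * below shifts the word left by 8 bits per iteration, so on big-endian
 * s390 the bytes are visited in ascending index order.
 */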
void do_adapter_IO(u8 isc)
{
int w;
int i;
unsigned long word;
struct airq_t *airq;
/*
* Access indicator array in word-sized chunks to minimize storage
* fetch operations.
*/
for (w = 0; w < NR_AIRQ_WORDS; w++) {
word = indicators[isc].word[w];
i = w * NR_AIRQS_PER_WORD;
/*
* Check bytes within word for active indicators.
*/
while (word) {
if (word & INDICATOR_MASK) {
airq = airqs[isc][i];
/* Make sure gcc reads from airqs only once. */
barrier();
if (likely(airq))
airq->handler(&indicators[isc].byte[i],
airq->drv_data);
else
/*
* Reset ill-behaved indicator.
*/
indicators[isc].byte[i] = 0;
}
word <<= 8;
i++;
}
}
}

View File

@@ -0,0 +1,403 @@
/*
* drivers/s390/cio/blacklist.c
* S/390 common I/O routines -- blacklisting of specific devices
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <asm/cio.h>
#include <asm/uaccess.h>
#include "blacklist.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
/*
* "Blacklisting" of certain devices:
* Device numbers given in the commandline as cio_ignore=... won't be known
* to Linux.
*
* These can be single devices or ranges of devices
*/
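/*
 * Illustrative kernel parameter (device numbers are examples only):
 *	cio_ignore=all,!0.0.0180-0.0.01ff
 * blacklists every device except the range 0.0.0180 to 0.0.01ff.
 */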
/* 65536 bits for each set to indicate if a devno is blacklisted or not */
#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
(8*sizeof(long)))
static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
typedef enum {add, free} range_action;
/*
* Function: blacklist_range
* (Un-)blacklist the devices from-to
*/
static int blacklist_range(range_action action, unsigned int from_ssid,
unsigned int to_ssid, unsigned int from,
unsigned int to, int msgtrigger)
{
if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
if (msgtrigger)
pr_warning("0.%x.%04x to 0.%x.%04x is not a valid "
"range for cio_ignore\n", from_ssid, from,
to_ssid, to);
return 1;
}
while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) &&
(from <= to))) {
if (action == add)
set_bit(from, bl_dev[from_ssid]);
else
clear_bit(from, bl_dev[from_ssid]);
from++;
if (from > __MAX_SUBCHANNEL) {
from_ssid++;
from = 0;
}
}
return 0;
}
static int pure_hex(char **cp, unsigned int *val, int min_digit,
int max_digit, int max_val)
{
int diff;
unsigned int value;
diff = 0;
*val = 0;
while (isxdigit(**cp) && (diff <= max_digit)) {
if (isdigit(**cp))
value = **cp - '0';
else
value = tolower(**cp) - 'a' + 10;
*val = *val * 16 + value;
(*cp)++;
diff++;
}
if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
return 1;
return 0;
}
static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
unsigned int *devno, int msgtrigger)
{
char *str_work;
int val, rc, ret;
rc = 1;
if (*str == '\0')
goto out;
/* old style */
str_work = str;
val = simple_strtoul(str, &str_work, 16);
if (*str_work == '\0') {
if (val <= __MAX_SUBCHANNEL) {
*devno = val;
*ssid = 0;
*cssid = 0;
rc = 0;
}
goto out;
}
/* new style */
str_work = str;
ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
if (ret || (str_work[0] != '.'))
goto out;
str_work++;
ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
if (ret || (str_work[0] != '.'))
goto out;
str_work++;
ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
if (ret || (str_work[0] != '\0'))
goto out;
rc = 0;
out:
if (rc && msgtrigger)
pr_warning("%s is not a valid device for the cio_ignore "
"kernel parameter\n", str);
return rc;
}
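/* Accepted forms, for example: old style "1234", new style "0.0.1234". */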
static int blacklist_parse_parameters(char *str, range_action action,
int msgtrigger)
{
unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
int rc, totalrc;
char *parm;
range_action ra;
totalrc = 0;
while ((parm = strsep(&str, ","))) {
rc = 0;
ra = action;
if (*parm == '!') {
if (ra == add)
ra = free;
else
ra = add;
parm++;
}
if (strcmp(parm, "all") == 0) {
from_cssid = 0;
from_ssid = 0;
from = 0;
to_cssid = __MAX_CSSID;
to_ssid = __MAX_SSID;
to = __MAX_SUBCHANNEL;
} else {
rc = parse_busid(strsep(&parm, "-"), &from_cssid,
&from_ssid, &from, msgtrigger);
if (!rc) {
if (parm != NULL)
rc = parse_busid(parm, &to_cssid,
&to_ssid, &to,
msgtrigger);
else {
to_cssid = from_cssid;
to_ssid = from_ssid;
to = from;
}
}
}
if (!rc) {
rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
msgtrigger);
if (rc)
totalrc = -EINVAL;
} else
totalrc = -EINVAL;
}
return totalrc;
}
static int __init
blacklist_setup (char *str)
{
CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
if (blacklist_parse_parameters(str, add, 1))
return 0;
return 1;
}
__setup ("cio_ignore=", blacklist_setup);
/* Checking if devices are blacklisted */
/*
* Function: is_blacklisted
* Returns 1 if the given device number can be found in the blacklist,
* otherwise 0.
* Used by validate_subchannel()
*/
int
is_blacklisted (int ssid, int devno)
{
return test_bit (devno, bl_dev[ssid]);
}
#ifdef CONFIG_PROC_FS
/*
* Function: blacklist_parse_proc_parameters
* parse the stuff which is piped to /proc/cio_ignore
*/
static int blacklist_parse_proc_parameters(char *buf)
{
int rc;
char *parm;
parm = strsep(&buf, " ");
if (strcmp("free", parm) == 0)
rc = blacklist_parse_parameters(buf, free, 0);
else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
return ccw_purge_blacklisted();
else
return -EINVAL;
css_schedule_reprobe();
return rc;
}
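/*
 * Illustrative /proc usage matching the parser above (example devnos):
 *	echo add 0.0.0199 > /proc/cio_ignore
 *	echo free 0.0.0199 > /proc/cio_ignore
 *	echo purge > /proc/cio_ignore
 */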
/* Iterator struct for all devices. */
struct ccwdev_iter {
int devno;
int ssid;
int in_range;
};
static void *
cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
{
struct ccwdev_iter *iter = s->private;
if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
return NULL;
memset(iter, 0, sizeof(*iter));
iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
return iter;
}
static void
cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
{
}
static void *
cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
{
struct ccwdev_iter *iter;
if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
return NULL;
iter = it;
if (iter->devno == __MAX_SUBCHANNEL) {
iter->devno = 0;
iter->ssid++;
if (iter->ssid > __MAX_SSID)
return NULL;
} else
iter->devno++;
(*offset)++;
return iter;
}
static int
cio_ignore_proc_seq_show(struct seq_file *s, void *it)
{
struct ccwdev_iter *iter;
iter = it;
if (!is_blacklisted(iter->ssid, iter->devno))
/* Not blacklisted, nothing to output. */
return 0;
if (!iter->in_range) {
/* First device in range. */
if ((iter->devno == __MAX_SUBCHANNEL) ||
!is_blacklisted(iter->ssid, iter->devno + 1))
/* Singular device. */
return seq_printf(s, "0.%x.%04x\n",
iter->ssid, iter->devno);
iter->in_range = 1;
return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
}
if ((iter->devno == __MAX_SUBCHANNEL) ||
!is_blacklisted(iter->ssid, iter->devno + 1)) {
/* Last device in range. */
iter->in_range = 0;
return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
}
return 0;
}
static ssize_t
cio_ignore_write(struct file *file, const char __user *user_buf,
size_t user_len, loff_t *offset)
{
char *buf;
ssize_t rc, ret, i;
if (*offset)
return -EINVAL;
if (user_len > 65536)
user_len = 65536;
buf = vmalloc (user_len + 1); /* maybe better use the stack? */
if (buf == NULL)
return -ENOMEM;
memset(buf, 0, user_len + 1);
if (strncpy_from_user (buf, user_buf, user_len) < 0) {
rc = -EFAULT;
goto out_free;
}
i = user_len - 1;
while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) {
buf[i] = '\0';
i--;
}
ret = blacklist_parse_proc_parameters(buf);
if (ret)
rc = ret;
else
rc = user_len;
out_free:
vfree (buf);
return rc;
}
static const struct seq_operations cio_ignore_proc_seq_ops = {
.start = cio_ignore_proc_seq_start,
.stop = cio_ignore_proc_seq_stop,
.next = cio_ignore_proc_seq_next,
.show = cio_ignore_proc_seq_show,
};
static int
cio_ignore_proc_open(struct inode *inode, struct file *file)
{
return seq_open_private(file, &cio_ignore_proc_seq_ops,
sizeof(struct ccwdev_iter));
}
static const struct file_operations cio_ignore_proc_fops = {
.open = cio_ignore_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
.write = cio_ignore_write,
};
static int
cio_ignore_proc_init (void)
{
struct proc_dir_entry *entry;
entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
&cio_ignore_proc_fops);
if (!entry)
return -ENOENT;
return 0;
}
__initcall (cio_ignore_proc_init);
#endif /* CONFIG_PROC_FS */

View File

@@ -0,0 +1,6 @@
#ifndef S390_BLACKLIST_H
#define S390_BLACKLIST_H
extern int is_blacklisted (int ssid, int devno);
#endif

View File

@@ -0,0 +1,703 @@
/*
* bus driver for ccwgroup
*
* Copyright IBM Corp. 2002, 2009
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/dcache.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#define CCW_BUS_ID_SIZE 20
/* In Linux 2.4, we had a channel device layer called "chandev"
* that did all sorts of obscure stuff for networking devices.
* This is another driver that serves as a replacement for just
* one of its functions, namely the translation of single subchannels
* to devices that use multiple subchannels.
*/
/* a device matches a driver if all its slave devices match the same
* entry of the driver */
static int
ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
{
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(drv);
if (gdev->creator_id == gdrv->driver_id)
return 1;
return 0;
}
static int
ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
{
/* TODO */
return 0;
}
static struct bus_type ccwgroup_bus_type;
static void
__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
int i;
char str[8];
for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i);
sysfs_remove_link(&gdev->dev.kobj, str);
sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
}
}
/*
* Provide an 'ungroup' attribute so the user can remove group devices no
* longer needed or accidentally created. Saves memory :)
*/
static void ccwgroup_ungroup_callback(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
mutex_lock(&gdev->reg_mutex);
if (device_is_registered(&gdev->dev)) {
__ccwgroup_remove_symlinks(gdev);
device_unregister(dev);
}
mutex_unlock(&gdev->reg_mutex);
}
static ssize_t
ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct ccwgroup_device *gdev;
int rc;
gdev = to_ccwgroupdev(dev);
/* Prevent concurrent online/offline processing and ungrouping. */
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state != CCWGROUP_OFFLINE) {
rc = -EINVAL;
goto out;
}
/* Note that we cannot unregister the device from one of its
* attribute methods, so we have to use this roundabout approach.
*/
rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
out:
if (rc) {
if (rc != -EAGAIN)
/* Release onoff "lock" when ungrouping failed. */
atomic_set(&gdev->onoff, 0);
return rc;
}
return count;
}
static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
static void
ccwgroup_release (struct device *dev)
{
struct ccwgroup_device *gdev;
int i;
gdev = to_ccwgroupdev(dev);
for (i = 0; i < gdev->count; i++) {
if (gdev->cdev[i]) {
if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
put_device(&gdev->cdev[i]->dev);
}
}
kfree(gdev);
}
static int
__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{
char str[8];
int i, rc;
for (i = 0; i < gdev->count; i++) {
rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj,
"group_device");
if (rc) {
for (--i; i >= 0; i--)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
"group_device");
return rc;
}
}
for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i);
rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj,
str);
if (rc) {
for (--i; i >= 0; i--) {
sprintf(str, "cdev%d", i);
sysfs_remove_link(&gdev->dev.kobj, str);
}
for (i = 0; i < gdev->count; i++)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
"group_device");
return rc;
}
}
return 0;
}
static int __get_next_bus_id(const char **buf, char *bus_id)
{
int rc, len;
char *start, *end;
start = (char *)*buf;
end = strchr(start, ',');
if (!end) {
/* Last entry. Strip trailing newline, if applicable. */
end = strchr(start, '\n');
if (end)
*end = '\0';
len = strlen(start) + 1;
} else {
len = end - start + 1;
end++;
}
if (len < CCW_BUS_ID_SIZE) {
strlcpy(bus_id, start, len);
rc = 0;
} else
rc = -EINVAL;
*buf = end;
return rc;
}
static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE])
{
int cssid, ssid, devno;
/* Must be of form %x.%x.%04x */
if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3)
return 0;
return 1;
}
/**
* ccwgroup_create_from_string() - create and register a ccw group device
* @root: parent device for the new device
* @creator_id: identifier of creating driver
* @cdrv: ccw driver of slave devices
* @num_devices: number of slave devices
* @buf: buffer containing comma separated bus ids of slave devices
*
* Create and register a new ccw group device as a child of @root. Slave
* devices are obtained from the list of bus ids given in @buf and must all
* belong to @cdrv.
* Returns:
* %0 on success and an error code on failure.
* Context:
* non-atomic
*/
int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
struct ccw_driver *cdrv, int num_devices,
const char *buf)
{
struct ccwgroup_device *gdev;
int rc, i;
char tmp_bus_id[CCW_BUS_ID_SIZE];
const char *curr_buf;
gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
GFP_KERNEL);
if (!gdev)
return -ENOMEM;
atomic_set(&gdev->onoff, 0);
mutex_init(&gdev->reg_mutex);
mutex_lock(&gdev->reg_mutex);
gdev->creator_id = creator_id;
gdev->count = num_devices;
gdev->dev.bus = &ccwgroup_bus_type;
gdev->dev.parent = root;
gdev->dev.release = ccwgroup_release;
device_initialize(&gdev->dev);
curr_buf = buf;
for (i = 0; i < num_devices && curr_buf; i++) {
rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
if (rc != 0)
goto error;
if (!__is_valid_bus_id(tmp_bus_id)) {
rc = -EINVAL;
goto error;
}
gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
/*
* All devices have to be of the same type in
* order to be grouped.
*/
if (!gdev->cdev[i]
|| gdev->cdev[i]->id.driver_info !=
gdev->cdev[0]->id.driver_info) {
rc = -EINVAL;
goto error;
}
/* Don't allow a device to belong to more than one group. */
if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
rc = -EINVAL;
goto error;
}
dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
}
/* Check for sufficient number of bus ids. */
if (i < num_devices && !curr_buf) {
rc = -EINVAL;
goto error;
}
/* Check for trailing stuff. */
if (i == num_devices && strlen(curr_buf) > 0) {
rc = -EINVAL;
goto error;
}
dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
rc = device_add(&gdev->dev);
if (rc)
goto error;
get_device(&gdev->dev);
rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
if (rc) {
device_unregister(&gdev->dev);
goto error;
}
rc = __ccwgroup_create_symlinks(gdev);
if (!rc) {
mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
return 0;
}
device_remove_file(&gdev->dev, &dev_attr_ungroup);
device_unregister(&gdev->dev);
error:
for (i = 0; i < num_devices; i++)
if (gdev->cdev[i]) {
if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
put_device(&gdev->cdev[i]->dev);
gdev->cdev[i] = NULL;
}
mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
return rc;
}
EXPORT_SYMBOL(ccwgroup_create_from_string);
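/*
 * Illustrative sysfs usage (bus ids are examples; the "group" attribute
 * is created by the individual driver, qeth for instance):
 *	echo 0.0.f500,0.0.f501,0.0.f502 > /sys/bus/ccwgroup/drivers/qeth/group
 */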
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
void *data);
static struct notifier_block ccwgroup_nb = {
.notifier_call = ccwgroup_notifier
};
static int __init init_ccwgroup(void)
{
int ret;
ret = bus_register(&ccwgroup_bus_type);
if (ret)
return ret;
ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
if (ret)
bus_unregister(&ccwgroup_bus_type);
return ret;
}
static void __exit cleanup_ccwgroup(void)
{
bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
bus_unregister(&ccwgroup_bus_type);
}
module_init(init_ccwgroup);
module_exit(cleanup_ccwgroup);
/************************** driver stuff ******************************/
static int
ccwgroup_set_online(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv;
int ret;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state == CCWGROUP_ONLINE) {
ret = 0;
goto out;
}
if (!gdev->dev.driver) {
ret = -EINVAL;
goto out;
}
gdrv = to_ccwgroupdrv (gdev->dev.driver);
if ((ret = gdrv->set_online ? gdrv->set_online(gdev) : 0))
goto out;
gdev->state = CCWGROUP_ONLINE;
out:
atomic_set(&gdev->onoff, 0);
return ret;
}
static int
ccwgroup_set_offline(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv;
int ret;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state == CCWGROUP_OFFLINE) {
ret = 0;
goto out;
}
if (!gdev->dev.driver) {
ret = -EINVAL;
goto out;
}
gdrv = to_ccwgroupdrv (gdev->dev.driver);
if ((ret = gdrv->set_offline ? gdrv->set_offline(gdev) : 0))
goto out;
gdev->state = CCWGROUP_OFFLINE;
out:
atomic_set(&gdev->onoff, 0);
return ret;
}
static ssize_t
ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
unsigned long value;
int ret;
if (!dev->driver)
return -ENODEV;
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
if (!try_module_get(gdrv->owner))
return -EINVAL;
ret = strict_strtoul(buf, 0, &value);
if (ret)
goto out;
if (value == 1)
ret = ccwgroup_set_online(gdev);
else if (value == 0)
ret = ccwgroup_set_offline(gdev);
else
ret = -EINVAL;
out:
module_put(gdrv->owner);
return (ret == 0) ? count : ret;
}
static ssize_t
ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
int online;
online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE);
return sprintf(buf, online ? "1\n" : "0\n");
}
static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
static int
ccwgroup_probe (struct device *dev)
{
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
int ret;
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
if ((ret = device_create_file(dev, &dev_attr_online)))
return ret;
ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
if (ret)
device_remove_file(dev, &dev_attr_online);
return ret;
}
static int
ccwgroup_remove (struct device *dev)
{
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
device_remove_file(dev, &dev_attr_online);
device_remove_file(dev, &dev_attr_ungroup);
if (!dev->driver)
return 0;
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
if (gdrv->remove)
gdrv->remove(gdev);
return 0;
}
static void ccwgroup_shutdown(struct device *dev)
{
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
if (!dev->driver)
return;
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
if (gdrv->shutdown)
gdrv->shutdown(gdev);
}
static int ccwgroup_pm_prepare(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
/* Fail while device is being set online/offline. */
if (atomic_read(&gdev->onoff))
return -EAGAIN;
if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
return 0;
return gdrv->prepare ? gdrv->prepare(gdev) : 0;
}
static void ccwgroup_pm_complete(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
return;
if (gdrv->complete)
gdrv->complete(gdev);
}
static int ccwgroup_pm_freeze(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
return 0;
return gdrv->freeze ? gdrv->freeze(gdev) : 0;
}
static int ccwgroup_pm_thaw(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
return 0;
return gdrv->thaw ? gdrv->thaw(gdev) : 0;
}
static int ccwgroup_pm_restore(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
return 0;
return gdrv->restore ? gdrv->restore(gdev) : 0;
}
static struct dev_pm_ops ccwgroup_pm_ops = {
.prepare = ccwgroup_pm_prepare,
.complete = ccwgroup_pm_complete,
.freeze = ccwgroup_pm_freeze,
.thaw = ccwgroup_pm_thaw,
.restore = ccwgroup_pm_restore,
};
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.match = ccwgroup_bus_match,
.uevent = ccwgroup_uevent,
.probe = ccwgroup_probe,
.remove = ccwgroup_remove,
.shutdown = ccwgroup_shutdown,
.pm = &ccwgroup_pm_ops,
};
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
struct device *dev = data;
if (action == BUS_NOTIFY_UNBIND_DRIVER)
device_schedule_callback(dev, ccwgroup_ungroup_callback);
return NOTIFY_OK;
}
/**
* ccwgroup_driver_register() - register a ccw group driver
* @cdriver: driver to be registered
*
* This function is mainly a wrapper around driver_register().
*/
int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
/* register our new driver with the core */
cdriver->driver.bus = &ccwgroup_bus_type;
cdriver->driver.name = cdriver->name;
cdriver->driver.owner = cdriver->owner;
return driver_register(&cdriver->driver);
}
static int
__ccwgroup_match_all(struct device *dev, void *data)
{
return 1;
}
/**
* ccwgroup_driver_unregister() - deregister a ccw group driver
* @cdriver: driver to be deregistered
*
* This function is mainly a wrapper around driver_unregister().
*/
void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
{
struct device *dev;
/* We don't want ccwgroup devices to live longer than their driver. */
get_driver(&cdriver->driver);
while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
__ccwgroup_match_all))) {
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
mutex_lock(&gdev->reg_mutex);
__ccwgroup_remove_symlinks(gdev);
device_unregister(dev);
mutex_unlock(&gdev->reg_mutex);
put_device(dev);
}
put_driver(&cdriver->driver);
driver_unregister(&cdriver->driver);
}
/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
* @cdev: ccw device to be probed
*
* This is a dummy probe function for ccw devices that are slave devices in
* a ccw group device.
* Returns:
* always %0
*/
int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
return 0;
}
static struct ccwgroup_device *
__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
{
struct ccwgroup_device *gdev;
gdev = dev_get_drvdata(&cdev->dev);
if (gdev) {
if (get_device(&gdev->dev)) {
mutex_lock(&gdev->reg_mutex);
if (device_is_registered(&gdev->dev))
return gdev;
mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
}
return NULL;
}
return NULL;
}
/**
* ccwgroup_remove_ccwdev() - remove function for slave devices
* @cdev: ccw device to be removed
*
* This is a remove function for ccw devices that are slave devices in a ccw
* group device. It sets the ccw device offline and also deregisters the
* embedding ccw group device.
*/
void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
{
struct ccwgroup_device *gdev;
/* Ignore offlining errors, device is gone anyway. */
ccw_device_set_offline(cdev);
/* If one of its devices is gone, the whole group is done for. */
gdev = __ccwgroup_get_gdev_by_cdev(cdev);
if (gdev) {
__ccwgroup_remove_symlinks(gdev);
device_unregister(&gdev->dev);
mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
}
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccwgroup_driver_register);
EXPORT_SYMBOL(ccwgroup_driver_unregister);
EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);

View File

@@ -0,0 +1,731 @@
/*
* drivers/s390/cio/chp.c
*
* Copyright IBM Corp. 1999,2007
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/crw.h>
#include "cio.h"
#include "css.h"
#include "ioasm.h"
#include "cio_debug.h"
#include "chp.h"
#define to_channelpath(device) container_of(device, struct channel_path, dev)
#define CHP_INFO_UPDATE_INTERVAL 1*HZ
enum cfg_task_t {
cfg_none,
cfg_configure,
cfg_deconfigure
};
/* Map for pending configure tasks. */
static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
static DEFINE_MUTEX(cfg_lock);
static int cfg_busy;
/* Map for channel-path status. */
static struct sclp_chp_info chp_info;
static DEFINE_MUTEX(info_lock);
/* Time after which channel-path status may be outdated. */
static unsigned long chp_info_expires;
/* Workqueue to perform pending configure tasks. */
static struct workqueue_struct *chp_wq;
static struct work_struct cfg_work;
/* Wait queue for configure completion events. */
static wait_queue_head_t cfg_wait_queue;
/* Return channel_path struct for given chpid. */
static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
{
return channel_subsystems[chpid.cssid]->chps[chpid.id];
}
/* Set vary state for given chpid. */
static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
chpid_to_chp(chpid)->state = onoff;
}
/* On success return 0 if channel-path is varied offline, 1 if it is varied
* online. Return -ENODEV if channel-path is not registered. */
int chp_get_status(struct chp_id chpid)
{
return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
}
/**
* chp_get_sch_opm - return opm for subchannel
* @sch: subchannel
*
* Calculate and return the operational path mask (opm) based on the chpids
* used by the subchannel and the status of the associated channel-paths.
*/
u8 chp_get_sch_opm(struct subchannel *sch)
{
struct chp_id chpid;
int opm;
int i;
opm = 0;
chp_id_init(&chpid);
for (i = 0; i < 8; i++) {
opm <<= 1;
chpid.id = sch->schib.pmcw.chpid[i];
if (chp_get_status(chpid) != 0)
opm |= 1;
}
return opm;
}
EXPORT_SYMBOL_GPL(chp_get_sch_opm);
/**
* chp_is_registered - check if a channel-path is registered
* @chpid: channel-path ID
*
* Return non-zero if a channel-path with the given chpid is registered,
* zero otherwise.
*/
int chp_is_registered(struct chp_id chpid)
{
return chpid_to_chp(chpid) != NULL;
}
/*
* Function: s390_vary_chpid
* Varies the specified chpid online or offline
*/
static int s390_vary_chpid(struct chp_id chpid, int on)
{
char dbf_text[15];
int status;
sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
chpid.id);
CIO_TRACE_EVENT(2, dbf_text);
status = chp_get_status(chpid);
if (!on && !status)
return 0;
set_chp_logically_online(chpid, on);
chsc_chp_vary(chpid, on);
return 0;
}
/*
* Channel measurement related functions
*/
static ssize_t chp_measurement_chars_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
struct device *device;
device = container_of(kobj, struct device, kobj);
chp = to_channelpath(device);
if (!chp->cmg_chars)
return 0;
return memory_read_from_buffer(buf, count, &off,
chp->cmg_chars, sizeof(struct cmg_chars));
}
static struct bin_attribute chp_measurement_chars_attr = {
.attr = {
.name = "measurement_chars",
.mode = S_IRUSR,
},
.size = sizeof(struct cmg_chars),
.read = chp_measurement_chars_read,
};
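/*
 * The measurement block may be updated by the channel subsystem while
 * it is read. The do/while loop below therefore copies the entry twice
 * and retries until both copies agree in their first value, yielding a
 * consistent snapshot.
 */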
static void chp_measurement_copy_block(struct cmg_entry *buf,
struct channel_subsystem *css,
struct chp_id chpid)
{
void *area;
struct cmg_entry *entry, reference_buf;
int idx;
if (chpid.id < 128) {
area = css->cub_addr1;
idx = chpid.id;
} else {
area = css->cub_addr2;
idx = chpid.id - 128;
}
entry = area + (idx * sizeof(struct cmg_entry));
do {
memcpy(buf, entry, sizeof(*entry));
memcpy(&reference_buf, entry, sizeof(*entry));
} while (reference_buf.values[0] != buf->values[0]);
}
static ssize_t chp_measurement_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
struct channel_subsystem *css;
struct device *device;
unsigned int size;
device = container_of(kobj, struct device, kobj);
chp = to_channelpath(device);
css = to_css(chp->dev.parent);
size = sizeof(struct cmg_entry);
/* Only allow single reads. */
if (off || count < size)
return 0;
chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
count = size;
return count;
}
static struct bin_attribute chp_measurement_attr = {
.attr = {
.name = "measurement",
.mode = S_IRUSR,
},
.size = sizeof(struct cmg_entry),
.read = chp_measurement_read,
};
void chp_remove_cmg_attr(struct channel_path *chp)
{
device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
device_remove_bin_file(&chp->dev, &chp_measurement_attr);
}
int chp_add_cmg_attr(struct channel_path *chp)
{
int ret;
ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
if (ret)
return ret;
ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
if (ret)
device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
return ret;
}
/*
* Files for the channel path entries.
*/
static ssize_t chp_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
if (!chp)
return 0;
return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
sprintf(buf, "offline\n"));
}
static ssize_t chp_status_write(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct channel_path *cp = to_channelpath(dev);
char cmd[10];
int num_args;
int error;
num_args = sscanf(buf, "%5s", cmd);
if (!num_args)
return count;
if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
error = s390_vary_chpid(cp->chpid, 1);
else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
error = s390_vary_chpid(cp->chpid, 0);
else
error = -EINVAL;
return error < 0 ? error : count;
}
static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
static ssize_t chp_configure_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *cp;
int status;
cp = to_channelpath(dev);
status = chp_info_get_status(cp->chpid);
if (status < 0)
return status;
return snprintf(buf, PAGE_SIZE, "%d\n", status);
}
static int cfg_wait_idle(void);
static ssize_t chp_configure_write(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct channel_path *cp;
int val;
char delim;
if (sscanf(buf, "%d %c", &val, &delim) != 1)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
cp = to_channelpath(dev);
chp_cfg_schedule(cp->chpid, val);
cfg_wait_idle();
return count;
}
static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_path *chp = to_channelpath(dev);
if (!chp)
return 0;
return sprintf(buf, "%x\n", chp->desc.desc);
}
static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_path *chp = to_channelpath(dev);
if (!chp)
return 0;
if (chp->cmg == -1) /* channel measurements not available */
return sprintf(buf, "unknown\n");
return sprintf(buf, "%x\n", chp->cmg);
}
static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
static ssize_t chp_shared_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
if (!chp)
return 0;
if (chp->shared == -1) /* channel measurements not available */
return sprintf(buf, "unknown\n");
return sprintf(buf, "%x\n", chp->shared);
}
static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
static struct attribute *chp_attrs[] = {
&dev_attr_status.attr,
&dev_attr_configure.attr,
&dev_attr_type.attr,
&dev_attr_cmg.attr,
&dev_attr_shared.attr,
NULL,
};
static struct attribute_group chp_attr_group = {
.attrs = chp_attrs,
};
static void chp_release(struct device *dev)
{
struct channel_path *cp;
cp = to_channelpath(dev);
kfree(cp);
}
/**
* chp_new - register a new channel-path
* @chpid: channel-path ID
*
* Create and register data structure representing new channel-path. Return
* zero on success, non-zero otherwise.
*/
int chp_new(struct chp_id chpid)
{
struct channel_path *chp;
int ret;
if (chp_is_registered(chpid))
return 0;
chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
if (!chp)
return -ENOMEM;
/* fill in status, etc. */
chp->chpid = chpid;
chp->state = 1;
chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
chp->dev.release = chp_release;
/* Obtain channel path description and fill it in. */
ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
if (ret)
goto out_free;
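/* Bit 0x80 of the descriptor flags appears to mark the channel path as
* valid (assumption based on this check); give up if it is clear. */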
if ((chp->desc.flags & 0x80) == 0) {
ret = -ENODEV;
goto out_free;
}
/* Get channel-measurement characteristics. */
if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
ret = chsc_get_channel_measurement_chars(chp);
if (ret)
goto out_free;
} else {
chp->cmg = -1;
}
dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
/* make it known to the system */
ret = device_register(&chp->dev);
if (ret) {
CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
chpid.cssid, chpid.id, ret);
put_device(&chp->dev);
goto out;
}
ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
if (ret) {
device_unregister(&chp->dev);
goto out;
}
mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
if (channel_subsystems[chpid.cssid]->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
device_unregister(&chp->dev);
mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
goto out;
}
}
channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
goto out;
out_free:
kfree(chp);
out:
return ret;
}
/**
* chp_get_chp_desc - return newly allocated channel-path description
* @chpid: channel-path ID
*
* On success return a newly allocated copy of the channel-path description
* data associated with the given channel-path ID. Return %NULL on error.
*/
void *chp_get_chp_desc(struct chp_id chpid)
{
struct channel_path *chp;
struct channel_path_desc *desc;
chp = chpid_to_chp(chpid);
if (!chp)
return NULL;
desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
if (!desc)
return NULL;
memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
return desc;
}
/**
* chp_process_crw - process channel-path status change
* @crw0: channel report-word to handle
* @crw1: second channel-report word (always NULL)
* @overflow: crw overflow indication
*
* Handle channel-report-words indicating that the status of a channel-path
* has changed.
*/
static void chp_process_crw(struct crw *crw0, struct crw *crw1,
int overflow)
{
struct chp_id chpid;
if (overflow) {
css_schedule_eval_all();
return;
}
CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
/*
* Check for solicited machine checks. These are
* created by reset channel path and need not be
* handled here.
*/
if (crw0->slct) {
CIO_CRW_EVENT(2, "solicited machine check for "
"channel path %02X\n", crw0->rsid);
return;
}
chp_id_init(&chpid);
chpid.id = crw0->rsid;
switch (crw0->erc) {
case CRW_ERC_IPARM: /* Path has become available. */
if (!chp_is_registered(chpid))
chp_new(chpid);
chsc_chp_online(chpid);
break;
case CRW_ERC_PERRI: /* Path has become unavailable. */
case CRW_ERC_PERRN:
chsc_chp_offline(chpid);
break;
default:
CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
crw0->erc);
}
}
int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
{
int i;
int mask;
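/* Walk the eight possible paths (MSB-first): the path must be installed,
* carry the requested chpid and, if a full link address is valid, match
* the masked link address. */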
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (!(ssd->path_mask & mask))
continue;
if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
continue;
if ((ssd->fla_valid_mask & mask) &&
((ssd->fla[i] & link->fla_mask) != link->fla))
continue;
return mask;
}
return 0;
}
EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
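/* Map a chpid to its bit number in the chp_info bit fields: css 0 occupies
* bits 0-255, css 1 bits 256-511, and so on. */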
static inline int info_bit_num(struct chp_id id)
{
return id.id + id.cssid * (__MAX_CHPID + 1);
}
/* Force chp_info refresh on next call to info_validate(). */
static void info_expire(void)
{
mutex_lock(&info_lock);
chp_info_expires = jiffies - 1;
mutex_unlock(&info_lock);
}
/* Ensure that chp_info is up-to-date. */
static int info_update(void)
{
int rc;
mutex_lock(&info_lock);
rc = 0;
if (time_after(jiffies, chp_info_expires)) {
/* Data is too old, update. */
rc = sclp_chp_read_info(&chp_info);
chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
}
mutex_unlock(&info_lock);
return rc;
}
/**
* chp_info_get_status - retrieve configure status of a channel-path
* @chpid: channel-path ID
*
* On success, return 0 for standby, 1 for configured, 2 for reserved,
* 3 for not recognized. Return negative error code on error.
*/
int chp_info_get_status(struct chp_id chpid)
{
int rc;
int bit;
rc = info_update();
if (rc)
return rc;
bit = info_bit_num(chpid);
mutex_lock(&info_lock);
if (!chp_test_bit(chp_info.recognized, bit))
rc = CHP_STATUS_NOT_RECOGNIZED;
else if (chp_test_bit(chp_info.configured, bit))
rc = CHP_STATUS_CONFIGURED;
else if (chp_test_bit(chp_info.standby, bit))
rc = CHP_STATUS_STANDBY;
else
rc = CHP_STATUS_RESERVED;
mutex_unlock(&info_lock);
return rc;
}
/* Return configure task for chpid. */
static enum cfg_task_t cfg_get_task(struct chp_id chpid)
{
return chp_cfg_task[chpid.cssid][chpid.id];
}
/* Set configure task for chpid. */
static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
{
chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}
/* Perform one configure/deconfigure request. Reschedule work function until
* last request. */
static void cfg_func(struct work_struct *work)
{
struct chp_id chpid;
enum cfg_task_t t;
int rc;
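/* Fetch a single pending task under cfg_lock and clear it; the SCLP call
* itself is made without the lock held. */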
mutex_lock(&cfg_lock);
t = cfg_none;
chp_id_for_each(&chpid) {
t = cfg_get_task(chpid);
if (t != cfg_none) {
cfg_set_task(chpid, cfg_none);
break;
}
}
mutex_unlock(&cfg_lock);
switch (t) {
case cfg_configure:
rc = sclp_chp_configure(chpid);
if (rc)
CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
"%d\n", chpid.cssid, chpid.id, rc);
else {
info_expire();
chsc_chp_online(chpid);
}
break;
case cfg_deconfigure:
rc = sclp_chp_deconfigure(chpid);
if (rc)
CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
"%d\n", chpid.cssid, chpid.id, rc);
else {
info_expire();
chsc_chp_offline(chpid);
}
break;
case cfg_none:
/* Get updated information after last change. */
info_update();
mutex_lock(&cfg_lock);
cfg_busy = 0;
mutex_unlock(&cfg_lock);
wake_up_interruptible(&cfg_wait_queue);
return;
}
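/* A configure/deconfigure request was handled; requeue the work function
* to pick up any remaining pending tasks. */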
queue_work(chp_wq, &cfg_work);
}
/**
* chp_cfg_schedule - schedule chpid configuration request
* @chpid: channel-path ID
* @configure: non-zero for configure, zero for deconfigure
*
* Schedule a channel-path configuration/deconfiguration request.
*/
void chp_cfg_schedule(struct chp_id chpid, int configure)
{
CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
configure);
mutex_lock(&cfg_lock);
cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
cfg_busy = 1;
mutex_unlock(&cfg_lock);
queue_work(chp_wq, &cfg_work);
}
/**
* chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
* @chpid: channel-path ID
*
* Cancel an active channel-path deconfiguration request if it has not yet
* been performed.
*/
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
mutex_lock(&cfg_lock);
if (cfg_get_task(chpid) == cfg_deconfigure)
cfg_set_task(chpid, cfg_none);
mutex_unlock(&cfg_lock);
}
static int cfg_wait_idle(void)
{
if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
return -ERESTARTSYS;
return 0;
}
static int __init chp_init(void)
{
struct chp_id chpid;
int ret;
ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
if (ret)
return ret;
chp_wq = create_singlethread_workqueue("cio_chp");
if (!chp_wq) {
crw_unregister_handler(CRW_RSC_CPATH);
return -ENOMEM;
}
INIT_WORK(&cfg_work, cfg_func);
init_waitqueue_head(&cfg_wait_queue);
if (info_update())
return 0;
/* Register available channel-paths. */
chp_id_for_each(&chpid) {
if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
chp_new(chpid);
}
return 0;
}
subsys_initcall(chp_init);

View File

@@ -0,0 +1,64 @@
/*
* drivers/s390/cio/chp.h
*
* Copyright IBM Corp. 2007
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#ifndef S390_CHP_H
#define S390_CHP_H S390_CHP_H
#include <linux/types.h>
#include <linux/device.h>
#include <asm/chpid.h>
#include "chsc.h"
#include "css.h"
#define CHP_STATUS_STANDBY 0
#define CHP_STATUS_CONFIGURED 1
#define CHP_STATUS_RESERVED 2
#define CHP_STATUS_NOT_RECOGNIZED 3
#define CHP_ONLINE 0
#define CHP_OFFLINE 1
#define CHP_VARY_ON 2
#define CHP_VARY_OFF 3
struct chp_link {
struct chp_id chpid;
u32 fla_mask;
u16 fla;
};
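/* Test bit 'num' in an MSB-first bitmap: bit 0 is the 0x80 bit of byte 0. */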
static inline int chp_test_bit(u8 *bitmap, int num)
{
int byte = num >> 3;
int mask = 128 >> (num & 7);
return (bitmap[byte] & mask) ? 1 : 0;
}
struct channel_path {
struct chp_id chpid;
int state;
struct channel_path_desc desc;
/* Channel-measurement related stuff: */
int cmg;
int shared;
void *cmg_chars;
struct device dev;
};
int chp_get_status(struct chp_id chpid);
u8 chp_get_sch_opm(struct subchannel *sch);
int chp_is_registered(struct chp_id chpid);
void *chp_get_chp_desc(struct chp_id chpid);
void chp_remove_cmg_attr(struct channel_path *chp);
int chp_add_cmg_attr(struct channel_path *chp);
int chp_new(struct chp_id chpid);
void chp_cfg_schedule(struct chp_id chpid, int configure);
void chp_cfg_cancel_deconfigure(struct chp_id chpid);
int chp_info_get_status(struct chp_id chpid);
int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
#endif /* S390_CHP_H */

View File

@@ -0,0 +1,977 @@
/*
* drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
*
* Copyright IBM Corp. 1999,2008
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"
static void *sei_page;
/**
* chsc_error_from_response() - convert a chsc response to an error
* @response: chsc response code
*
* Returns an appropriate Linux error code for @response.
*/
int chsc_error_from_response(int response)
{
switch (response) {
case 0x0001:
return 0;
case 0x0002:
case 0x0003:
case 0x0006:
case 0x0007:
case 0x0008:
case 0x000a:
return -EINVAL;
case 0x0004:
return -EOPNOTSUPP;
default:
return -EIO;
}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);
struct chsc_ssd_area {
struct chsc_header request;
u16 :10;
u16 ssid:2;
u16 :4;
u16 f_sch; /* first subchannel */
u16 :16;
u16 l_sch; /* last subchannel */
u32 :32;
struct chsc_header response;
u32 :32;
u8 sch_valid : 1;
u8 dev_valid : 1;
u8 st : 3; /* subchannel type */
u8 zeroes : 3;
u8 unit_addr; /* unit address */
u16 devno; /* device number */
u8 path_mask;
u8 fla_valid_mask;
u16 sch; /* subchannel */
u8 chpid[8]; /* chpids 0-7 */
u16 fla[8]; /* full link addresses 0-7 */
} __attribute__ ((packed));
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
unsigned long page;
struct chsc_ssd_area *ssd_area;
int ccode;
int ret;
int i;
int mask;
page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!page)
return -ENOMEM;
ssd_area = (struct chsc_ssd_area *) page;
ssd_area->request.length = 0x0010;
ssd_area->request.code = 0x0004;
ssd_area->ssid = schid.ssid;
ssd_area->f_sch = schid.sch_no;
ssd_area->l_sch = schid.sch_no;
ccode = chsc(ssd_area);
/* Check response. */
if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY;
goto out_free;
}
ret = chsc_error_from_response(ssd_area->response.code);
if (ret != 0) {
CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
schid.ssid, schid.sch_no,
ssd_area->response.code);
goto out_free;
}
if (!ssd_area->sch_valid) {
ret = -ENODEV;
goto out_free;
}
/* Copy data */
ret = 0;
memset(ssd, 0, sizeof(struct chsc_ssd_info));
if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
(ssd_area->st != SUBCHANNEL_TYPE_MSG))
goto out_free;
ssd->path_mask = ssd_area->path_mask;
ssd->fla_valid_mask = ssd_area->fla_valid_mask;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (ssd_area->path_mask & mask) {
chp_id_init(&ssd->chpid[i]);
ssd->chpid[i].id = ssd_area->chpid[i];
}
if (ssd_area->fla_valid_mask & mask)
ssd->fla[i] = ssd_area->fla[i];
}
out_free:
free_page(page);
return ret;
}
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
if (sch->driver && sch->driver->chp_event)
if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
goto out_unreg;
spin_unlock_irq(sch->lock);
return 0;
out_unreg:
sch->lpm = 0;
spin_unlock_irq(sch->lock);
css_schedule_eval(sch->schid);
return 0;
}
void chsc_chp_offline(struct chp_id chpid)
{
char dbf_txt[15];
struct chp_link link;
sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
if (chp_get_status(chpid) <= 0)
return;
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
struct schib schib;
/*
* We don't know the device yet, but since a path
* may be available now to the device we'll have
* to do recognition again.
* Since we don't have any idea about which chpid
* that beast may be on we'll have to do a stsch
* on all devices, grr...
*/
if (stsch_err(schid, &schib))
/* We're through */
return -ENXIO;
/* Put it on the slow path. */
css_schedule_eval(schid);
return 0;
}
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
if (sch->driver && sch->driver->chp_event)
sch->driver->chp_event(sch, data, CHP_ONLINE);
spin_unlock_irq(sch->lock);
return 0;
}
static void s390_process_res_acc(struct chp_link *link)
{
char dbf_txt[15];
sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
link->chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
if (link->fla != 0) {
sprintf(dbf_txt, "fla%x", link->fla);
CIO_TRACE_EVENT(2, dbf_txt);
}
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
* I/O resources may have become accessible.
* Scan through all subchannels that may be concerned and
* do a validation on those.
* The more information we have (info), the less scanning
* will we have to do.
*/
for_each_subchannel_staged(__s390_process_res_acc,
s390_process_res_acc_new_sch, link);
}
static int
__get_chpid_from_lir(void *data)
{
struct lir {
u8 iq;
u8 ic;
u16 sci;
/* incident-node descriptor */
u32 indesc[28];
/* attached-node descriptor */
u32 andesc[28];
/* incident-specific information */
u32 isinfo[28];
} __attribute__ ((packed)) *lir;
lir = data;
if (!(lir->iq & 0x80))
/* NULL link incident record */
return -EINVAL;
if (!(lir->indesc[0] & 0xc0000000))
/* node descriptor not valid */
return -EINVAL;
if (!(lir->indesc[0] & 0x10000000))
/* don't handle device-type nodes - FIXME */
return -EINVAL;
/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
return (u16) (lir->indesc[0] & 0x000000ff);
}
struct chsc_sei_area {
struct chsc_header request;
u32 reserved1;
u32 reserved2;
u32 reserved3;
struct chsc_header response;
u32 reserved4;
u8 flags;
u8 vf; /* validity flags */
u8 rs; /* reporting source */
u8 cc; /* content code */
u16 fla; /* full link address */
u16 rsid; /* reporting source id */
u32 reserved5;
u32 reserved6;
u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
struct chp_id chpid;
int id;
CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
sei_area->rs, sei_area->rsid);
if (sei_area->rs != 4)
return;
id = __get_chpid_from_lir(sei_area->ccdf);
if (id < 0)
CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
else {
chp_id_init(&chpid);
chpid.id = id;
chsc_chp_offline(chpid);
}
}
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
struct chp_link link;
struct chp_id chpid;
int status;
CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
"rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
if (sei_area->rs != 4)
return;
chp_id_init(&chpid);
chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
status = chp_get_status(chpid);
if (status < 0)
chp_new(chpid);
else if (!status)
return;
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
link.fla = sei_area->fla;
if ((sei_area->vf & 0xc0) == 0xc0)
/* full link address */
link.fla_mask = 0xffff;
else
/* link address */
link.fla_mask = 0xff00;
}
s390_process_res_acc(&link);
}
struct chp_config_data {
u8 map[32];
u8 op;
u8 pc;
};
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
struct chp_config_data *data;
struct chp_id chpid;
int num;
char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
if (sei_area->rs != 0)
return;
data = (struct chp_config_data *) &(sei_area->ccdf);
chp_id_init(&chpid);
for (num = 0; num <= __MAX_CHPID; num++) {
if (!chp_test_bit(data->map, num))
continue;
chpid.id = num;
pr_notice("Processing %s for channel path %x.%02x\n",
events[data->op], chpid.cssid, chpid.id);
switch (data->op) {
case 0:
chp_cfg_schedule(chpid, 1);
break;
case 1:
chp_cfg_schedule(chpid, 0);
break;
case 2:
chp_cfg_cancel_deconfigure(chpid);
break;
}
}
}
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
/* Check if we might have lost some information. */
if (sei_area->flags & 0x40) {
CIO_CRW_EVENT(2, "chsc: event overflow\n");
css_schedule_eval_all();
}
/* which kind of information was stored? */
switch (sei_area->cc) {
case 1: /* link incident */
chsc_process_sei_link_incident(sei_area);
break;
case 2: /* I/O resource accessibility */
chsc_process_sei_res_acc(sei_area);
break;
case 8: /* channel-path-configuration notification */
chsc_process_sei_chp_config(sei_area);
break;
default: /* other stuff */
CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
sei_area->cc);
break;
}
}
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
struct chsc_sei_area *sei_area;
if (overflow) {
css_schedule_eval_all();
return;
}
CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
if (!sei_page)
return;
/* Access to sei_page is serialized through machine check handler
* thread, so no need for locking. */
sei_area = sei_page;
CIO_TRACE_EVENT(2, "prcss");
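/* Flag 0x80 in the response signals that more event information is
* pending; keep issuing the store-event-information CHSC until it clears. */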
do {
memset(sei_area, 0, sizeof(*sei_area));
sei_area->request.length = 0x0010;
sei_area->request.code = 0x000e;
if (chsc(sei_area))
break;
if (sei_area->response.code == 0x0001) {
CIO_CRW_EVENT(4, "chsc: sei successful\n");
chsc_process_sei(sei_area);
} else {
CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
sei_area->response.code);
break;
}
} while (sei_area->flags & 0x80);
}
void chsc_chp_online(struct chp_id chpid)
{
char dbf_txt[15];
struct chp_link link;
sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
if (chp_get_status(chpid) != 0) {
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
}
}
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
struct chp_id chpid, int on)
{
unsigned long flags;
struct chp_link link;
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
spin_lock_irqsave(sch->lock, flags);
if (sch->driver && sch->driver->chp_event)
sch->driver->chp_event(sch, &link,
on ? CHP_VARY_ON : CHP_VARY_OFF);
spin_unlock_irqrestore(sch->lock, flags);
}
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
struct chp_id *chpid = data;
__s390_subchannel_vary_chpid(sch, *chpid, 0);
return 0;
}
static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
struct chp_id *chpid = data;
__s390_subchannel_vary_chpid(sch, *chpid, 1);
return 0;
}
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
struct schib schib;
if (stsch_err(schid, &schib))
/* We're through */
return -ENXIO;
/* Put it on the slow path. */
css_schedule_eval(schid);
return 0;
}
/**
* chsc_chp_vary - propagate channel-path vary operation to subchannels
* @chpid: channel-path ID
* @on: non-zero for vary online, zero for vary offline
*/
int chsc_chp_vary(struct chp_id chpid, int on)
{
struct chp_link link;
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
* Redo PathVerification on the devices the chpid connects to
*/
if (on)
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
__s390_vary_chpid_on, &link);
else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
NULL, &link);
return 0;
}
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
int i;
for (i = 0; i <= __MAX_CHPID; i++) {
if (!css->chps[i])
continue;
chp_remove_cmg_attr(css->chps[i]);
}
}
static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
int i, ret;
ret = 0;
for (i = 0; i <= __MAX_CHPID; i++) {
if (!css->chps[i])
continue;
ret = chp_add_cmg_attr(css->chps[i]);
if (ret)
goto cleanup;
}
return ret;
cleanup:
for (--i; i >= 0; i--) {
if (!css->chps[i])
continue;
chp_remove_cmg_attr(css->chps[i]);
}
return ret;
}
int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
struct {
struct chsc_header request;
u32 operation_code : 2;
u32 : 30;
u32 key : 4;
u32 : 28;
u32 zeroes1;
u32 cub_addr1;
u32 zeroes2;
u32 cub_addr2;
u32 reserved[13];
struct chsc_header response;
u32 status : 8;
u32 : 4;
u32 fmt : 4;
u32 : 16;
} __attribute__ ((packed)) *secm_area;
int ret, ccode;
secm_area = page;
secm_area->request.length = 0x0050;
secm_area->request.code = 0x0016;
secm_area->key = PAGE_DEFAULT_KEY;
secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
secm_area->operation_code = enable ? 0 : 1;
ccode = chsc(secm_area);
if (ccode > 0)
return (ccode == 3) ? -ENODEV : -EBUSY;
switch (secm_area->response.code) {
case 0x0102:
case 0x0103:
ret = -EINVAL;
break;
default:
ret = chsc_error_from_response(secm_area->response.code);
}
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
secm_area->response.code);
return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
void *secm_area;
int ret;
secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!secm_area)
return -ENOMEM;
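/* First-time enable: allocate the two areas (cub_addr1/2) that the secm
* request below passes to the hardware; 'cub' presumably stands for
* channel-utilization block. */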
if (enable && !css->cm_enabled) {
css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!css->cub_addr1 || !css->cub_addr2) {
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
free_page((unsigned long)secm_area);
return -ENOMEM;
}
}
ret = __chsc_do_secm(css, enable, secm_area);
if (!ret) {
css->cm_enabled = enable;
if (css->cm_enabled) {
ret = chsc_add_cmg_attr(css);
if (ret) {
memset(secm_area, 0, PAGE_SIZE);
__chsc_do_secm(css, 0, secm_area);
css->cm_enabled = 0;
}
} else
chsc_remove_cmg_attr(css);
}
if (!css->cm_enabled) {
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
}
free_page((unsigned long)secm_area);
return ret;
}
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
int c, int m,
struct chsc_response_struct *resp)
{
int ccode, ret;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 c : 1;
u32 fmt : 4;
u32 cssid : 8;
u32 : 4;
u32 rfmt : 4;
u32 first_chpid : 8;
u32 : 24;
u32 last_chpid : 8;
u32 zeroes1;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scpd_area;
if ((rfmt == 1) && !css_general_characteristics.fcs)
return -EINVAL;
if ((rfmt == 2) && !css_general_characteristics.cib)
return -EINVAL;
scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scpd_area)
return -ENOMEM;
scpd_area->request.length = 0x0010;
scpd_area->request.code = 0x0002;
scpd_area->cssid = chpid.cssid;
scpd_area->first_chpid = chpid.id;
scpd_area->last_chpid = chpid.id;
scpd_area->m = m;
scpd_area->c = c;
scpd_area->fmt = fmt;
scpd_area->rfmt = rfmt;
ccode = chsc(scpd_area);
if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY;
goto out;
}
ret = chsc_error_from_response(scpd_area->response.code);
if (ret == 0)
/* Success. */
memcpy(resp, &scpd_area->response, scpd_area->response.length);
else
CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
scpd_area->response.code);
out:
free_page((unsigned long)scpd_area);
return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc)
{
struct chsc_response_struct *chsc_resp;
int ret;
chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
if (!chsc_resp)
return -ENOMEM;
ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
if (ret)
goto out_free;
memcpy(desc, &chsc_resp->data, chsc_resp->length);
out_free:
kfree(chsc_resp);
return ret;
}
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
struct cmg_chars *chars)
{
switch (chp->cmg) {
case 2:
case 3:
chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
GFP_KERNEL);
if (chp->cmg_chars) {
int i, mask;
struct cmg_chars *cmg_chars;
cmg_chars = chp->cmg_chars;
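/* cmcv is a five-bit validity vector, one bit per measurement
* characteristic; copy only the values flagged as valid, zero the rest. */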
for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
mask = 0x80 >> (i + 3);
if (cmcv & mask)
cmg_chars->values[i] = chars->values[i];
else
cmg_chars->values[i] = 0;
}
}
break;
default:
/* No cmg-dependent data. */
break;
}
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
int ccode, ret;
struct {
struct chsc_header request;
u32 : 24;
u32 first_chpid : 8;
u32 : 24;
u32 last_chpid : 8;
u32 zeroes1;
struct chsc_header response;
u32 zeroes2;
u32 not_valid : 1;
u32 shared : 1;
u32 : 22;
u32 chpid : 8;
u32 cmcv : 5;
u32 : 11;
u32 cmgq : 8;
u32 cmg : 8;
u32 zeroes3;
u32 data[NR_MEASUREMENT_CHARS];
} __attribute__ ((packed)) *scmc_area;
scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scmc_area)
return -ENOMEM;
scmc_area->request.length = 0x0010;
scmc_area->request.code = 0x0022;
scmc_area->first_chpid = chp->chpid.id;
scmc_area->last_chpid = chp->chpid.id;
ccode = chsc(scmc_area);
if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY;
goto out;
}
ret = chsc_error_from_response(scmc_area->response.code);
if (ret == 0) {
/* Success. */
if (!scmc_area->not_valid) {
chp->cmg = scmc_area->cmg;
chp->shared = scmc_area->shared;
chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
(struct cmg_chars *)
&scmc_area->data);
} else {
chp->cmg = -1;
chp->shared = -1;
}
} else {
CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
scmc_area->response.code);
}
out:
free_page((unsigned long)scmc_area);
return ret;
}
int __init chsc_alloc_sei_area(void)
{
int ret;
sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sei_page) {
CIO_MSG_EVENT(0, "Can't allocate page for processing of "
"chsc machine checks!\n");
return -ENOMEM;
}
ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
if (ret)
free_page((unsigned long)sei_page);
return ret;
}
void __init chsc_free_sei_area(void)
{
crw_unregister_handler(CRW_RSC_CSS);
free_page((unsigned long)sei_page);
}
int __init
chsc_enable_facility(int operation_code)
{
int ret;
struct {
struct chsc_header request;
u8 reserved1:4;
u8 format:4;
u8 reserved2;
u16 operation_code;
u32 reserved3;
u32 reserved4;
u32 operation_data_area[252];
struct chsc_header response;
u32 reserved5:4;
u32 format2:4;
u32 reserved6:24;
} __attribute__ ((packed)) *sda_area;
sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
if (!sda_area)
return -ENOMEM;
sda_area->request.length = 0x0400;
sda_area->request.code = 0x0031;
sda_area->operation_code = operation_code;
ret = chsc(sda_area);
if (ret > 0) {
ret = (ret == 3) ? -ENODEV : -EBUSY;
goto out;
}
switch (sda_area->response.code) {
case 0x0101:
ret = -EOPNOTSUPP;
break;
default:
ret = chsc_error_from_response(sda_area->response.code);
}
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
operation_code, sda_area->response.code);
out:
free_page((unsigned long)sda_area);
return ret;
}
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
int __init
chsc_determine_css_characteristics(void)
{
int result;
struct {
struct chsc_header request;
u32 reserved1;
u32 reserved2;
u32 reserved3;
struct chsc_header response;
u32 reserved4;
u32 general_char[510];
u32 chsc_char[518];
} __attribute__ ((packed)) *scsc_area;
scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scsc_area)
return -ENOMEM;
scsc_area->request.length = 0x0010;
scsc_area->request.code = 0x0010;
result = chsc(scsc_area);
if (result) {
result = (result == 3) ? -ENODEV : -EBUSY;
goto exit;
}
result = chsc_error_from_response(scsc_area->response.code);
if (result == 0) {
memcpy(&css_general_characteristics, scsc_area->general_char,
sizeof(css_general_characteristics));
memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
sizeof(css_chsc_characteristics));
} else
CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
scsc_area->response.code);
exit:
free_page((unsigned long)scsc_area);
return result;
}
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
struct {
struct chsc_header request;
unsigned int rsvd0;
unsigned int op : 8;
unsigned int rsvd1 : 8;
unsigned int ctrl : 16;
unsigned int rsvd2[5];
struct chsc_header response;
unsigned int rsvd3[7];
} __attribute__ ((packed)) *rr;
int rc;
memset(page, 0, PAGE_SIZE);
rr = page;
rr->request.length = 0x0020;
rr->request.code = 0x0033;
rr->op = op;
rr->ctrl = ctrl;
rc = chsc(rr);
if (rc)
return -EIO;
rc = (rr->response.code == 0x0001) ? 0 : -EIO;
return rc;
}
int chsc_sstpi(void *page, void *result, size_t size)
{
struct {
struct chsc_header request;
unsigned int rsvd0[3];
struct chsc_header response;
char data[size];
} __attribute__ ((packed)) *rr;
int rc;
memset(page, 0, PAGE_SIZE);
rr = page;
rr->request.length = 0x0010;
rr->request.code = 0x0038;
rc = chsc(rr);
if (rc)
return -EIO;
memcpy(result, &rr->data, size);
return (rr->response.code == 0x0001) ? 0 : -EIO;
}

View File

@@ -0,0 +1,83 @@
#ifndef S390_CHSC_H
#define S390_CHSC_H
#include <linux/types.h>
#include <linux/device.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/schid.h>
#define CHSC_SDA_OC_MSS 0x2
struct chsc_header {
u16 length;
u16 code;
} __attribute__ ((packed));
#define NR_MEASUREMENT_CHARS 5
struct cmg_chars {
u32 values[NR_MEASUREMENT_CHARS];
} __attribute__ ((packed));
#define NR_MEASUREMENT_ENTRIES 8
struct cmg_entry {
u32 values[NR_MEASUREMENT_ENTRIES];
} __attribute__ ((packed));
struct channel_path_desc {
u8 flags;
u8 lsn;
u8 desc;
u8 chpid;
u8 swla;
u8 zeroes;
u8 chla;
u8 chpp;
} __attribute__ ((packed));
struct channel_path;
struct css_chsc_char {
u64 res;
u64 : 20;
u32 secm : 1; /* bit 84 */
u32 : 1;
u32 scmc : 1; /* bit 86 */
u32 : 20;
u32 scssc : 1; /* bit 107 */
u32 scsscf : 1; /* bit 108 */
u32 : 19;
} __attribute__ ((packed));
extern struct css_chsc_char css_chsc_characteristics;
struct chsc_ssd_info {
u8 path_mask;
u8 fla_valid_mask;
struct chp_id chpid[8];
u16 fla[8];
};
extern int chsc_get_ssd_info(struct subchannel_id schid,
struct chsc_ssd_info *ssd);
extern int chsc_determine_css_characteristics(void);
extern int chsc_alloc_sei_area(void);
extern void chsc_free_sei_area(void);
extern int chsc_enable_facility(int);
struct channel_subsystem;
extern int chsc_secm(struct channel_subsystem *, int);
int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page);
int chsc_chp_vary(struct chp_id chpid, int on);
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
int c, int m,
struct chsc_response_struct *resp);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc);
void chsc_chp_online(struct chp_id chpid);
void chsc_chp_offline(struct chp_id chpid);
int chsc_get_channel_measurement_chars(struct channel_path *chp);
int chsc_error_from_response(int response);
#endif

View File

@@ -0,0 +1,850 @@
/*
* Driver for s390 chsc subchannels
*
* Copyright IBM Corp. 2008, 2009
*
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc_sch.h"
#include "ioasm.h"
static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;
#define CHSC_MSG(imp, args...) do { \
debug_sprintf_event(chsc_debug_msg_id, imp, ##args); \
} while (0)
#define CHSC_LOG(imp, txt) do { \
debug_text_event(chsc_debug_log_id, imp, txt); \
} while (0)
static void CHSC_LOG_HEX(int level, void *data, int length)
{
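/* Emit the buffer in buf_size chunks; a single debug event stores at most
* one debug-area buffer's worth of data. */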
while (length > 0) {
debug_event(chsc_debug_log_id, level, data, length);
length -= chsc_debug_log_id->buf_size;
data += chsc_debug_log_id->buf_size;
}
}
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("driver for s390 chsc subchannels");
MODULE_LICENSE("GPL");
static void chsc_subchannel_irq(struct subchannel *sch)
{
struct chsc_private *private = sch->private;
struct chsc_request *request = private->request;
struct irb *irb = (struct irb *)__LC_IRB;
CHSC_LOG(4, "irb");
CHSC_LOG_HEX(4, irb, sizeof(*irb));
/* Copy irb to provided request and set done. */
if (!request) {
CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
sch->schid.ssid, sch->schid.sch_no);
return;
}
private->request = NULL;
memcpy(&request->irb, irb, sizeof(*irb));
cio_update_schib(sch);
complete(&request->completion);
put_device(&sch->dev);
}
static int chsc_subchannel_probe(struct subchannel *sch)
{
struct chsc_private *private;
int ret;
CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
sch->schid.ssid, sch->schid.sch_no);
sch->isc = CHSC_SCH_ISC;
private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private)
return -ENOMEM;
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
if (ret) {
CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
kfree(private);
} else {
sch->private = private;
if (dev_get_uevent_suppress(&sch->dev)) {
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}
}
return ret;
}
static int chsc_subchannel_remove(struct subchannel *sch)
{
struct chsc_private *private;
cio_disable_subchannel(sch);
private = sch->private;
sch->private = NULL;
if (private->request) {
complete(&private->request->completion);
put_device(&sch->dev);
}
kfree(private);
return 0;
}
static void chsc_subchannel_shutdown(struct subchannel *sch)
{
cio_disable_subchannel(sch);
}
static int chsc_subchannel_prepare(struct subchannel *sch)
{
int cc;
struct schib schib;
/*
* Don't allow suspend while the subchannel is not idle
* since we don't have a way to clear the subchannel and
* cannot disable it with a request running.
*/
cc = stsch(sch->schid, &schib);
if (!cc && scsw_stctl(&schib.scsw))
return -EAGAIN;
return 0;
}
static int chsc_subchannel_freeze(struct subchannel *sch)
{
return cio_disable_subchannel(sch);
}
static int chsc_subchannel_restore(struct subchannel *sch)
{
return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
}
static struct css_device_id chsc_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
static struct css_driver chsc_subchannel_driver = {
.owner = THIS_MODULE,
.subchannel_type = chsc_subchannel_ids,
.irq = chsc_subchannel_irq,
.probe = chsc_subchannel_probe,
.remove = chsc_subchannel_remove,
.shutdown = chsc_subchannel_shutdown,
.prepare = chsc_subchannel_prepare,
.freeze = chsc_subchannel_freeze,
.thaw = chsc_subchannel_restore,
.restore = chsc_subchannel_restore,
.name = "chsc_subchannel",
};
static int __init chsc_init_dbfs(void)
{
chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
16 * sizeof(long));
if (!chsc_debug_msg_id)
goto out;
debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
debug_set_level(chsc_debug_msg_id, 2);
chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
if (!chsc_debug_log_id)
goto out;
debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
debug_set_level(chsc_debug_log_id, 2);
return 0;
out:
if (chsc_debug_msg_id)
debug_unregister(chsc_debug_msg_id);
return -ENOMEM;
}
static void chsc_remove_dbfs(void)
{
debug_unregister(chsc_debug_log_id);
debug_unregister(chsc_debug_msg_id);
}
static int __init chsc_init_sch_driver(void)
{
return css_driver_register(&chsc_subchannel_driver);
}
static void chsc_cleanup_sch_driver(void)
{
css_driver_unregister(&chsc_subchannel_driver);
}
static DEFINE_SPINLOCK(chsc_lock);
static int chsc_subchannel_match_next_free(struct device *dev, void *data)
{
struct subchannel *sch = to_subchannel(dev);
return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
}
static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
{
struct device *dev;
dev = driver_find_device(&chsc_subchannel_driver.drv,
sch ? &sch->dev : NULL, NULL,
chsc_subchannel_match_next_free);
return dev ? to_subchannel(dev) : NULL;
}
/**
* chsc_async() - try to start a chsc request asynchronously
* @chsc_area: request to be started
* @request: request structure to associate
*
* Tries to start a chsc request on one of the existing chsc subchannels.
* Returns:
* %0 if the request was performed synchronously
* %-EINPROGRESS if the request was successfully started
* %-EBUSY if all chsc subchannels are busy
* %-ENODEV if no chsc subchannels are available
* Context:
* interrupts disabled, chsc_lock held
*/
static int chsc_async(struct chsc_async_area *chsc_area,
struct chsc_request *request)
{
int cc;
struct chsc_private *private;
struct subchannel *sch = NULL;
int ret = -ENODEV;
char dbf[10];
chsc_area->header.key = PAGE_DEFAULT_KEY;
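/* Try each idle chsc subchannel in turn: cc 0 means the request completed
* synchronously, cc 1 means it was started and will finish with an
* interrupt (see chsc_subchannel_irq). */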
while ((sch = chsc_get_next_subchannel(sch))) {
spin_lock(sch->lock);
private = sch->private;
if (private->request) {
spin_unlock(sch->lock);
ret = -EBUSY;
continue;
}
chsc_area->header.sid = sch->schid;
CHSC_LOG(2, "schid");
CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
cc = chsc(chsc_area);
sprintf(dbf, "cc:%d", cc);
CHSC_LOG(2, dbf);
switch (cc) {
case 0:
ret = 0;
break;
case 1:
sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
ret = -EINPROGRESS;
private->request = request;
break;
case 2:
ret = -EBUSY;
break;
default:
ret = -ENODEV;
}
spin_unlock(sch->lock);
CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
sch->schid.ssid, sch->schid.sch_no, cc);
if (ret == -EINPROGRESS)
return -EINPROGRESS;
put_device(&sch->dev);
if (ret == 0)
return 0;
}
return ret;
}
static void chsc_log_command(struct chsc_async_area *chsc_area)
{
char dbf[10];
sprintf(dbf, "CHSC:%x", chsc_area->header.code);
CHSC_LOG(0, dbf);
CHSC_LOG_HEX(0, chsc_area, 32);
}
static int chsc_examine_irb(struct chsc_request *request)
{
int backed_up;
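/* A chain check means the request was backed up, i.e. not executed; only
* in that case do the remaining subchannel-status bits translate into an
* error for the caller. */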
if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
return -EIO;
backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
if (scsw_cstat(&request->irb.scsw) == 0)
return 0;
if (!backed_up)
return 0;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
return -EIO;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
return -EPERM;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
return -EAGAIN;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
return -EAGAIN;
return -EIO;
}
static int chsc_ioctl_start(void __user *user_area)
{
struct chsc_request *request;
struct chsc_async_area *chsc_area;
int ret;
char dbf[10];
if (!css_general_characteristics.dynio)
/* It makes no sense to try. */
return -EOPNOTSUPP;
chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
if (!chsc_area)
return -ENOMEM;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request) {
ret = -ENOMEM;
goto out_free;
}
init_completion(&request->completion);
if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
ret = -EFAULT;
goto out_free;
}
chsc_log_command(chsc_area);
spin_lock_irq(&chsc_lock);
ret = chsc_async(chsc_area, request);
spin_unlock_irq(&chsc_lock);
if (ret == -EINPROGRESS) {
wait_for_completion(&request->completion);
ret = chsc_examine_irb(request);
}
/* copy area back to user */
if (!ret)
if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
ret = -EFAULT;
out_free:
sprintf(dbf, "ret:%d", ret);
CHSC_LOG(0, dbf);
kfree(request);
free_page((unsigned long)chsc_area);
return ret;
}
static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
struct chsc_chp_cd *cd;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 1;
u32 fmt1 : 4;
u32 cssid : 8;
u32 : 8;
u32 first_chpid : 8;
u32 : 24;
u32 last_chpid : 8;
u32 : 32;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scpcd_area;
scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scpcd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(cd, user_cd, sizeof(*cd))) {
ret = -EFAULT;
goto out_free;
}
scpcd_area->request.length = 0x0010;
scpcd_area->request.code = 0x0028;
scpcd_area->m = cd->m;
scpcd_area->fmt1 = cd->fmt;
scpcd_area->cssid = cd->chpid.cssid;
scpcd_area->first_chpid = cd->chpid.id;
scpcd_area->last_chpid = cd->chpid.id;
ccode = chsc(scpcd_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (scpcd_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "scpcd: response code=%x\n",
scpcd_area->response.code);
goto out_free;
}
memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
if (copy_to_user(user_cd, cd, sizeof(*cd)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(cd);
free_page((unsigned long)scpcd_area);
return ret;
}
static int chsc_ioctl_info_cu(void __user *user_cd)
{
struct chsc_cu_cd *cd;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 1;
u32 fmt1 : 4;
u32 cssid : 8;
u32 : 8;
u32 first_cun : 8;
u32 : 24;
u32 last_cun : 8;
u32 : 32;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scucd_area;
scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scucd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(cd, user_cd, sizeof(*cd))) {
ret = -EFAULT;
goto out_free;
}
scucd_area->request.length = 0x0010;
scucd_area->request.code = 0x0028;
scucd_area->m = cd->m;
scucd_area->fmt1 = cd->fmt;
scucd_area->cssid = cd->cssid;
scucd_area->first_cun = cd->cun;
scucd_area->last_cun = cd->cun;
ccode = chsc(scucd_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (scucd_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "scucd: response code=%x\n",
scucd_area->response.code);
goto out_free;
}
memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
if (copy_to_user(user_cd, cd, sizeof(*cd)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(cd);
free_page((unsigned long)scucd_area);
return ret;
}
static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
struct chsc_sch_cud *cud;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 5;
u32 fmt1 : 4;
u32 : 2;
u32 ssid : 2;
u32 first_sch : 16;
u32 : 8;
u32 cssid : 8;
u32 last_sch : 16;
u32 : 32;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sscud_area;
sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sscud_area)
return -ENOMEM;
cud = kzalloc(sizeof(*cud), GFP_KERNEL);
if (!cud) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(cud, user_cud, sizeof(*cud))) {
ret = -EFAULT;
goto out_free;
}
sscud_area->request.length = 0x0010;
sscud_area->request.code = 0x0006;
sscud_area->m = cud->schid.m;
sscud_area->fmt1 = cud->fmt;
sscud_area->ssid = cud->schid.ssid;
sscud_area->first_sch = cud->schid.sch_no;
sscud_area->cssid = cud->schid.cssid;
sscud_area->last_sch = cud->schid.sch_no;
ccode = chsc(sscud_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sscud_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sscud: response code=%x\n",
sscud_area->response.code);
goto out_free;
}
memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
if (copy_to_user(user_cud, cud, sizeof(*cud)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(cud);
free_page((unsigned long)sscud_area);
return ret;
}
static int chsc_ioctl_conf_info(void __user *user_ci)
{
struct chsc_conf_info *ci;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 1;
u32 fmt1 : 4;
u32 cssid : 8;
u32 : 6;
u32 ssid : 2;
u32 : 8;
u64 : 64;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sci_area;
sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sci_area)
return -ENOMEM;
ci = kzalloc(sizeof(*ci), GFP_KERNEL);
if (!ci) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(ci, user_ci, sizeof(*ci))) {
ret = -EFAULT;
goto out_free;
}
sci_area->request.length = 0x0010;
sci_area->request.code = 0x0012;
sci_area->m = ci->id.m;
sci_area->fmt1 = ci->fmt;
sci_area->cssid = ci->id.cssid;
sci_area->ssid = ci->id.ssid;
ccode = chsc(sci_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sci_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sci: response code=%x\n",
sci_area->response.code);
goto out_free;
}
memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
if (copy_to_user(user_ci, ci, sizeof(*ci)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(ci);
free_page((unsigned long)sci_area);
return ret;
}
static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
struct chsc_comp_list *ccl;
int ret, ccode;
struct {
struct chsc_header request;
u32 ctype : 8;
u32 : 4;
u32 fmt : 4;
u32 : 16;
u64 : 64;
u32 list_parm[2];
u64 : 64;
struct chsc_header response;
u8 data[PAGE_SIZE - 36];
} __attribute__ ((packed)) *sccl_area;
struct {
u32 m : 1;
u32 : 31;
u32 cssid : 8;
u32 : 16;
u32 chpid : 8;
} __attribute__ ((packed)) *chpid_parm;
struct {
u32 f_cssid : 8;
u32 l_cssid : 8;
u32 : 16;
u32 res;
} __attribute__ ((packed)) *cssids_parm;
sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccl_area)
return -ENOMEM;
ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
if (!ccl) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
ret = -EFAULT;
goto out_free;
}
sccl_area->request.length = 0x0020;
sccl_area->request.code = 0x0030;
sccl_area->fmt = ccl->req.fmt;
sccl_area->ctype = ccl->req.ctype;
switch (sccl_area->ctype) {
case CCL_CU_ON_CHP:
case CCL_IOP_CHP:
chpid_parm = (void *)&sccl_area->list_parm;
chpid_parm->m = ccl->req.chpid.m;
chpid_parm->cssid = ccl->req.chpid.chp.cssid;
chpid_parm->chpid = ccl->req.chpid.chp.id;
break;
case CCL_CSS_IMG:
case CCL_CSS_IMG_CONF_CHAR:
cssids_parm = (void *)&sccl_area->list_parm;
cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
break;
}
ccode = chsc(sccl_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sccl_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sccl: response code=%x\n",
sccl_area->response.code);
goto out_free;
}
memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(ccl);
free_page((unsigned long)sccl_area);
return ret;
}
static int chsc_ioctl_chpd(void __user *user_chpd)
{
struct chsc_cpd_info *chpd;
int ret;
chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
if (!chpd)
return -ENOMEM;
if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
ret = -EFAULT;
goto out_free;
}
ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
chpd->rfmt, chpd->c, chpd->m,
&chpd->chpdb);
if (ret)
goto out_free;
if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
ret = -EFAULT;
out_free:
kfree(chpd);
return ret;
}
static int chsc_ioctl_dcal(void __user *user_dcal)
{
struct chsc_dcal *dcal;
int ret, ccode;
struct {
struct chsc_header request;
u32 atype : 8;
u32 : 4;
u32 fmt : 4;
u32 : 16;
u32 res0[2];
u32 list_parm[2];
u32 res1[2];
struct chsc_header response;
u8 data[PAGE_SIZE - 36];
} __attribute__ ((packed)) *sdcal_area;
sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sdcal_area)
return -ENOMEM;
dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
if (!dcal) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
ret = -EFAULT;
goto out_free;
}
sdcal_area->request.length = 0x0020;
sdcal_area->request.code = 0x0034;
sdcal_area->atype = dcal->req.atype;
sdcal_area->fmt = dcal->req.fmt;
memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
sizeof(sdcal_area->list_parm));
ccode = chsc(sdcal_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sdcal_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sdcal: response code=%x\n",
sdcal_area->response.code);
goto out_free;
}
memcpy(&dcal->sdcal, &sdcal_area->response,
sdcal_area->response.length);
if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(dcal);
free_page((unsigned long)sdcal_area);
return ret;
}
static long chsc_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
switch (cmd) {
case CHSC_START:
return chsc_ioctl_start((void __user *)arg);
case CHSC_INFO_CHANNEL_PATH:
return chsc_ioctl_info_channel_path((void __user *)arg);
case CHSC_INFO_CU:
return chsc_ioctl_info_cu((void __user *)arg);
case CHSC_INFO_SCH_CU:
return chsc_ioctl_info_sch_cu((void __user *)arg);
case CHSC_INFO_CI:
return chsc_ioctl_conf_info((void __user *)arg);
case CHSC_INFO_CCL:
return chsc_ioctl_conf_comp_list((void __user *)arg);
case CHSC_INFO_CPD:
return chsc_ioctl_chpd((void __user *)arg);
case CHSC_INFO_DCAL:
return chsc_ioctl_dcal((void __user *)arg);
default: /* unknown ioctl number */
return -ENOIOCTLCMD;
}
}
static const struct file_operations chsc_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = chsc_ioctl,
.compat_ioctl = chsc_ioctl,
};
static struct miscdevice chsc_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "chsc",
.fops = &chsc_fops,
};
static int __init chsc_misc_init(void)
{
return misc_register(&chsc_misc_device);
}
static void chsc_misc_cleanup(void)
{
misc_deregister(&chsc_misc_device);
}
static int __init chsc_sch_init(void)
{
int ret;
ret = chsc_init_dbfs();
if (ret)
return ret;
isc_register(CHSC_SCH_ISC);
ret = chsc_init_sch_driver();
if (ret)
goto out_dbf;
ret = chsc_misc_init();
if (ret)
goto out_driver;
return ret;
out_driver:
chsc_cleanup_sch_driver();
out_dbf:
isc_unregister(CHSC_SCH_ISC);
chsc_remove_dbfs();
return ret;
}
static void __exit chsc_sch_exit(void)
{
chsc_misc_cleanup();
chsc_cleanup_sch_driver();
isc_unregister(CHSC_SCH_ISC);
chsc_remove_dbfs();
}
module_init(chsc_sch_init);
module_exit(chsc_sch_exit);

View File

@@ -0,0 +1,13 @@
#ifndef _CHSC_SCH_H
#define _CHSC_SCH_H
struct chsc_request {
struct completion completion;
struct irb irb;
};
struct chsc_private {
struct chsc_request *request;
};
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,143 @@
#ifndef S390_CIO_H
#define S390_CIO_H
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <asm/chpid.h>
#include <asm/cio.h>
#include <asm/fcx.h>
#include <asm/schid.h>
#include "chsc.h"
/*
* path management control word
*/
struct pmcw {
u32 intparm; /* interruption parameter */
u32 qf : 1; /* qdio facility */
u32 w : 1;
u32 isc : 3; /* interruption subclass */
u32 res5 : 3; /* reserved zeros */
u32 ena : 1; /* enabled */
u32 lm : 2; /* limit mode */
u32 mme : 2; /* measurement-mode enable */
u32 mp : 1; /* multipath mode */
u32 tf : 1; /* timing facility */
u32 dnv : 1; /* device number valid */
u32 dev : 16; /* device number */
u8 lpm; /* logical path mask */
u8 pnom; /* path not operational mask */
u8 lpum; /* last path used mask */
u8 pim; /* path installed mask */
u16 mbi; /* measurement-block index */
u8 pom; /* path operational mask */
u8 pam; /* path available mask */
u8 chpid[8]; /* CHPID 0-7 (if available) */
u32 unused1 : 8; /* reserved zeros */
u32 st : 3; /* subchannel type */
u32 unused2 : 18; /* reserved zeros */
u32 mbfc : 1; /* measurement block format control */
u32 xmwme : 1; /* extended measurement word mode enable */
u32 csense : 1; /* concurrent sense; can be enabled ... */
/* ... per MSCH, however, if facility */
/* ... is not installed, this results */
/* ... in an operand exception. */
} __attribute__ ((packed));
/* Target SCHIB configuration. */
struct schib_config {
u64 mba;
u32 intparm;
u16 mbi;
u32 isc:3;
u32 ena:1;
u32 mme:2;
u32 mp:1;
u32 csense:1;
u32 mbfc:1;
} __attribute__ ((packed));
/*
* subchannel information block
*/
struct schib {
struct pmcw pmcw; /* path management control word */
union scsw scsw; /* subchannel status word */
__u64 mba; /* measurement block address */
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
/* subchannel data structure used by I/O subroutines */
struct subchannel {
struct subchannel_id schid;
spinlock_t *lock; /* subchannel lock */
struct mutex reg_mutex;
enum {
SUBCHANNEL_TYPE_IO = 0,
SUBCHANNEL_TYPE_CHSC = 1,
SUBCHANNEL_TYPE_MSG = 2,
SUBCHANNEL_TYPE_ADM = 3,
} st; /* subchannel type */
struct {
unsigned int suspend:1; /* allow suspend */
unsigned int prefetch:1;/* deny prefetch */
unsigned int inter:1; /* suppress intermediate interrupts */
} __attribute__ ((packed)) options;
__u8 vpm; /* verified path mask */
__u8 lpm; /* logical path mask */
__u8 opm; /* operational path mask */
struct schib schib; /* subchannel information block */
int isc; /* desired interruption subclass */
struct chsc_ssd_info ssd_info; /* subchannel description */
struct device dev; /* entry in device tree */
struct css_driver *driver;
void *private; /* private per subchannel type data */
struct work_struct work;
struct schib_config config;
} __attribute__ ((aligned(8)));
#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
#define to_subchannel(n) container_of(n, struct subchannel, dev)
extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
extern int cio_enable_subchannel(struct subchannel *, u32);
extern int cio_disable_subchannel (struct subchannel *);
extern int cio_cancel (struct subchannel *);
extern int cio_clear (struct subchannel *);
extern int cio_resume (struct subchannel *);
extern int cio_halt (struct subchannel *);
extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *);
extern int cio_update_schib(struct subchannel *sch);
extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
int cio_create_sch_lock(struct subchannel *);
void do_adapter_IO(u8 isc);
void do_IRQ(struct pt_regs *);
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void);
extern spinlock_t * cio_get_console_lock(void);
extern void *cio_get_console_priv(void);
#else
#define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL
#define cio_get_console_lock() NULL
#define cio_get_console_priv() NULL
#endif
#endif


@@ -0,0 +1,34 @@
#ifndef CIO_DEBUG_H
#define CIO_DEBUG_H
#include <asm/debug.h>
/* for use of debug feature */
extern debug_info_t *cio_debug_msg_id;
extern debug_info_t *cio_debug_trace_id;
extern debug_info_t *cio_debug_crw_id;
#define CIO_TRACE_EVENT(imp, txt) do { \
debug_text_event(cio_debug_trace_id, imp, txt); \
} while (0)
#define CIO_MSG_EVENT(imp, args...) do { \
debug_sprintf_event(cio_debug_msg_id, imp , ##args); \
} while (0)
#define CIO_CRW_EVENT(imp, args...) do { \
debug_sprintf_event(cio_debug_crw_id, imp , ##args); \
} while (0)
static inline void CIO_HEX_EVENT(int level, void *data, int length)
{
if (unlikely(!cio_debug_trace_id))
return;
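/* A single debug event holds at most buf_size bytes, so log the buffer in chunks. */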
while (length > 0) {
debug_event(cio_debug_trace_id, level, data, length);
length -= cio_debug_trace_id->buf_size;
data += cio_debug_trace_id->buf_size;
}
}
#endif

File diff suppressed because it is too large


@@ -0,0 +1,159 @@
/*
* Channel report handling code
*
* Copyright IBM Corp. 2000,2009
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <asm/crw.h>
static struct semaphore crw_semaphore;
static DEFINE_MUTEX(crw_handler_mutex);
static crw_handler_t crw_handlers[NR_RSCS];
/**
* crw_register_handler() - register a channel report word handler
* @rsc: reporting source code to handle
* @handler: handler to be registered
*
* Returns %0 on success and a negative error value otherwise.
*/
int crw_register_handler(int rsc, crw_handler_t handler)
{
int rc = 0;
if ((rsc < 0) || (rsc >= NR_RSCS))
return -EINVAL;
mutex_lock(&crw_handler_mutex);
if (crw_handlers[rsc])
rc = -EBUSY;
else
crw_handlers[rsc] = handler;
mutex_unlock(&crw_handler_mutex);
return rc;
}
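/*
 * Usage sketch (illustrative, not part of this file): a subsystem
 * registers its handler once at init time:
 *
 *	static void my_crw_handler(struct crw *crw0, struct crw *crw1,
 *				   int overflow);
 *
 *	rc = crw_register_handler(CRW_RSC_SCH, my_crw_handler);
 *
 * my_crw_handler is a hypothetical name; CRW_RSC_SCH comes from
 * <asm/crw.h>.
 */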
/**
* crw_unregister_handler() - unregister a channel report word handler
* @rsc: reporting source code to handle
*/
void crw_unregister_handler(int rsc)
{
if ((rsc < 0) || (rsc >= NR_RSCS))
return;
mutex_lock(&crw_handler_mutex);
crw_handlers[rsc] = NULL;
mutex_unlock(&crw_handler_mutex);
}
/*
* Retrieve CRWs and call function to handle event.
*/
static int crw_collect_info(void *unused)
{
struct crw crw[2];
int ccode;
unsigned int chain;
int ignore;
repeat:
ignore = down_interruptible(&crw_semaphore);
chain = 0;
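/* Drain all pending CRWs; a chained pair is collected into crw[0] and crw[1] before the handler is called. */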
while (1) {
crw_handler_t handler;
if (unlikely(chain > 1)) {
struct crw tmp_crw;
printk(KERN_WARNING"%s: Code does not support more "
"than two chained crws; please report to "
"linux390@de.ibm.com!\n", __func__);
ccode = stcrw(&tmp_crw);
printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
__func__, tmp_crw.slct, tmp_crw.oflw,
tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
tmp_crw.erc, tmp_crw.rsid);
printk(KERN_WARNING"%s: This was crw number %x in the "
"chain\n", __func__, chain);
if (ccode != 0)
break;
chain = tmp_crw.chn ? chain + 1 : 0;
continue;
}
ccode = stcrw(&crw[chain]);
if (ccode != 0)
break;
printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw[chain].slct, crw[chain].oflw, crw[chain].chn,
crw[chain].rsc, crw[chain].anc, crw[chain].erc,
crw[chain].rsid);
/* Check for overflows. */
if (crw[chain].oflw) {
int i;
pr_debug("%s: crw overflow detected!\n", __func__);
mutex_lock(&crw_handler_mutex);
for (i = 0; i < NR_RSCS; i++) {
if (crw_handlers[i])
crw_handlers[i](NULL, NULL, 1);
}
mutex_unlock(&crw_handler_mutex);
chain = 0;
continue;
}
if (crw[0].chn && !chain) {
chain++;
continue;
}
mutex_lock(&crw_handler_mutex);
handler = crw_handlers[crw[chain].rsc];
if (handler)
handler(&crw[0], chain ? &crw[1] : NULL, 0);
mutex_unlock(&crw_handler_mutex);
/* chain is always 0 or 1 here. */
chain = crw[chain].chn ? chain + 1 : 0;
}
goto repeat;
return 0;
}
void crw_handle_channel_report(void)
{
up(&crw_semaphore);
}
/*
* Separate initcall needed for semaphore initialization since
* crw_handle_channel_report might be called before crw_machine_check_init.
*/
static int __init crw_init_semaphore(void)
{
init_MUTEX_LOCKED(&crw_semaphore);
return 0;
}
pure_initcall(crw_init_semaphore);
/*
* Machine checks for the channel subsystem must be enabled
* after the channel subsystem is initialized
*/
static int __init crw_machine_check_init(void)
{
struct task_struct *task;
task = kthread_run(crw_collect_info, NULL, "kmcheck");
if (IS_ERR(task))
return PTR_ERR(task);
ctl_set_bit(14, 28); /* enable channel report MCH */
return 0;
}
device_initcall(crw_machine_check_init);

File diff suppressed because it is too large


@@ -0,0 +1,154 @@
#ifndef _CSS_H
#define _CSS_H
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/types.h>
#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/schid.h>
/*
* path grouping stuff
*/
#define SPID_FUNC_SINGLE_PATH 0x00
#define SPID_FUNC_MULTI_PATH 0x80
#define SPID_FUNC_ESTABLISH 0x00
#define SPID_FUNC_RESIGN 0x40
#define SPID_FUNC_DISBAND 0x20
#define SNID_STATE1_RESET 0
#define SNID_STATE1_UNGROUPED 2
#define SNID_STATE1_GROUPED 3
#define SNID_STATE2_NOT_RESVD 0
#define SNID_STATE2_RESVD_ELSE 2
#define SNID_STATE2_RESVD_SELF 3
#define SNID_STATE3_MULTI_PATH 1
#define SNID_STATE3_SINGLE_PATH 0
struct path_state {
__u8 state1 : 2; /* path state value 1 */
__u8 state2 : 2; /* path state value 2 */
__u8 state3 : 1; /* path state value 3 */
__u8 resvd : 3; /* reserved */
} __attribute__ ((packed));
struct extended_cssid {
u8 version;
u8 cssid;
} __attribute__ ((packed));
struct pgid {
union {
__u8 fc; /* SPID function code */
struct path_state ps; /* SNID path state */
} __attribute__ ((packed)) inf;
union {
__u32 cpu_addr : 16; /* CPU address */
struct extended_cssid ext_cssid;
} __attribute__ ((packed)) pgid_high;
__u32 cpu_id : 24; /* CPU identification */
__u32 cpu_model : 16; /* CPU model */
__u32 tod_high; /* high word TOD clock */
} __attribute__ ((packed));
struct subchannel;
struct chp_link;
/**
* struct css_driver - device driver for subchannels
* @owner: owning module
* @subchannel_type: subchannel type supported by this driver
* @drv: embedded device driver structure
* @irq: called on interrupts
* @chp_event: called for events affecting a channel path
* @sch_event: called for events affecting the subchannel
* @probe: function called on probe
* @remove: function called on remove
* @shutdown: called at device shutdown
* @prepare: prepare for pm state transition
* @complete: undo work done in @prepare
* @freeze: callback for freezing during hibernation snapshotting
* @thaw: undo work done in @freeze
* @restore: callback for restoring after hibernation
* @settle: wait for asynchronous work to finish
* @name: name of the device driver
*/
struct css_driver {
struct module *owner;
struct css_device_id *subchannel_type;
struct device_driver drv;
void (*irq)(struct subchannel *);
int (*chp_event)(struct subchannel *, struct chp_link *, int);
int (*sch_event)(struct subchannel *, int);
int (*probe)(struct subchannel *);
int (*remove)(struct subchannel *);
void (*shutdown)(struct subchannel *);
int (*prepare) (struct subchannel *);
void (*complete) (struct subchannel *);
int (*freeze)(struct subchannel *);
int (*thaw) (struct subchannel *);
int (*restore)(struct subchannel *);
void (*settle)(void);
const char *name;
};
#define to_cssdriver(n) container_of(n, struct css_driver, drv)
/*
* all css_drivers have the css_bus_type
*/
extern struct bus_type css_bus_type;
extern int css_driver_register(struct css_driver *);
extern void css_driver_unregister(struct css_driver *);
extern void css_sch_device_unregister(struct subchannel *);
extern int css_probe_device(struct subchannel_id);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
extern int max_ssid;
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data);
extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
extern void css_reiterate_subchannels(void);
void css_update_ssd_info(struct subchannel *sch);
#define __MAX_SUBCHANNEL 65535
#define __MAX_SSID 3
struct channel_subsystem {
u8 cssid;
int valid;
struct channel_path *chps[__MAX_CHPID + 1];
struct device device;
struct pgid global_pgid;
struct mutex mutex;
/* channel measurement related */
int cm_enabled;
void *cub_addr1;
void *cub_addr2;
/* for orphaned ccw devices */
struct subchannel *pseudo_subchannel;
};
#define to_css(dev) container_of(dev, struct channel_subsystem, device)
extern struct channel_subsystem *channel_subsystems[];
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
int sch_is_pseudo_sch(struct subchannel *);
struct schib;
int css_sch_is_valid(struct schib *);
extern struct workqueue_struct *slow_path_wq;
void css_wait_for_slow_path(void);
#endif

File diff suppressed because it is too large


@@ -0,0 +1,142 @@
#ifndef S390_DEVICE_H
#define S390_DEVICE_H
#include <asm/ccwdev.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include "io_sch.h"
/*
* states of the device statemachine
*/
enum dev_state {
DEV_STATE_NOT_OPER,
DEV_STATE_SENSE_PGID,
DEV_STATE_SENSE_ID,
DEV_STATE_OFFLINE,
DEV_STATE_VERIFY,
DEV_STATE_ONLINE,
DEV_STATE_W4SENSE,
DEV_STATE_DISBAND_PGID,
DEV_STATE_BOXED,
/* states to wait for i/o completion before doing something */
DEV_STATE_CLEAR_VERIFY,
DEV_STATE_TIMEOUT_KILL,
DEV_STATE_QUIESCE,
/* special states for devices gone not operational */
DEV_STATE_DISCONNECTED,
DEV_STATE_DISCONNECTED_SENSE_ID,
DEV_STATE_CMFCHANGE,
DEV_STATE_CMFUPDATE,
/* last element! */
NR_DEV_STATES
};
/*
* asynchronous events of the device statemachine
*/
enum dev_event {
DEV_EVENT_NOTOPER,
DEV_EVENT_INTERRUPT,
DEV_EVENT_TIMEOUT,
DEV_EVENT_VERIFY,
/* last element! */
NR_DEV_EVENTS
};
struct ccw_device;
/*
* action called through jumptable
*/
typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
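/*
 * Dispatch an event to the handler registered for the device's current
 * state in the two-dimensional jump table above.
 */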
static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
}
/*
* Returns 1 if the device state is final.
*/
static inline int
dev_fsm_final_state(struct ccw_device *cdev)
{
return (cdev->private->state == DEV_STATE_NOT_OPER ||
cdev->private->state == DEV_STATE_OFFLINE ||
cdev->private->state == DEV_STATE_ONLINE ||
cdev->private->state == DEV_STATE_BOXED);
}
extern struct workqueue_struct *ccw_device_work;
extern wait_queue_head_t ccw_device_init_wq;
extern atomic_t ccw_device_init_count;
int __init io_subchannel_init(void);
void io_subchannel_recog_done(struct ccw_device *cdev);
void io_subchannel_init_config(struct subchannel *sch);
int ccw_device_cancel_halt_clear(struct ccw_device *);
void ccw_device_do_unbind_bind(struct work_struct *);
void ccw_device_move_to_orphanage(struct work_struct *);
int ccw_device_is_orphan(struct ccw_device *);
int ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
void ccw_device_update_sense_data(struct ccw_device *);
int ccw_device_test_sense_data(struct ccw_device *);
void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
int ccw_device_do_sense(struct ccw_device *, struct irb *);
/* Function prototypes for sense id stuff. */
void ccw_device_sense_id_start(struct ccw_device *);
void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event);
void ccw_device_sense_id_done(struct ccw_device *, int);
/* Function prototypes for path grouping stuff. */
void ccw_device_sense_pgid_start(struct ccw_device *);
void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event);
void ccw_device_sense_pgid_done(struct ccw_device *, int);
void ccw_device_verify_start(struct ccw_device *);
void ccw_device_verify_irq(struct ccw_device *, enum dev_event);
void ccw_device_verify_done(struct ccw_device *, int);
void ccw_device_disband_start(struct ccw_device *);
void ccw_device_disband_irq(struct ccw_device *, enum dev_event);
void ccw_device_disband_done(struct ccw_device *, int);
int ccw_device_call_handler(struct ccw_device *);
int ccw_device_stlck(struct ccw_device *);
/* Helper function for machine check handling. */
void ccw_device_trigger_reprobe(struct ccw_device *);
void ccw_device_kill_io(struct ccw_device *);
int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);
/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
extern struct bus_type ccw_bus_type;
/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
void cmf_retry_copy_block(struct ccw_device *);
int cmf_reenable(struct ccw_device *);
int ccw_set_cmf(struct ccw_device *cdev, int enable);
extern struct device_attribute dev_attr_cmb_enable;
#endif

File diff suppressed because it is too large


@@ -0,0 +1,323 @@
/*
* drivers/s390/cio/device_id.c
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Sense ID functions.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/ccwdev.h>
#include <asm/delay.h>
#include <asm/cio.h>
#include <asm/lowcore.h>
#include <asm/diag.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
/**
* vm_vdev_to_cu_type - Convert vm virtual device into control unit type
* for certain devices.
* @class: virtual device class
* @type: virtual device type
*
* Returns control unit type if a match was made or %0xffff otherwise.
*/
static int vm_vdev_to_cu_type(int class, int type)
{
static struct {
int class, type, cu_type;
} vm_devices[] = {
{ 0x08, 0x01, 0x3480 },
{ 0x08, 0x02, 0x3430 },
{ 0x08, 0x10, 0x3420 },
{ 0x08, 0x42, 0x3424 },
{ 0x08, 0x44, 0x9348 },
{ 0x08, 0x81, 0x3490 },
{ 0x08, 0x82, 0x3422 },
{ 0x10, 0x41, 0x1403 },
{ 0x10, 0x42, 0x3211 },
{ 0x10, 0x43, 0x3203 },
{ 0x10, 0x45, 0x3800 },
{ 0x10, 0x47, 0x3262 },
{ 0x10, 0x48, 0x3820 },
{ 0x10, 0x49, 0x3800 },
{ 0x10, 0x4a, 0x4245 },
{ 0x10, 0x4b, 0x4248 },
{ 0x10, 0x4d, 0x3800 },
{ 0x10, 0x4e, 0x3820 },
{ 0x10, 0x4f, 0x3820 },
{ 0x10, 0x82, 0x2540 },
{ 0x10, 0x84, 0x3525 },
{ 0x20, 0x81, 0x2501 },
{ 0x20, 0x82, 0x2540 },
{ 0x20, 0x84, 0x3505 },
{ 0x40, 0x01, 0x3278 },
{ 0x40, 0x04, 0x3277 },
{ 0x40, 0x80, 0x2250 },
{ 0x40, 0xc0, 0x5080 },
{ 0x80, 0x00, 0x3215 },
};
int i;
for (i = 0; i < ARRAY_SIZE(vm_devices); i++)
if (class == vm_devices[i].class && type == vm_devices[i].type)
return vm_devices[i].cu_type;
return 0xffff;
}
/**
* diag_get_dev_info - retrieve device information via DIAG X'210'
* @devno: device number
* @ps: pointer to sense ID data area
*
* Returns zero on success, non-zero otherwise.
*/
static int diag_get_dev_info(u16 devno, struct senseid *ps)
{
struct diag210 diag_data;
int ccode;
CIO_TRACE_EVENT (4, "VMvdinf");
diag_data = (struct diag210) {
.vrdcdvno = devno,
.vrdclen = sizeof (diag_data),
};
ccode = diag210 (&diag_data);
if ((ccode == 0) || (ccode == 2)) {
ps->reserved = 0xff;
/* Special case for osa devices. */
if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) {
ps->cu_type = 0x3088;
ps->cu_model = 0x60;
return 0;
}
ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla,
diag_data.vrdcvtyp);
if (ps->cu_type != 0xffff)
return 0;
}
CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
"vdev class : %02X, vdev type : %04X \n ... "
"rdev class : %02X, rdev type : %04X, "
"rdev model: %02X\n",
devno, ccode,
diag_data.vrdcvcla, diag_data.vrdcvtyp,
diag_data.vrdcrccl, diag_data.vrdccrty,
diag_data.vrdccrmd);
return -ENODEV;
}
/*
* Start Sense ID helper function.
* Try to obtain the 'control unit'/'device type' information
* associated with the subchannel.
*/
static int
__ccw_device_sense_id_start(struct ccw_device *cdev)
{
struct subchannel *sch;
struct ccw1 *ccw;
int ret;
sch = to_subchannel(cdev->dev.parent);
/* Setup sense channel program. */
ccw = cdev->private->iccws;
ccw->cmd_code = CCW_CMD_SENSE_ID;
ccw->cda = (__u32) __pa (&cdev->private->senseid);
ccw->count = sizeof (struct senseid);
ccw->flags = CCW_FLAG_SLI;
/* Reset device status. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Try on every path. */
ret = -ENODEV;
while (cdev->private->imask != 0) {
cdev->private->senseid.cu_type = 0xFFFF;
if ((sch->opm & cdev->private->imask) != 0 &&
cdev->private->iretry > 0) {
cdev->private->iretry--;
/* Reset internal retry indication. */
cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
if (ret != -EACCES)
return ret;
}
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
}
return ret;
}
void
ccw_device_sense_id_start(struct ccw_device *cdev)
{
int ret;
memset (&cdev->private->senseid, 0, sizeof (struct senseid));
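/* Start with the first path (leftmost bit of the path mask) and allow five attempts per path. */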
cdev->private->imask = 0x80;
cdev->private->iretry = 5;
ret = __ccw_device_sense_id_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_id_done(cdev, ret);
}
/*
* Called from interrupt context to check if a valid answer
* to Sense ID was received.
*/
static int
ccw_device_check_sense_id(struct ccw_device *cdev)
{
struct subchannel *sch;
struct irb *irb;
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
/* Check the error cases. */
if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
/* Retry Sense ID if requested. */
if (cdev->private->flags.intretry) {
cdev->private->flags.intretry = 0;
return -EAGAIN;
}
return -ETIME;
}
if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
/*
* if the device doesn't support the SenseID
* command, further retries wouldn't help ...
* NB: We don't check here for intervention required like we
* did before, because tape devices with no tape inserted
* may present this status *in conjunction with* the
* sense id information. So, for intervention required,
* we use the "whack it until it talks" strategy...
*/
CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel "
"0.%x.%04x reports cmd reject\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no);
return -EOPNOTSUPP;
}
if (irb->esw.esw0.erw.cons) {
CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
"lpum %02X, cnt %02d, sns :"
" %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
irb->ecw[2], irb->ecw[3],
irb->ecw[4], irb->ecw[5],
irb->ecw[6], irb->ecw[7]);
return -EAGAIN;
}
if (irb->scsw.cmd.cc == 3) {
u8 lpm;
lpm = to_io_private(sch)->orb.cmd.lpm;
if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
"on subchannel 0.%x.%04x is "
"'not operational'\n", lpm,
cdev->private->dev_id.devno,
sch->schid.ssid, sch->schid.sch_no);
return -EACCES;
}
/* Did we get a proper answer ? */
if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
cdev->private->senseid.reserved == 0xFF) {
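/* A residual count below sizeof(senseid) - 8 means the device returned more than the basic sense-ID data, i.e. extended sense-ID (CIW) information is present. */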
if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
cdev->private->flags.esid = 1;
return 0; /* Success */
}
/* Hmm, whatever happened, try again. */
CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
"subchannel 0.%x.%04x returns status %02X%02X\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no,
irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
return -EAGAIN;
}
/*
* Got interrupt for Sense ID.
*/
void
ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
struct irb *irb;
int ret;
sch = to_subchannel(cdev->dev.parent);
irb = (struct irb *) __LC_IRB;
/* Retry sense id, if needed. */
if (irb->scsw.cmd.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) {
ret = __ccw_device_sense_id_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_id_done(cdev, ret);
}
return;
}
if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
return;
ret = ccw_device_check_sense_id(cdev);
memset(&cdev->private->irb, 0, sizeof(struct irb));
switch (ret) {
/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */
case 0: /* Sense id succeeded. */
case -ETIME: /* Sense id stopped by timeout. */
ccw_device_sense_id_done(cdev, ret);
break;
case -EACCES: /* channel is not operational. */
sch->lpm &= ~cdev->private->imask;
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
/* fall through. */
case -EAGAIN: /* try again. */
ret = __ccw_device_sense_id_start(cdev);
if (ret == 0 || ret == -EBUSY)
break;
/* fall through. */
default: /* Sense ID failed. Try asking VM. */
if (MACHINE_IS_VM)
ret = diag_get_dev_info(cdev->private->dev_id.devno,
&cdev->private->senseid);
else
/*
* If we couldn't identify the device type, we
* consider the device "not operational".
*/
ret = -ENODEV;
ccw_device_sense_id_done(cdev, ret);
break;
}
}


@@ -0,0 +1,716 @@
/*
* Copyright IBM Corp. 2002, 2009
*
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/chpid.h>
#include <asm/fcx.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
#include "chp.h"
/**
* ccw_device_set_options_mask() - set some options and unset the rest
* @cdev: device for which the options are to be set
* @flags: options to be set
*
* All flags specified in @flags are set, all flags not specified in @flags
* are cleared.
* Returns:
* %0 on success, -%EINVAL on an invalid flag combination.
*/
int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
/*
* The flags are mutually exclusive ...
*/
if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
(flags & CCWDEV_REPORT_ALL))
return -EINVAL;
cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
return 0;
}
/**
* ccw_device_set_options() - set some options
* @cdev: device for which the options are to be set
* @flags: options to be set
*
* All flags specified in @flags are set, the remainder is left untouched.
* Returns:
* %0 on success, -%EINVAL if an invalid flag combination would ensue.
*/
int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
/*
* The flags are mutually exclusive ...
*/
if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
(flags & CCWDEV_REPORT_ALL)) ||
((flags & CCWDEV_EARLY_NOTIFICATION) &&
cdev->private->options.repall) ||
((flags & CCWDEV_REPORT_ALL) &&
cdev->private->options.fast))
return -EINVAL;
cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
return 0;
}
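/*
 * Usage sketch (illustrative, not part of this file): a driver would
 * typically set its options from its probe routine before bringing the
 * device online, e.g.
 *
 *	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
 */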
/**
* ccw_device_clear_options() - clear some options
* @cdev: device for which the options are to be cleared
* @flags: options to be cleared
*
* All flags specified in @flags are cleared, the remainder is left untouched.
*/
void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
}
/**
* ccw_device_clear() - terminate I/O request processing
* @cdev: target ccw device
* @intparm: interruption parameter; value is only used if no I/O is
* outstanding, otherwise the intparm associated with the I/O request
* is returned
*
* ccw_device_clear() calls csch on @cdev's subchannel.
* Returns:
* %0 on success,
* -%ENODEV on device not operational,
* -%EINVAL on invalid device state.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
struct subchannel *sch;
int ret;
if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
ret = cio_clear(sch);
if (ret == 0)
cdev->private->intparm = intparm;
return ret;
}
/**
* ccw_device_start_key() - start a s390 channel program with key
* @cdev: target ccw device
* @cpa: logical start address of channel program
* @intparm: user specific interruption parameter; will be presented back to
* @cdev's interrupt handler. Allows a device driver to associate
* the interrupt with a particular I/O request.
* @lpm: defines the channel path to be used for a specific I/O request. A
* value of 0 will make cio use the opm.
* @key: storage key to be used for the I/O
* @flags: additional flags; defines the action to be performed for I/O
* processing.
*
* Start a S/390 channel program. When the interrupt arrives, the
* IRQ handler is called, either immediately, delayed (dev-end missing,
* or sense required) or never (no IRQ handler registered).
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
* -%EACCES, if no path specified in @lpm is operational;
* -%ENODEV, if the device is not operational.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
unsigned long intparm, __u8 lpm, __u8 key,
unsigned long flags)
{
struct subchannel *sch;
int ret;
if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state == DEV_STATE_VERIFY ||
cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = 1;
cdev->private->intparm = intparm;
return 0;
} else
/* There's already a fake I/O around. */
return -EBUSY;
}
if (cdev->private->state != DEV_STATE_ONLINE ||
((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
!(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
cdev->private->flags.doverify)
return -EBUSY;
ret = cio_set_options (sch, flags);
if (ret)
return ret;
/* Adjust the requested path mask to exclude varied-off paths. */
if (lpm) {
lpm &= sch->opm;
if (lpm == 0)
return -EACCES;
}
ret = cio_start_key (sch, cpa, lpm, key);
switch (ret) {
case 0:
cdev->private->intparm = intparm;
break;
case -EACCES:
case -ENODEV:
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
break;
}
return ret;
}
/**
* ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
* @cdev: target ccw device
* @cpa: logical start address of channel program
* @intparm: user specific interruption parameter; will be presented back to
* @cdev's interrupt handler. Allows a device driver to associate
* the interrupt with a particular I/O request.
* @lpm: defines the channel path to be used for a specific I/O request. A
* value of 0 will make cio use the opm.
* @key: storage key to be used for the I/O
* @flags: additional flags; defines the action to be performed for I/O
* processing.
* @expires: timeout value in jiffies
*
* Start a S/390 channel program. When the interrupt arrives, the
* IRQ handler is called, either immediately, delayed (dev-end missing,
* or sense required) or never (no IRQ handler registered).
* This function notifies the device driver if the channel program has not
* completed during the time specified by @expires. If a timeout occurs, the
* channel program is terminated via xsch, hsch or csch, and the device's
* interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
* -%EACCES, if no path specified in @lpm is operational;
* -%ENODEV, if the device is not operational.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
unsigned long intparm, __u8 lpm, __u8 key,
unsigned long flags, int expires)
{
int ret;
if (!cdev)
return -ENODEV;
ccw_device_set_timeout(cdev, expires);
ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
if (ret != 0)
ccw_device_set_timeout(cdev, 0);
return ret;
}
/**
* ccw_device_start() - start a s390 channel program
* @cdev: target ccw device
* @cpa: logical start address of channel program
* @intparm: user specific interruption parameter; will be presented back to
* @cdev's interrupt handler. Allows a device driver to associate
* the interrupt with a particular I/O request.
* @lpm: defines the channel path to be used for a specific I/O request. A
* value of 0 will make cio use the opm.
* @flags: additional flags; defines the action to be performed for I/O
* processing.
*
* Start a S/390 channel program. When the interrupt arrives, the
* IRQ handler is called, either immediately, delayed (dev-end missing,
* or sense required) or never (no IRQ handler registered).
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
* -%EACCES, if no path specified in @lpm is operational;
* -%ENODEV, if the device is not operational.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
unsigned long intparm, __u8 lpm, unsigned long flags)
{
return ccw_device_start_key(cdev, cpa, intparm, lpm,
PAGE_DEFAULT_KEY, flags);
}
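/*
 * Usage sketch (illustrative, not part of this file): per the context
 * requirements above, callers hold the ccw device lock, e.g.
 *
 *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *	rc = ccw_device_start(cdev, cpa, (unsigned long)my_req, 0, 0);
 *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *
 * my_req is a hypothetical per-request cookie that is passed back to
 * the interrupt handler as intparm.
 */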
/**
* ccw_device_start_timeout() - start a s390 channel program with timeout
* @cdev: target ccw device
* @cpa: logical start address of channel program
* @intparm: user specific interruption parameter; will be presented back to
* @cdev's interrupt handler. Allows a device driver to associate
* the interrupt with a particular I/O request.
* @lpm: defines the channel path to be used for a specific I/O request. A
* value of 0 will make cio use the opm.
* @flags: additional flags; defines the action to be performed for I/O
* processing.
* @expires: timeout value in jiffies
*
* Start a S/390 channel program. When the interrupt arrives, the
* IRQ handler is called, either immediately, delayed (dev-end missing,
* or sense required) or never (no IRQ handler registered).
* This function notifies the device driver if the channel program has not
* completed during the time specified by @expires. If a timeout occurs, the
* channel program is terminated via xsch, hsch or csch, and the device's
* interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
* -%EACCES, if no path specified in @lpm is operational;
* -%ENODEV, if the device is not operational.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
unsigned long intparm, __u8 lpm,
unsigned long flags, int expires)
{
return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
PAGE_DEFAULT_KEY, flags,
expires);
}
/**
* ccw_device_halt() - halt I/O request processing
* @cdev: target ccw device
* @intparm: interruption parameter; value is only used if no I/O is
* outstanding, otherwise the intparm associated with the I/O request
* is returned
*
* ccw_device_halt() calls hsch on @cdev's subchannel.
* Returns:
* %0 on success,
* -%ENODEV on device not operational,
* -%EINVAL on invalid device state,
* -%EBUSY on device busy or interrupt pending.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
struct subchannel *sch;
int ret;
if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
ret = cio_halt(sch);
if (ret == 0)
cdev->private->intparm = intparm;
return ret;
}
/**
* ccw_device_resume() - resume channel program execution
* @cdev: target ccw device
*
* ccw_device_resume() calls rsch on @cdev's subchannel.
* Returns:
* %0 on success,
* -%ENODEV on device not operational,
* -%EINVAL on invalid device state,
* -%EBUSY on device busy or interrupt pending.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_resume(struct ccw_device *cdev)
{
struct subchannel *sch;
if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE ||
!(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
return -EINVAL;
return cio_resume(sch);
}
/*
* Pass interrupt to device driver.
*/
int
ccw_device_call_handler(struct ccw_device *cdev)
{
struct subchannel *sch;
unsigned int stctl;
int ending_status;
sch = to_subchannel(cdev->dev.parent);
/*
* we pass the interrupt on to the device driver's handler if one of the following holds:
* - we received ending status
* - the action handler requested to see all interrupts
* - we received an intermediate status
* - fast notification was requested (primary status)
* - unsolicited interrupts
*/
stctl = scsw_stctl(&cdev->private->irb.scsw);
ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
(stctl == SCSW_STCTL_STATUS_PEND);
if (!ending_status &&
!cdev->private->options.repall &&
!(stctl & SCSW_STCTL_INTER_STATUS) &&
!(cdev->private->options.fast &&
(stctl & SCSW_STCTL_PRIM_STATUS)))
return 0;
/* Clear pending timers for device driver initiated I/O. */
if (ending_status)
ccw_device_set_timeout(cdev, 0);
/*
* Now we are ready to call the device driver interrupt handler.
*/
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
&cdev->private->irb);
/*
* Clear the old and now useless interrupt response block.
*/
memset(&cdev->private->irb, 0, sizeof(struct irb));
return 1;
}
/**
* ccw_device_get_ciw() - Search for CIW command in extended sense data.
* @cdev: ccw device to inspect
* @ct: command type to look for
*
* During SenseID, command information words (CIWs) describing special
* commands available to the device may have been stored in the extended
* sense data. This function searches for CIWs of a specified command
* type in the extended sense data.
* Returns:
* %NULL if no extended sense data has been stored or if no CIW of the
* specified command type could be found,
* else a pointer to the CIW of the specified command type.
*/
struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
int ciw_cnt;
if (cdev->private->flags.esid == 0)
return NULL;
for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
return cdev->private->senseid.ciw + ciw_cnt;
return NULL;
}
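/*
 * Usage sketch (illustrative, not part of this file): check whether the
 * device announced a read-configuration-data command during SenseID:
 *
 *	struct ciw *ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
 *	if (ciw)
 *		... build a channel program from ciw->cmd and ciw->count ...
 */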
/**
* ccw_device_get_path_mask() - get currently available paths
* @cdev: ccw device to be queried
* Returns:
* %0 if no subchannel for the device is available,
* else the mask of currently available paths for the ccw device's subchannel.
*/
__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
{
struct subchannel *sch;
if (!cdev->dev.parent)
return 0;
sch = to_subchannel(cdev->dev.parent);
return sch->lpm;
}
/*
* Try to break the lock on a boxed device.
*/
int
ccw_device_stlck(struct ccw_device *cdev)
{
void *buf, *buf2;
unsigned long flags;
struct subchannel *sch;
int ret;
if (!cdev)
return -ENODEV;
if (cdev->drv && !cdev->private->options.force)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
CIO_TRACE_EVENT(2, "stl lock");
CIO_TRACE_EVENT(2, dev_name(&cdev->dev));
buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
if (!buf2) {
kfree(buf);
return -ENOMEM;
}
spin_lock_irqsave(sch->lock, flags);
ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
if (ret)
goto out_unlock;
/*
* Setup ccw. We chain an unconditional reserve and a release so we
* only break the lock.
*/
cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
cdev->private->iccws[0].cda = (__u32) __pa(buf);
cdev->private->iccws[0].count = 32;
cdev->private->iccws[0].flags = CCW_FLAG_CC;
cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
cdev->private->iccws[1].cda = (__u32) __pa(buf2);
cdev->private->iccws[1].count = 32;
cdev->private->iccws[1].flags = 0;
ret = cio_start(sch, cdev->private->iccws, 0);
if (ret) {
cio_disable_subchannel(sch); //FIXME: return code?
goto out_unlock;
}
cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
spin_unlock_irqrestore(sch->lock, flags);
wait_event(cdev->private->wait_q,
cdev->private->irb.scsw.cmd.actl == 0);
spin_lock_irqsave(sch->lock, flags);
cio_disable_subchannel(sch); //FIXME: return code?
if ((cdev->private->irb.scsw.cmd.dstat !=
(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
(cdev->private->irb.scsw.cmd.cstat != 0))
ret = -EIO;
/* Clear irb. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
kfree(buf);
kfree(buf2);
spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
struct subchannel *sch;
struct chp_id chpid;
sch = to_subchannel(cdev->dev.parent);
chp_id_init(&chpid);
chpid.id = sch->schib.pmcw.chpid[chp_no];
return chp_get_chp_desc(chpid);
}
/**
* ccw_device_get_id - obtain a ccw device id
* @cdev: device to obtain the id for
* @dev_id: where to fill in the values
*/
void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
{
*dev_id = cdev->private->dev_id;
}
EXPORT_SYMBOL(ccw_device_get_id);
/**
* ccw_device_tm_start_key - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
* @key: storage key to use for storage access
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm, u8 key)
{
struct subchannel *sch;
int rc;
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
/* Adjust the requested path mask to exclude varied-off paths. */
if (lpm) {
lpm &= sch->opm;
if (lpm == 0)
return -EACCES;
}
rc = cio_tm_start_key(sch, tcw, lpm, key);
if (rc == 0)
cdev->private->intparm = intparm;
return rc;
}
EXPORT_SYMBOL(ccw_device_tm_start_key);
/**
* ccw_device_tm_start_timeout_key - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
* @key: storage key to use for storage access
* @expires: time span in jiffies after which to abort request
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm, u8 key,
int expires)
{
int ret;
ccw_device_set_timeout(cdev, expires);
ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
if (ret != 0)
ccw_device_set_timeout(cdev, 0);
return ret;
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
/**
* ccw_device_tm_start - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm)
{
return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
PAGE_DEFAULT_KEY);
}
EXPORT_SYMBOL(ccw_device_tm_start);
/**
* ccw_device_tm_start_timeout - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
* @expires: time span in jiffies after which to abort request
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm, int expires)
{
return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
PAGE_DEFAULT_KEY, expires);
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout);
/**
* ccw_device_tm_intrg - perform interrogate function
* @cdev: ccw device on which to perform the interrogate function
*
* Perform an interrogate function on the given ccw device. Return zero on
* success, non-zero otherwise.
*/
int ccw_device_tm_intrg(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
if (!scsw_is_tm(&sch->schib.scsw) ||
!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
return -EINVAL;
return cio_tm_intrg(sch);
}
EXPORT_SYMBOL(ccw_device_tm_intrg);
// FIXME: these have to go:
int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
return cdev->private->schid.sch_no;
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);


@@ -0,0 +1,594 @@
/*
* drivers/s390/cio/device_pgid.c
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Path Group ID functions.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/lowcore.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
/*
* Helper function called from interrupt context to decide whether an
* operation should be tried again.
*/
static int __ccw_device_should_retry(union scsw *scsw)
{
/* CC is only valid if start function bit is set. */
if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1)
return 1;
/* No more activity. For sense and set PGID we stubbornly try again. */
if (!scsw->cmd.actl)
return 1;
return 0;
}
/*
* Start Sense Path Group ID helper function. Used in ccw_device_recog
* and ccw_device_sense_pgid.
*/
static int
__ccw_device_sense_pgid_start(struct ccw_device *cdev)
{
struct subchannel *sch;
struct ccw1 *ccw;
int ret;
int i;
sch = to_subchannel(cdev->dev.parent);
/* Return if we already checked on all paths. */
if (cdev->private->imask == 0)
return (sch->lpm == 0) ? -ENODEV : -EACCES;
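/* Map the single-bit path mask to a pgid array index: 0x80 -> 0, ..., 0x01 -> 7. */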
i = 8 - ffs(cdev->private->imask);
/* Setup sense path group id channel program. */
ccw = cdev->private->iccws;
ccw->cmd_code = CCW_CMD_SENSE_PGID;
ccw->count = sizeof (struct pgid);
ccw->flags = CCW_FLAG_SLI;
/* Reset device status. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Try on every path. */
ret = -ENODEV;
while (cdev->private->imask != 0) {
/* Try every path multiple times. */
ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
/* Reset internal retry indication. */
cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
if (ret != -EACCES)
return ret;
CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel "
"0.%x.%04x, lpm %02X, became 'not "
"operational'\n",
cdev->private->dev_id.devno,
sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
}
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
i++;
}
return ret;
}
void
ccw_device_sense_pgid_start(struct ccw_device *cdev)
{
int ret;
/* Set a timeout of 60s */
ccw_device_set_timeout(cdev, 60*HZ);
cdev->private->state = DEV_STATE_SENSE_PGID;
cdev->private->imask = 0x80;
cdev->private->iretry = 5;
memset (&cdev->private->pgid, 0, sizeof (cdev->private->pgid));
ret = __ccw_device_sense_pgid_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_pgid_done(cdev, ret);
}
/*
* Called from interrupt context to check if a valid answer
* to Sense Path Group ID was received.
*/
static int
__ccw_device_check_sense_pgid(struct ccw_device *cdev)
{
struct subchannel *sch;
struct irb *irb;
int i;
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
/* Retry Sense PGID if requested. */
if (cdev->private->flags.intretry) {
cdev->private->flags.intretry = 0;
return -EAGAIN;
}
return -ETIME;
}
if (irb->esw.esw0.erw.cons &&
(irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
/*
* If the device doesn't support the Sense Path Group ID
* command, further retries wouldn't help ...
*/
return -EOPNOTSUPP;
}
if (irb->esw.esw0.erw.cons) {
CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
"lpum %02X, cnt %02d, sns : "
"%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
irb->ecw[2], irb->ecw[3],
irb->ecw[4], irb->ecw[5],
irb->ecw[6], irb->ecw[7]);
return -EAGAIN;
}
if (irb->scsw.cmd.cc == 3) {
u8 lpm;
lpm = to_io_private(sch)->orb.cmd.lpm;
CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, lpm);
return -EACCES;
}
i = 8 - ffs(cdev->private->imask);
if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
"is reserved by someone else\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no);
return -EUSERS;
}
return 0;
}
/*
* Got interrupt for Sense Path Group ID.
*/
void
ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
struct irb *irb;
int ret;
irb = (struct irb *) __LC_IRB;
if (irb->scsw.cmd.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (__ccw_device_should_retry(&irb->scsw)) {
ret = __ccw_device_sense_pgid_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_pgid_done(cdev, ret);
}
return;
}
if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
return;
sch = to_subchannel(cdev->dev.parent);
ret = __ccw_device_check_sense_pgid(cdev);
memset(&cdev->private->irb, 0, sizeof(struct irb));
switch (ret) {
/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
case -EOPNOTSUPP: /* Sense Path Group ID not supported */
ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
break;
case -ETIME: /* Sense path group id stopped by timeout. */
ccw_device_sense_pgid_done(cdev, -ETIME);
break;
case -EACCES: /* channel is not operational. */
sch->lpm &= ~cdev->private->imask;
/* Fall through. */
case 0: /* Sense Path Group ID successful. */
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
/* Fall through. */
case -EAGAIN: /* Try again. */
ret = __ccw_device_sense_pgid_start(cdev);
if (ret != 0 && ret != -EBUSY)
ccw_device_sense_pgid_done(cdev, ret);
break;
case -EUSERS: /* device is reserved for someone else. */
ccw_device_sense_pgid_done(cdev, -EUSERS);
break;
}
}
/*
* Path Group ID helper function.
*/
static int
__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
{
struct subchannel *sch;
struct ccw1 *ccw;
int ret;
sch = to_subchannel(cdev->dev.parent);
/* Setup sense path group id channel program. */
cdev->private->pgid[0].inf.fc = func;
ccw = cdev->private->iccws;
if (cdev->private->flags.pgid_single)
cdev->private->pgid[0].inf.fc |= SPID_FUNC_SINGLE_PATH;
else
cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH;
ccw->cmd_code = CCW_CMD_SET_PGID;
ccw->cda = (__u32) __pa (&cdev->private->pgid[0]);
ccw->count = sizeof (struct pgid);
ccw->flags = CCW_FLAG_SLI;
/* Reset device status. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Try multiple times. */
ret = -EACCES;
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
/* Reset internal retry indication. */
cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* We expect an interrupt in case of success or busy
* indication. */
if ((ret == 0) || (ret == -EBUSY))
return ret;
}
/* PGID command failed on this path. */
CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel "
"0.%x.%04x, lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
return ret;
}
/*
* Helper function to send a nop ccw down a path.
*/
static int __ccw_device_do_nop(struct ccw_device *cdev)
{
struct subchannel *sch;
struct ccw1 *ccw;
int ret;
sch = to_subchannel(cdev->dev.parent);
/* Setup nop channel program. */
ccw = cdev->private->iccws;
ccw->cmd_code = CCW_CMD_NOOP;
ccw->cda = 0;
ccw->count = 0;
ccw->flags = CCW_FLAG_SLI;
/* Reset device status. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Try multiple times. */
ret = -EACCES;
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
/* Reset internal retry indication. */
cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* We expect an interrupt in case of success or busy
* indication. */
if ((ret == 0) || (ret == -EBUSY))
return ret;
}
/* nop command failed on this path. */
CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel "
"0.%x.%04x, lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
return ret;
}
/*
* Called from interrupt context to check if a valid answer
* to Set Path Group ID was received.
*/
static int
__ccw_device_check_pgid(struct ccw_device *cdev)
{
struct subchannel *sch;
struct irb *irb;
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
/* Retry Set PGID if requested. */
if (cdev->private->flags.intretry) {
cdev->private->flags.intretry = 0;
return -EAGAIN;
}
return -ETIME;
}
if (irb->esw.esw0.erw.cons) {
if (irb->ecw[0] & SNS0_CMD_REJECT)
return -EOPNOTSUPP;
/* Hmm, whatever happened, try again. */
CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
"cnt %02d, "
"sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
irb->ecw[2], irb->ecw[3],
irb->ecw[4], irb->ecw[5],
irb->ecw[6], irb->ecw[7]);
return -EAGAIN;
}
if (irb->scsw.cmd.cc == 3) {
CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
return -EACCES;
}
return 0;
}
/*
* Called from interrupt context to check the path status after a nop has
* been sent.
*/
static int __ccw_device_check_nop(struct ccw_device *cdev)
{
struct subchannel *sch;
struct irb *irb;
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
/* Retry NOP if requested. */
if (cdev->private->flags.intretry) {
cdev->private->flags.intretry = 0;
return -EAGAIN;
}
return -ETIME;
}
if (irb->scsw.cmd.cc == 3) {
CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
return -EACCES;
}
return 0;
}
static void
__ccw_device_verify_start(struct ccw_device *cdev)
{
struct subchannel *sch;
__u8 func;
int ret;
sch = to_subchannel(cdev->dev.parent);
/* Repeat for all paths. */
for (; cdev->private->imask; cdev->private->imask >>= 1,
cdev->private->iretry = 5) {
if ((cdev->private->imask & sch->schib.pmcw.pam) == 0)
/* Path not available, try next. */
continue;
if (cdev->private->options.pgroup) {
if (sch->opm & cdev->private->imask)
func = SPID_FUNC_ESTABLISH;
else
func = SPID_FUNC_RESIGN;
ret = __ccw_device_do_pgid(cdev, func);
} else
ret = __ccw_device_do_nop(cdev);
/* We expect an interrupt in case of success or busy
* indication. */
if (ret == 0 || ret == -EBUSY)
return;
/* Permanent path failure, try next. */
}
/* Done with all paths. */
ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV);
}
/*
* Got interrupt for Set Path Group ID.
*/
void
ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
struct irb *irb;
int ret;
irb = (struct irb *) __LC_IRB;
if (irb->scsw.cmd.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (__ccw_device_should_retry(&irb->scsw))
__ccw_device_verify_start(cdev);
return;
}
if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
return;
sch = to_subchannel(cdev->dev.parent);
if (cdev->private->options.pgroup)
ret = __ccw_device_check_pgid(cdev);
else
ret = __ccw_device_check_nop(cdev);
memset(&cdev->private->irb, 0, sizeof(struct irb));
switch (ret) {
/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
case 0:
/* Path verification ccw finished successfully, update lpm. */
sch->vpm |= sch->opm & cdev->private->imask;
/* Go on with next path. */
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
__ccw_device_verify_start(cdev);
break;
case -EOPNOTSUPP:
/*
* One of those strange devices which claim to be able
* to do multipathing but not for Set Path Group ID.
*/
if (cdev->private->flags.pgid_single)
cdev->private->options.pgroup = 0;
else
cdev->private->flags.pgid_single = 1;
/* Retry */
sch->vpm = 0;
cdev->private->imask = 0x80;
cdev->private->iretry = 5;
/* fall through. */
case -EAGAIN: /* Try again. */
__ccw_device_verify_start(cdev);
break;
case -ETIME: /* Set path group id stopped by timeout. */
ccw_device_verify_done(cdev, -ETIME);
break;
case -EACCES: /* channel is not operational. */
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
__ccw_device_verify_start(cdev);
break;
}
}
void
ccw_device_verify_start(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
cdev->private->flags.pgid_single = 0;
cdev->private->imask = 0x80;
cdev->private->iretry = 5;
/* Start with empty vpm. */
sch->vpm = 0;
/* Get current pam. */
if (cio_update_schib(sch)) {
ccw_device_verify_done(cdev, -ENODEV);
return;
}
/* After 60s path verification is considered to have failed. */
ccw_device_set_timeout(cdev, 60*HZ);
__ccw_device_verify_start(cdev);
}
static void
__ccw_device_disband_start(struct ccw_device *cdev)
{
struct subchannel *sch;
int ret;
sch = to_subchannel(cdev->dev.parent);
while (cdev->private->imask != 0) {
if (sch->lpm & cdev->private->imask) {
ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND);
if (ret == 0)
return;
}
cdev->private->iretry = 5;
cdev->private->imask >>= 1;
}
ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
}
/*
* Got interrupt for Unset Path Group ID.
*/
void
ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
struct irb *irb;
int ret;
irb = (struct irb *) __LC_IRB;
if (irb->scsw.cmd.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (__ccw_device_should_retry(&irb->scsw))
__ccw_device_disband_start(cdev);
return;
}
if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
return;
sch = to_subchannel(cdev->dev.parent);
ret = __ccw_device_check_pgid(cdev);
memset(&cdev->private->irb, 0, sizeof(struct irb));
switch (ret) {
/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
case 0: /* disband successful. */
ccw_device_disband_done(cdev, ret);
break;
case -EOPNOTSUPP:
/*
* One of those strange devices which claim to be able
* to do multipathing but not for Unset Path Group ID.
*/
cdev->private->flags.pgid_single = 1;
/* fall through. */
case -EAGAIN: /* Try again. */
__ccw_device_disband_start(cdev);
break;
case -ETIME: /* Set path group id stopped by timeout. */
ccw_device_disband_done(cdev, -ETIME);
break;
case -EACCES: /* channel is not operational. */
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
__ccw_device_disband_start(cdev);
break;
}
}
void
ccw_device_disband_start(struct ccw_device *cdev)
{
/* After 60s disbanding is considered to have failed. */
ccw_device_set_timeout(cdev, 60*HZ);
cdev->private->flags.pgid_single = 0;
cdev->private->iretry = 5;
cdev->private->imask = 0x80;
__ccw_device_disband_start(cdev);
}
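/*
 * Illustrative sketch (not part of the original source): the verify and
 * disband state machines above walk the eight possible channel paths by
 * shifting a single-bit path mask and giving each path five retries,
 * conceptually:
 *
 *	for (imask = 0x80; imask; imask >>= 1) {
 *		iretry = 5;
 *		if (imask & sch->schib.pmcw.pam)
 *			start_io_on_path(imask);
 *	}
 *
 * where start_io_on_path() is a hypothetical stand-in for the SPID/NOP
 * helpers. In the real code the loop is re-entered from the interrupt
 * handlers (ccw_device_verify_irq, ccw_device_disband_irq) after each
 * path's I/O completes instead of running to completion in one pass.
 */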

View File

@@ -0,0 +1,403 @@
/*
* drivers/s390/cio/device_status.c
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Status accumulation and basic sense functions.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
/*
* Check for any kind of channel or interface control check but don't
* issue the message for the console device
*/
static void
ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
{
char dbf_text[15];
if (!scsw_is_valid_cstat(&irb->scsw) ||
!(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
return;
CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
"received"
" ... device %04x on subchannel 0.%x.%04x, dev_stat "
": %02X sch_stat : %02X\n",
cdev->private->dev_id.devno, cdev->private->schid.ssid,
cdev->private->schid.sch_no,
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, irb, sizeof(struct irb));
}
/*
* Some paths became not operational (pno bit in scsw is set).
*/
static void
ccw_device_path_notoper(struct ccw_device *cdev)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
if (cio_update_schib(sch))
goto doverify;
CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
"not operational \n", __func__,
sch->schid.ssid, sch->schid.sch_no,
sch->schib.pmcw.pnom);
sch->lpm &= ~sch->schib.pmcw.pnom;
doverify:
cdev->private->flags.doverify = 1;
}
/*
* Copy valid bits from the extended control word to device irb.
*/
static void
ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
{
/*
* Copy extended control bit if it is valid... yes there
* are conditions that have to be met for the extended control
* bit to have meaning. Sick.
*/
cdev->private->irb.scsw.cmd.ectl = 0;
if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
!(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
/* Check if extended control word is valid. */
if (!cdev->private->irb.scsw.cmd.ectl)
return;
/* Copy concurrent sense / model dependent information. */
memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
}
/*
* Check if extended status word is valid.
*/
static int
ccw_device_accumulate_esw_valid(struct irb *irb)
{
if (!irb->scsw.cmd.eswf &&
(irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
return 0;
if (irb->scsw.cmd.stctl ==
(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
!(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
return 0;
return 1;
}
/*
* Copy valid bits from the extended status word to device irb.
*/
static void
ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
{
struct irb *cdev_irb;
struct sublog *cdev_sublog, *sublog;
if (!ccw_device_accumulate_esw_valid(irb))
return;
cdev_irb = &cdev->private->irb;
/* Copy last path used mask. */
cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
/* Copy subchannel logout information if esw is of format 0. */
if (irb->scsw.cmd.eswf) {
cdev_sublog = &cdev_irb->esw.esw0.sublog;
sublog = &irb->esw.esw0.sublog;
/* Copy extended status flags. */
cdev_sublog->esf = sublog->esf;
/*
* Copy fields that have a meaning for channel data check
* channel control check and interface control check.
*/
if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
SCHN_STAT_CHN_CTRL_CHK |
SCHN_STAT_INTF_CTRL_CHK)) {
/* Copy ancillary report bit. */
cdev_sublog->arep = sublog->arep;
/* Copy field-validity-flags. */
cdev_sublog->fvf = sublog->fvf;
/* Copy storage access code. */
cdev_sublog->sacc = sublog->sacc;
/* Copy termination code. */
cdev_sublog->termc = sublog->termc;
/* Copy sequence code. */
cdev_sublog->seqc = sublog->seqc;
}
/* Copy device status check. */
cdev_sublog->devsc = sublog->devsc;
/* Copy secondary error. */
cdev_sublog->serr = sublog->serr;
/* Copy i/o-error alert. */
cdev_sublog->ioerr = sublog->ioerr;
/* Copy channel path timeout bit. */
if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
/* Copy failing storage address validity flag. */
cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
if (cdev_irb->esw.esw0.erw.fsavf) {
/* ... and copy the failing storage address. */
memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
sizeof (irb->esw.esw0.faddr));
/* ... and copy the failing storage address format. */
cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
}
/* Copy secondary ccw address validity bit. */
cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
if (irb->esw.esw0.erw.scavf)
/* ... and copy the secondary ccw address. */
cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
}
/* FIXME: DCTI for format 2? */
/* Copy authorization bit. */
cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
/* Copy path verification required flag. */
cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
if (irb->esw.esw0.erw.pvrf)
cdev->private->flags.doverify = 1;
/* Copy concurrent sense bit. */
cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
if (irb->esw.esw0.erw.cons)
cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
}
/*
* Accumulate status from irb to devstat.
*/
void
ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
{
struct irb *cdev_irb;
/*
* Check if the status pending bit is set in stctl.
* If not, the remaining bits have no meaning and we must ignore them.
* The esw is not meaningful either...
*/
if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
return;
/* Check for channel checks and interface control checks. */
ccw_device_msg_control_check(cdev, irb);
/* Check for path not operational. */
if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
ccw_device_path_notoper(cdev);
/* No irb accumulation for transport mode irbs. */
if (scsw_is_tm(&irb->scsw)) {
memcpy(&cdev->private->irb, irb, sizeof(struct irb));
return;
}
/*
* Don't accumulate unsolicited interrupts.
*/
if (!scsw_is_solicited(&irb->scsw))
return;
cdev_irb = &cdev->private->irb;
/*
* If the clear function had been performed, all formerly pending
* status at the subchannel has been cleared and we must not pass
* intermediate accumulated status to the device driver.
*/
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Copy bits which are valid only for the start function. */
if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
/* Copy key. */
cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
/* Copy suspend control bit. */
cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
/* Accumulate deferred condition code. */
cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
/* Copy ccw format bit. */
cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
/* Copy prefetch bit. */
cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
/* Copy initial-status-interruption-control. */
cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
/* Copy address limit checking control. */
cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
/* Copy suppress suspend bit. */
cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
}
/* Take care of the extended control bit and extended control word. */
ccw_device_accumulate_ecw(cdev, irb);
/* Accumulate function control. */
cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
/* Copy activity control. */
cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
/* Accumulate status control. */
cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
/*
* Copy ccw address if it is valid. This is a bit simplified
* but should be close enough for all practical purposes.
*/
if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
((irb->scsw.cmd.stctl ==
(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
(irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
(irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
/* Accumulate device status, but not the device busy flag. */
cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
/* dstat is not always valid. */
if (irb->scsw.cmd.stctl &
(SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
| SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
/* Accumulate subchannel status. */
cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
/* Copy residual count if it is valid. */
if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
(irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
== 0)
cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
/* Take care of bits in the extended status word. */
ccw_device_accumulate_esw(cdev, irb);
/*
* Check whether we must issue a SENSE CCW ourselves if there is no
* concurrent sense facility installed for the subchannel.
* No sense is required if no delayed sense is pending
* and we did not get a unit check without sense information.
*
* Note: We should check for ioinfo[irq]->flags.consns but VM
* violates the ESA/390 architecture and doesn't present an
* operand exception for virtual devices without concurrent
* sense facility available/supported when enabling the
* concurrent sense facility.
*/
if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
!(cdev_irb->esw.esw0.erw.cons))
cdev->private->flags.dosense = 1;
}
/*
* Do a basic sense.
*/
int
ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
{
struct subchannel *sch;
struct ccw1 *sense_ccw;
int rc;
sch = to_subchannel(cdev->dev.parent);
/* A sense is required; can we do it now? */
if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
/*
* we received a Unit Check but we have no final
* status yet, therefore we must delay the SENSE
* processing. We must not report this intermediate
* status to the device interrupt handler.
*/
return -EBUSY;
/*
* We have ending status but no sense information. Do a basic sense.
*/
sense_ccw = &to_io_private(sch)->sense_ccw;
sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
/* Reset internal retry indication. */
cdev->private->flags.intretry = 0;
rc = cio_start(sch, sense_ccw, 0xff);
if (rc == -ENODEV || rc == -EACCES)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
return rc;
}
/*
* Add information from basic sense to devstat.
*/
void
ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
{
/*
* Check if the status pending bit is set in stctl.
* If not, the remaining bits have no meaning and we must ignore them.
* The esw is not meaningful either...
*/
if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
return;
/* Check for channel checks and interface control checks. */
ccw_device_msg_control_check(cdev, irb);
/* Check for path not operational. */
if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
ccw_device_path_notoper(cdev);
if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
cdev->private->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
}
/* Check if path verification is required. */
if (ccw_device_accumulate_esw_valid(irb) &&
irb->esw.esw0.erw.pvrf)
cdev->private->flags.doverify = 1;
}
/*
* This function accumulates the status into the private devstat and
* starts a basic sense if one is needed.
*/
int
ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
{
ccw_device_accumulate_irb(cdev, irb);
if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
return -EBUSY;
/* Check for basic sense. */
if (cdev->private->flags.dosense &&
!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
cdev->private->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
return 0;
}
if (cdev->private->flags.dosense) {
ccw_device_do_sense(cdev, irb);
return -EBUSY;
}
return 0;
}
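/*
 * Illustrative sketch (not part of the original file), mirroring the
 * callers in device_pgid.c above: an interrupt routine lets the helper
 * accumulate status and possibly start a basic sense, and only looks at
 * the accumulated irb once the device is no longer busy:
 *
 *	if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
 *		return;
 *	evaluate cdev->private->irb here
 */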

View File

@@ -0,0 +1,350 @@
/*
* Functions for assembling fcx enabled I/O control blocks.
*
* Copyright IBM Corp. 2008
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <asm/fcx.h>
#include "cio.h"
/**
* tcw_get_intrg - return pointer to associated interrogate tcw
* @tcw: pointer to the original tcw
*
* Return a pointer to the interrogate tcw associated with the specified tcw
* or %NULL if there is no associated interrogate tcw.
*/
struct tcw *tcw_get_intrg(struct tcw *tcw)
{
return (struct tcw *) ((addr_t) tcw->intrg);
}
EXPORT_SYMBOL(tcw_get_intrg);
/**
* tcw_get_data - return pointer to input/output data associated with tcw
* @tcw: pointer to the tcw
*
* Return the input or output data address specified in the tcw depending
* on whether the r-bit or the w-bit is set. If neither bit is set, return
* %NULL.
*/
void *tcw_get_data(struct tcw *tcw)
{
if (tcw->r)
return (void *) ((addr_t) tcw->input);
if (tcw->w)
return (void *) ((addr_t) tcw->output);
return NULL;
}
EXPORT_SYMBOL(tcw_get_data);
/**
* tcw_get_tccb - return pointer to tccb associated with tcw
* @tcw: pointer to the tcw
*
* Return pointer to the tccb associated with this tcw.
*/
struct tccb *tcw_get_tccb(struct tcw *tcw)
{
return (struct tccb *) ((addr_t) tcw->tccb);
}
EXPORT_SYMBOL(tcw_get_tccb);
/**
* tcw_get_tsb - return pointer to tsb associated with tcw
* @tcw: pointer to the tcw
*
* Return pointer to the tsb associated with this tcw.
*/
struct tsb *tcw_get_tsb(struct tcw *tcw)
{
return (struct tsb *) ((addr_t) tcw->tsb);
}
EXPORT_SYMBOL(tcw_get_tsb);
/**
* tcw_init - initialize tcw data structure
* @tcw: pointer to the tcw to be initialized
* @r: initial value of the r-bit
* @w: initial value of the w-bit
*
* Initialize all fields of the specified tcw data structure with zero and
* fill in the format, flags, r and w fields.
*/
void tcw_init(struct tcw *tcw, int r, int w)
{
memset(tcw, 0, sizeof(struct tcw));
tcw->format = TCW_FORMAT_DEFAULT;
tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
if (r)
tcw->r = 1;
if (w)
tcw->w = 1;
}
EXPORT_SYMBOL(tcw_init);
static inline size_t tca_size(struct tccb *tccb)
{
return tccb->tcah.tcal - 12;
}
static u32 calc_dcw_count(struct tccb *tccb)
{
int offset;
struct dcw *dcw;
u32 count = 0;
size_t size;
size = tca_size(tccb);
for (offset = 0; offset < size;) {
dcw = (struct dcw *) &tccb->tca[offset];
count += dcw->count;
if (!(dcw->flags & DCW_FLAGS_CC))
break;
offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
}
return count;
}
static u32 calc_cbc_size(struct tidaw *tidaw, int num)
{
int i;
u32 cbc_data;
u32 cbc_count = 0;
u64 data_count = 0;
for (i = 0; i < num; i++) {
if (tidaw[i].flags & TIDAW_FLAGS_LAST)
break;
/* TODO: find out if padding applies to total of data
* transferred or data transferred by this tidaw. Assumption:
* applies to total. */
data_count += tidaw[i].count;
if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
cbc_data = 4 + ALIGN(data_count, 4) - data_count;
cbc_count += cbc_data;
data_count += cbc_data;
}
}
return cbc_count;
}
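/*
 * Worked example (illustrative): if data_count is 6 when a tidaw with
 * TIDAW_FLAGS_INSERT_CBC is reached, the 4-byte checksum plus padding to
 * the next word boundary adds 4 + ALIGN(6, 4) - 6 = 4 + 8 - 6 = 6 bytes
 * of cbc data to the transport count.
 */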
/**
* tcw_finalize - finalize tcw length fields and tidaw list
* @tcw: pointer to the tcw
* @num_tidaws: the number of tidaws used to address input/output data or zero
* if no tida is used
*
* Calculate the input/output counts and the tccbl field in the tcw, add a
* tcat to the tccb and terminate the data tidaw list if used.
*
* Note: in case input- or output-tida is used, the tidaw-list must be stored
* in contiguous storage (no ttic). The tcal field in the tccb must be
* up-to-date.
*/
void tcw_finalize(struct tcw *tcw, int num_tidaws)
{
struct tidaw *tidaw;
struct tccb *tccb;
struct tccb_tcat *tcat;
u32 count;
/* Terminate tidaw list. */
tidaw = tcw_get_data(tcw);
if (num_tidaws > 0)
tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
/* Add tcat to tccb. */
tccb = tcw_get_tccb(tcw);
tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
memset(tcat, 0, sizeof(*tcat));
/* Calculate tcw input/output count and tcat transport count. */
count = calc_dcw_count(tccb);
if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
count += calc_cbc_size(tidaw, num_tidaws);
if (tcw->r)
tcw->input_count = count;
else if (tcw->w)
tcw->output_count = count;
tcat->count = ALIGN(count, 4) + 4;
/* Calculate tccbl. */
tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
sizeof(struct tccb_tcat) - 20) >> 2;
}
EXPORT_SYMBOL(tcw_finalize);
/**
* tcw_set_intrg - set the interrogate tcw address of a tcw
* @tcw: the tcw address
* @intrg_tcw: the address of the interrogate tcw
*
* Set the address of the interrogate tcw in the specified tcw.
*/
void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
{
tcw->intrg = (u32) ((addr_t) intrg_tcw);
}
EXPORT_SYMBOL(tcw_set_intrg);
/**
* tcw_set_data - set data address and tida flag of a tcw
* @tcw: the tcw address
* @data: the data address
* @use_tidal: zero if the data address specifies a contiguous block of data,
* non-zero if it specifies a list of tidaws.
*
* Set the input/output data address of a tcw (depending on the value of the
* r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
* is set as well.
*/
void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
{
if (tcw->r) {
tcw->input = (u64) ((addr_t) data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_INPUT_TIDA;
} else if (tcw->w) {
tcw->output = (u64) ((addr_t) data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
}
}
EXPORT_SYMBOL(tcw_set_data);
/**
* tcw_set_tccb - set tccb address of a tcw
* @tcw: the tcw address
* @tccb: the tccb address
*
* Set the address of the tccb in the specified tcw.
*/
void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
{
tcw->tccb = (u64) ((addr_t) tccb);
}
EXPORT_SYMBOL(tcw_set_tccb);
/**
* tcw_set_tsb - set tsb address of a tcw
* @tcw: the tcw address
* @tsb: the tsb address
*
* Set the address of the tsb in the specified tcw.
*/
void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
{
tcw->tsb = (u64) ((addr_t) tsb);
}
EXPORT_SYMBOL(tcw_set_tsb);
/**
* tccb_init - initialize tccb
* @tccb: the tccb address
* @size: the maximum size of the tccb
* @sac: the service-action-code to be used
*
* Initialize the header of the specified tccb by resetting all values to zero
* and filling in defaults for format, sac and initial tcal fields.
*/
void tccb_init(struct tccb *tccb, size_t size, u32 sac)
{
memset(tccb, 0, size);
tccb->tcah.format = TCCB_FORMAT_DEFAULT;
tccb->tcah.sac = sac;
tccb->tcah.tcal = 12;
}
EXPORT_SYMBOL(tccb_init);
/**
* tsb_init - initialize tsb
* @tsb: the tsb address
*
* Initialize the specified tsb by resetting all values to zero.
*/
void tsb_init(struct tsb *tsb)
{
memset(tsb, 0, sizeof(*tsb));
}
EXPORT_SYMBOL(tsb_init);
/**
* tccb_add_dcw - add a dcw to the tccb
* @tccb: the tccb address
* @tccb_size: the maximum tccb size
* @cmd: the dcw command
* @flags: flags for the dcw
* @cd: pointer to control data for this dcw or NULL if none is required
* @cd_count: number of control data bytes for this dcw
* @count: number of data bytes for this dcw
*
* Add a new dcw to the specified tccb by writing the dcw information specified
* by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
* a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
* would exceed the available space as defined by @tccb_size.
*
* Note: the tcal field of the tccb header will be updated to reflect the added
* content.
*/
struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
void *cd, u8 cd_count, u32 count)
{
struct dcw *dcw;
int size;
int tca_offset;
/* Check for space. */
tca_offset = tca_size(tccb);
size = ALIGN(sizeof(struct dcw) + cd_count, 4);
if (sizeof(struct tccb_tcah) + tca_offset + size +
sizeof(struct tccb_tcat) > tccb_size)
return ERR_PTR(-ENOSPC);
/* Add dcw to tca. */
dcw = (struct dcw *) &tccb->tca[tca_offset];
memset(dcw, 0, size);
dcw->cmd = cmd;
dcw->flags = flags;
dcw->count = count;
dcw->cd_count = cd_count;
if (cd)
memcpy(&dcw->cd[0], cd, cd_count);
tccb->tcah.tcal += size;
return dcw;
}
EXPORT_SYMBOL(tccb_add_dcw);
/**
* tcw_add_tidaw - add a tidaw to a tcw
* @tcw: the tcw address
* @num_tidaws: the current number of tidaws
* @flags: flags for the new tidaw
* @addr: address value for the new tidaw
* @count: count value for the new tidaw
*
* Add a new tidaw to the input/output data tidaw-list of the specified tcw
* (depending on the value of the r-flag and w-flag) and return a pointer to
* the new tidaw.
*
* Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
* must ensure that there is enough space for the new tidaw. The last-tidaw
* flag for the last tidaw in the list will be set by tcw_finalize.
*/
struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
void *addr, u32 count)
{
struct tidaw *tidaw;
/* Add tidaw to tidaw-list. */
tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
memset(tidaw, 0, sizeof(struct tidaw));
tidaw->flags = flags;
tidaw->count = count;
tidaw->addr = (u64) ((addr_t) addr);
return tidaw;
}
EXPORT_SYMBOL(tcw_add_tidaw);
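/*
 * Illustrative sketch (not part of the original file): assembling a
 * minimal fcx write request with the helpers above. The command code
 * 0x01, the caller-provided buffers and the tccb size are assumptions
 * made for the example; error checking is elided for brevity.
 */
static void __maybe_unused fcx_build_example(struct tcw *tcw, struct tccb *tccb,
					     struct tidaw *tidaws,
					     size_t tccb_size, void *buf, u32 len)
{
	tcw_init(tcw, 0, 1);	/* write request: w-bit set, r-bit clear */
	tccb_init(tccb, tccb_size, TCCB_SAC_DEFAULT);
	tcw_set_tccb(tcw, tccb);
	/* One dcw describing the (hypothetical) write command. */
	tccb_add_dcw(tccb, tccb_size, 0x01, 0, NULL, 0, len);
	/* Address the data through a one-entry tidaw list. */
	tcw_set_data(tcw, tidaws, 1);
	tcw_add_tidaw(tcw, 0, 0, buf, len);
	/* Fill in the counts, append the tcat, flag the last tidaw. */
	tcw_finalize(tcw, 1);
}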

View File

@@ -0,0 +1,132 @@
/*
* drivers/s390/cio/idset.c
*
* Copyright IBM Corp. 2007
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/vmalloc.h>
#include <linux/bitops.h>
#include "idset.h"
#include "css.h"
struct idset {
int num_ssid;
int num_id;
unsigned long bitmap[0];
};
static inline unsigned long bitmap_size(int num_ssid, int num_id)
{
return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
}
static struct idset *idset_new(int num_ssid, int num_id)
{
struct idset *set;
set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
if (set) {
set->num_ssid = num_ssid;
set->num_id = num_id;
memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
}
return set;
}
void idset_free(struct idset *set)
{
vfree(set);
}
void idset_clear(struct idset *set)
{
memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
}
void idset_fill(struct idset *set)
{
memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
}
static inline void idset_add(struct idset *set, int ssid, int id)
{
set_bit(ssid * set->num_id + id, set->bitmap);
}
static inline void idset_del(struct idset *set, int ssid, int id)
{
clear_bit(ssid * set->num_id + id, set->bitmap);
}
static inline int idset_contains(struct idset *set, int ssid, int id)
{
return test_bit(ssid * set->num_id + id, set->bitmap);
}
static inline int idset_get_first(struct idset *set, int *ssid, int *id)
{
int bitnum;
bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
if (bitnum >= set->num_ssid * set->num_id)
return 0;
*ssid = bitnum / set->num_id;
*id = bitnum % set->num_id;
return 1;
}
struct idset *idset_sch_new(void)
{
return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
}
void idset_sch_add(struct idset *set, struct subchannel_id schid)
{
idset_add(set, schid.ssid, schid.sch_no);
}
void idset_sch_del(struct idset *set, struct subchannel_id schid)
{
idset_del(set, schid.ssid, schid.sch_no);
}
int idset_sch_contains(struct idset *set, struct subchannel_id schid)
{
return idset_contains(set, schid.ssid, schid.sch_no);
}
int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
{
int ssid = 0;
int id = 0;
int rc;
rc = idset_get_first(set, &ssid, &id);
if (rc) {
init_subchannel_id(schid);
schid->ssid = ssid;
schid->sch_no = id;
}
return rc;
}
int idset_is_empty(struct idset *set)
{
int bitnum;
bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
if (bitnum >= set->num_ssid * set->num_id)
return 1;
return 0;
}
void idset_add_set(struct idset *to, struct idset *from)
{
unsigned long i, len;
len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
__BITOPS_WORDS(from->num_ssid * from->num_id));
for (i = 0; i < len ; i++)
to->bitmap[i] |= from->bitmap[i];
}
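/*
 * Illustrative sketch (not part of the original file): typical use of
 * the subchannel-id wrappers when collecting subchannels for later
 * evaluation (error handling abbreviated):
 *
 *	struct idset *set = idset_sch_new();
 *	if (set) {
 *		idset_sch_add(set, schid);
 *		if (idset_sch_contains(set, schid))
 *			handle the subchannel
 *		idset_free(set);
 *	}
 */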

Some files were not shown because too many files have changed in this diff