add idl4k kernel firmware version 1.13.0.105

This commit is contained in:
Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions


@@ -0,0 +1,13 @@
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn2 io routines.
#

EXTRA_CFLAGS += -Iarch/ia64/sn/include

obj-y += pcibr_dma.o pcibr_reg.o \
	 pcibr_ate.o pcibr_provider.o


@@ -0,0 +1,177 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
int pcibr_invalidate_ate; /* by default don't invalidate ATE on free */
/*
* mark_ate: Mark the ate as either free or inuse.
*/
static void mark_ate(struct ate_resource *ate_resource, int start, int number,
u64 value)
{
u64 *ate = ate_resource->ate;
int index;
int length = 0;
for (index = start; length < number; index++, length++)
ate[index] = value;
}
/*
* find_free_ate: Find the first free ate index starting from the given
* index for the desired consecutive count.
*/
static int find_free_ate(struct ate_resource *ate_resource, int start,
int count)
{
u64 *ate = ate_resource->ate;
int index;
int start_free;
for (index = start; index < ate_resource->num_ate;) {
if (!ate[index]) {
int i;
int free;
free = 0;
start_free = index; /* Found start free ate */
for (i = start_free; i < ate_resource->num_ate; i++) {
if (!ate[i]) { /* This is free */
if (++free == count)
return start_free;
} else {
index = i + 1;
break;
}
}
if (i >= ate_resource->num_ate)
return -1;
} else
index++; /* Try next ate */
}
return -1;
}
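/*
 * Worked example (illustrative, not in the original source): with
 * num_ate = 7 and ate[] = { used, free, free, used, free, free, free },
 * find_free_ate(res, 0, 3) rejects the two-entry run at index 1 and
 * returns 4, while find_free_ate(res, 0, 4) finds no long-enough run
 * and returns -1.
 */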
/*
* free_ate_resource: Free the requested number of ATEs.
*/
static inline void free_ate_resource(struct ate_resource *ate_resource,
int start)
{
mark_ate(ate_resource, start, ate_resource->ate[start], 0);
if ((ate_resource->lowest_free_index > start) ||
(ate_resource->lowest_free_index < 0))
ate_resource->lowest_free_index = start;
}
/*
* alloc_ate_resource: Allocate the requested number of ATEs.
*/
static inline int alloc_ate_resource(struct ate_resource *ate_resource,
int ate_needed)
{
int start_index;
/*
* Check for ate exhaustion.
*/
if (ate_resource->lowest_free_index < 0)
return -1;
/*
* Find the required number of free consecutive ates.
*/
start_index =
find_free_ate(ate_resource, ate_resource->lowest_free_index,
ate_needed);
if (start_index >= 0)
mark_ate(ate_resource, start_index, ate_needed, ate_needed);
ate_resource->lowest_free_index =
find_free_ate(ate_resource, ate_resource->lowest_free_index, 1);
return start_index;
}
/*
* Allocate "count" contiguous Bridge Address Translation Entries
* on the specified bridge to be used for PCI to XTALK mappings.
* Indices in rm map range from 1..num_entries. Indices returned
* to caller range from 0..num_entries-1.
*
* Return the start index on success, -1 on failure.
*/
int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
{
int status;
unsigned long flags;
spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
return status;
}
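/*
 * Illustrative sketch, not part of the original file: a caller pairs
 * pcibr_ate_alloc() with pcibr_ate_free(), treating a negative return
 * as exhaustion of the bridge's ATE map. example_ate_span() below is
 * hypothetical.
 */
static int example_ate_span(struct pcibus_info *pcibus_info, int pages)
{
	int ate_index = pcibr_ate_alloc(pcibus_info, pages);

	if (ate_index < 0)
		return -ENOMEM;	/* no run of 'pages' consecutive free ATEs */
	/* ... program the span with ate_write(), run the DMA ... */
	pcibr_ate_free(pcibus_info, ate_index);
	return 0;
}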
/*
* Setup an Address Translation Entry as specified. Use either the Bridge
* internal maps or the external map RAM, as appropriate.
*/
static inline u64 __iomem *pcibr_ate_addr(struct pcibus_info *pcibus_info,
int ate_index)
{
if (ate_index < pcibus_info->pbi_int_ate_size) {
return pcireg_int_ate_addr(pcibus_info, ate_index);
}
panic("pcibr_ate_addr: invalid ate_index 0x%x", ate_index);
}
/*
* Update the ate.
*/
inline void
ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
volatile u64 ate)
{
while (count-- > 0) {
if (ate_index < pcibus_info->pbi_int_ate_size) {
pcireg_int_ate_set(pcibus_info, ate_index, ate);
} else {
panic("ate_write: invalid ate_index 0x%x", ate_index);
}
ate_index++;
ate += IOPGSIZE;
}
pcireg_tflush_get(pcibus_info); /* wait until Bridge PIO complete */
}
void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
{
volatile u64 ate;
int count;
unsigned long flags;
if (pcibr_invalidate_ate) {
/* For debugging purposes, clear the valid bit in the ATE */
ate = *pcibr_ate_addr(pcibus_info, index);
count = pcibus_info->pbi_int_ate_resource.ate[index];
ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
}
spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
}


@@ -0,0 +1,412 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pic.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/tiocp.h>
#include "tio.h"
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
extern int sn_ioif_inited;
/* =====================================================================
* DMA MANAGEMENT
*
* The Bridge ASIC provides three methods of doing DMA: via a "direct map"
* register available in 32-bit PCI space (which selects a contiguous 2G
* address space on some other widget), via "direct" addressing via 64-bit
* PCI space (all destination information comes from the PCI address,
* including transfer attributes), and via a "mapped" region that allows
* a bunch of different small mappings to be established with the PMU.
*
* For efficiency, we prefer the 32-bit direct mapping facility,
* since it requires no resource allocations. The advantage of using the
* PMU over the 64-bit direct is that single-cycle PCI addressing can be
* used; the advantage of using 64-bit direct over PMU addressing is that
* we do not have to allocate entries in the PMU.
*/
static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
pdi_linux_pcidev->devfn)) - 1;
int ate_count;
int ate_index;
u64 ate_flags = flags | PCI32_ATE_V;
u64 ate;
u64 pci_addr;
u64 xio_addr;
u64 offset;
/* PIC in PCI-X mode does not support 32bit PageMap mode */
if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
return 0;
}
/* Calculate the number of ATEs needed. */
if (!(MINIMAL_ATE_FLAG(paddr, req_size))) {
ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
+ req_size /* max mapping bytes */
- 1) + 1; /* round UP */
} else { /* assume requested target is page aligned */
ate_count = IOPG(req_size /* max mapping bytes */
- 1) + 1; /* round UP */
}
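/*
 * Worked example (illustrative, assuming a 16KB I/O page): an unaligned
 * 20KB request costs IOPG(16383 + 20480 - 1) + 1 = 3 ATEs, since a
 * worst-case start offset spreads the buffer across three I/O pages.
 */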
/* Allocate the required number of ATEs. */
ate_index = pcibr_ate_alloc(pcibus_info, ate_count);
if (ate_index < 0)
return 0;
/* In PCI-X mode, Prefetch not supported */
if (IS_PCIX(pcibus_info))
ate_flags &= ~(PCI32_ATE_PREF);
if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr);
else
xio_addr = paddr;
offset = IOPGOFF(xio_addr);
ate = ate_flags | (xio_addr - offset);
/* If PIC, put the targetid in the ATE */
if (IS_PIC_SOFT(pcibus_info)) {
ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
}
/*
* If we're mapping for MSI, set the MSI bit in the ATE. If it's a
* TIOCP based pci bus, we also need to set the PIO bit in the ATE.
*/
if (dma_flags & SN_DMA_MSI) {
ate |= PCI32_ATE_MSI;
if (IS_TIOCP_SOFT(pcibus_info))
ate |= PCI32_ATE_PIO;
}
ate_write(pcibus_info, ate_index, ate_count, ate);
/*
* Set up the DMA mapped Address.
*/
pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index;
/*
* If swap was set in device in pcibr_endian_set()
* we need to turn swapping on.
*/
if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
ATE_SWAP_ON(pci_addr);
return pci_addr;
}
static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
u64 dma_attributes, int dma_flags)
{
struct pcibus_info *pcibus_info = (struct pcibus_info *)
((info->pdi_host_pcidev_info)->pdi_pcibus_info);
u64 pci_addr;
/* Translate to Crosstalk View of Physical Address */
if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
pci_addr = IS_PIC_SOFT(pcibus_info) ?
PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr);
else
pci_addr = paddr;
pci_addr |= dma_attributes;
/* Handle Bus mode */
if (IS_PCIX(pcibus_info))
pci_addr &= ~PCI64_ATTR_PREF;
/* Handle Bridge Chipset differences */
if (IS_PIC_SOFT(pcibus_info)) {
pci_addr |=
((u64) pcibus_info->
pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
} else
pci_addr |= (dma_flags & SN_DMA_MSI) ?
TIOCP_PCI64_CMDTYPE_MSI :
TIOCP_PCI64_CMDTYPE_MEM;
/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
pci_addr |= PCI64_ATTR_VIRTUAL;
return pci_addr;
}
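/*
 * Illustrative note (not in the original source): the 64-bit direct
 * path is pure address composition, with no ATE or map state touched:
 *
 *	pci_addr = xio_addr | dma_attributes | target/cmdtype bits
 *
 * which is why this path never exhausts a resource, unlike the 32-bit
 * ATE path above.
 */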
static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
u64 xio_addr;
u64 xio_base;
u64 offset;
u64 endoff;
if (IS_PCIX(pcibus_info)) {
return 0;
}
if (dma_flags & SN_DMA_MSI)
return 0;
if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr);
else
xio_addr = paddr;
xio_base = pcibus_info->pbi_dir_xbase;
offset = xio_addr - xio_base;
endoff = req_size + offset;
if ((req_size > (1ULL << 31)) || /* Too Big */
(xio_addr < xio_base) || /* Out of range for mappings */
(endoff > (1ULL << 31))) { /* Too Big */
return 0;
}
return PCI32_DIRECT_BASE | offset;
}
/*
* Wrapper routine for freeing DMA maps.
* Direct 64 and 32 mappings carry no per-map state; only PCI32
* mapped (ATE) handles have ATEs to release.
*/
void
pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
{
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
struct pcibus_info *pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_pcibus_info;
if (IS_PCI32_MAPPED(dma_handle)) {
int ate_index;
ate_index =
IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE));
pcibr_ate_free(pcibus_info, ate_index);
}
}
/*
* On SN systems there is a race condition between a PIO read response and
* DMAs. In rare cases, the read response may beat the DMA, causing the
* driver to think that data in memory is complete and meaningful. This code
* eliminates that race. This routine is called by the PIO read routines
* after doing the read. For PIC this routine then forces a fake interrupt
* on another line, which is logically associated with the slot that the PIO
* is addressed to. It then spins while watching the memory location that
* the interrupt is targeted to. When the interrupt response arrives, we
* are sure that the DMA has landed in memory and it is safe for the driver
* to proceed. For TIOCP use the Device(x) Write Request Buffer Flush
* Bridge register since it ensures the data has entered the coherence domain,
* unlike the PIC Device(x) Write Request Buffer Flush register.
*/
void sn_dma_flush(u64 addr)
{
nasid_t nasid;
int is_tio;
int wid_num;
int i, j;
unsigned long flags;
u64 itte;
struct hubdev_info *hubinfo;
struct sn_flush_device_kernel *p;
struct sn_flush_device_common *common;
struct sn_flush_nasid_entry *flush_nasid_list;
if (!sn_ioif_inited)
return;
nasid = NASID_GET(addr);
if (-1 == nasid_to_cnodeid(nasid))
return;
hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;
BUG_ON(!hubinfo);
flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
if (flush_nasid_list->widget_p == NULL)
return;
is_tio = (nasid & 1);
if (is_tio) {
int itte_index;
if (TIO_HWIN(addr))
itte_index = 0;
else if (TIO_BWIN_WINDOWNUM(addr))
itte_index = TIO_BWIN_WINDOWNUM(addr);
else
itte_index = -1;
if (itte_index >= 0) {
itte = flush_nasid_list->iio_itte[itte_index];
if (!TIO_ITTE_VALID(itte))
return;
wid_num = TIO_ITTE_WIDGET(itte);
} else
wid_num = TIO_SWIN_WIDGETNUM(addr);
} else {
if (BWIN_WINDOWNUM(addr)) {
itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
wid_num = IIO_ITTE_WIDGET(itte);
} else
wid_num = SWIN_WIDGETNUM(addr);
}
if (flush_nasid_list->widget_p[wid_num] == NULL)
return;
p = &flush_nasid_list->widget_p[wid_num][0];
/* find a matching BAR */
for (i = 0; i < DEV_PER_WIDGET; i++, p++) {
common = p->common;
for (j = 0; j < PCI_ROM_RESOURCE; j++) {
if (common->sfdl_bar_list[j].start == 0)
break;
if (addr >= common->sfdl_bar_list[j].start
&& addr <= common->sfdl_bar_list[j].end)
break;
}
if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
break;
}
/* if no matching BAR, return without doing anything. */
if (i == DEV_PER_WIDGET)
return;
/*
* For TIOCP use the Device(x) Write Request Buffer Flush Bridge
* register since it ensures the data has entered the coherence
* domain, unlike PIC.
*/
if (is_tio) {
/*
* Note: devices behind TIOCE should never be matched in the
* above code, and so the following code is PIC/CP centric.
* If CE ever needs the sn_dma_flush mechanism, we will have
* to account for that here and in tioce_bus_fixup().
*/
u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
u32 revnum = XWIDGET_PART_REV_NUM(tio_id);
/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
return;
} else {
pcireg_wrb_flush_get(common->sfdl_pcibus_info,
(common->sfdl_slot - 1));
}
} else {
spin_lock_irqsave(&p->sfdl_flush_lock, flags);
*common->sfdl_flush_addr = 0;
/* force an interrupt. */
*(volatile u32 *)(common->sfdl_force_int_addr) = 1;
/* wait for the interrupt to come back. */
while (*(common->sfdl_flush_addr) != 0x10f)
cpu_relax();
/* okay, everything is synched up. */
spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
}
return;
}
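/*
 * Illustrative sketch (assumption, not part of the original file): a
 * PIO read helper would follow the MMIO read with sn_dma_flush() on
 * the address just read, so any DMA racing the read response has
 * landed before the driver inspects memory. example_pio_readl() is
 * hypothetical.
 */
static u32 example_pio_readl(void __iomem *mmio)
{
	u32 val = readl(mmio);

	sn_dma_flush((u64)(unsigned long)mmio);	/* close the PIO/DMA race */
	return val;
}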
/*
* DMA interfaces. Called from pci_dma.c routines.
*/
dma_addr_t
pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
/* SN cannot support DMA addresses smaller than 32 bits. */
if (hwdev->dma_mask < 0x7fffffff) {
return 0;
}
if (hwdev->dma_mask == ~0UL) {
/*
* Handle the most common case: 64 bit cards. This
* call should always succeed.
*/
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_PREF, dma_flags);
} else {
/* Handle 32-63 bit cards via direct mapping */
dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
size, 0, dma_flags);
if (!dma_handle) {
/*
* It is a 32 bit card and we cannot do direct mapping,
* so we use an ATE.
*/
dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
size, PCI32_ATE_PREF,
dma_flags);
}
}
return dma_handle;
}
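/*
 * Illustrative sketch (assumption, not in the original file): the
 * pci_dma.c callers treat a zero return as failure; passing
 * SN_DMA_ADDR_PHYS as the flags value is an assumption made for this
 * example, and example_map_single() is hypothetical.
 */
static dma_addr_t example_map_single(struct pci_dev *pdev, void *cpu_addr,
				     size_t size)
{
	dma_addr_t dma_addr = pcibr_dma_map(pdev, __pa(cpu_addr), size,
					    SN_DMA_ADDR_PHYS);

	if (!dma_addr)
		dev_err(&pdev->dev, "SN DMA map of %zu bytes failed\n", size);
	return dma_addr;
}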
dma_addr_t
pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
if (hwdev->dev.coherent_dma_mask == ~0UL) {
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_BAR, dma_flags);
} else {
dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
phys_addr, size,
PCI32_ATE_BAR, dma_flags);
}
return dma_handle;
}
EXPORT_SYMBOL(sn_dma_flush);


@@ -0,0 +1,263 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2004, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/pic.h>
#include <asm/sn/sn2/sn_hwperf.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
int
sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
char **ssdt)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
u64 segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
0, 0);
return (int)ret_stuff.v0;
}
int
sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
void *resp)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
u64 segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE,
segment, busnum, (u64) device, (u64) action,
(u64) resp, 0, 0);
return (int)ret_stuff.v0;
}
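/*
 * Illustrative sketch (assumption, not in the original file): a slot
 * power-up path would pair the enable call with error handling roughly
 * as below. The response word layout is PROM-defined and left opaque;
 * example_enable_slot() is hypothetical.
 */
static int example_enable_slot(struct pcibus_info *soft, int device)
{
	u64 resp = 0;
	char *ssdt = NULL;
	int rc = sal_pcibr_slot_enable(soft, device, &resp, &ssdt);

	if (rc)
		return -EIO;	/* PROM refused to power up the slot */
	/* ... rescan the bus; hand 'ssdt' to ACPI if the PROM supplied one ... */
	return 0;
}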
static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
int segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
(u64) segment, (u64) busnum, 0, 0, 0, 0, 0);
return (int)ret_stuff.v0;
}
u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
{
long rc;
u16 uninitialized_var(ioboard); /* GCC be quiet */
nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);
if (rc) {
printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
rc);
return 0;
}
return ioboard;
}
/*
* PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
* bridge sends an error interrupt.
*/
static irqreturn_t
pcibr_error_intr_handler(int irq, void *arg)
{
struct pcibus_info *soft = arg;
if (sal_pcibr_error_interrupt(soft) < 0)
panic("pcibr_error_intr_handler(): Fatal Bridge Error");
return IRQ_HANDLED;
}
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
int nasid, cnode, j;
struct hubdev_info *hubdev_info;
struct pcibus_info *soft;
struct sn_flush_device_kernel *sn_flush_device_kernel;
struct sn_flush_device_common *common;
if (!IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
return NULL;
}
/*
* Allocate kernel bus soft and copy from prom.
*/
soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
if (!soft) {
return NULL;
}
memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
soft->pbi_buscommon.bs_base = (unsigned long)
ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
sizeof(struct pic));
spin_lock_init(&soft->pbi_lock);
/*
* register the bridge's error interrupt handler
*/
if (request_irq(SGI_PCIASIC_ERROR, pcibr_error_intr_handler,
IRQF_SHARED, "PCIBR error", (void *)(soft))) {
printk(KERN_WARNING
"pcibr cannot allocate interrupt for error handler\n");
}
sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);
/*
* Update the Bridge with the "kernel" pagesize
*/
if (PAGE_SIZE < 16384) {
pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
} else {
pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
}
nasid = NASID_GET(soft->pbi_buscommon.bs_base);
cnode = nasid_to_cnodeid(nasid);
hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
if (hubdev_info->hdi_flush_nasid_list.widget_p) {
sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
widget_p[(int)soft->pbi_buscommon.bs_xid];
if (sn_flush_device_kernel) {
for (j = 0; j < DEV_PER_WIDGET;
j++, sn_flush_device_kernel++) {
common = sn_flush_device_kernel->common;
if (common->sfdl_slot == -1)
continue;
if ((common->sfdl_persistent_segment ==
soft->pbi_buscommon.bs_persist_segment) &&
(common->sfdl_persistent_busnum ==
soft->pbi_buscommon.bs_persist_busnum))
common->sfdl_pcibus_info =
soft;
}
}
}
/* Setup the PMU ATE map */
soft->pbi_int_ate_resource.lowest_free_index = 0;
soft->pbi_int_ate_resource.ate =
kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
if (!soft->pbi_int_ate_resource.ate) {
kfree(soft);
return NULL;
}
return soft;
}
void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
if (!sn_irq_info->irq_bridge)
return;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
pdi_pcibus_info;
pcireg_force_intr_set(pcibus_info, bit);
}
}
void pcibr_target_interrupt(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
u64 xtalk_addr = sn_irq_info->irq_xtalkaddr;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
pdi_pcibus_info;
/* Disable the device's IRQ */
pcireg_intr_enable_bit_clr(pcibus_info, (1 << bit));
/* Change the device's IRQ */
pcireg_intr_addr_addr_set(pcibus_info, bit, xtalk_addr);
/* Re-enable the device's IRQ */
pcireg_intr_enable_bit_set(pcibus_info, (1 << bit));
pcibr_force_interrupt(sn_irq_info);
}
}
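/*
 * Note (editorial inference): the trailing pcibr_force_interrupt()
 * presumably replays any interrupt that fired while the enable bit was
 * clear, so an edge is not lost across the retarget.
 */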
/*
* Provider entries for PIC/CP
*/
struct sn_pcibus_provider pcibr_provider = {
.dma_map = pcibr_dma_map,
.dma_map_consistent = pcibr_dma_map_consistent,
.dma_unmap = pcibr_dma_unmap,
.bus_fixup = pcibr_bus_fixup,
.force_interrupt = pcibr_force_interrupt,
.target_interrupt = pcibr_target_interrupt
};
int
pcibr_init_provider(void)
{
sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pcibr_provider;
sn_pci_provider[PCIIO_ASIC_TYPE_TIOCP] = &pcibr_provider;
return 0;
}
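/*
 * Illustrative sketch (assumption, not in the original file): the
 * generic SN PCI layer indexes sn_pci_provider[] by the bus ASIC type,
 * so after pcibr_init_provider() a map on a PIC or TIOCP bus dispatches
 * roughly like this. The SN_PCIDEV_BUSSOFT() lookup macro is assumed.
 */
static dma_addr_t example_provider_map(struct pci_dev *pdev,
				       unsigned long phys, size_t size)
{
	struct sn_pcibus_provider *provider =
	    sn_pci_provider[SN_PCIDEV_BUSSOFT(pdev)->bs_asic_type];

	return provider->dma_map(pdev, phys, size, SN_DMA_ADDR_PHYS);
}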
EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable);
EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable);
EXPORT_SYMBOL_GPL(sn_ioboard_to_pci_bus);


@@ -0,0 +1,285 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
#include <linux/types.h>
#include <asm/sn/io.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pic.h>
#include <asm/sn/tiocp.h>
union br_ptr {
struct tiocp tio;
struct pic pic;
};
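/*
 * Every accessor below resolves this union against pbi_bridge_type:
 * TIOCP bridges are touched through tio.*, PIC bridges through pic.*,
 * and any other bridge type panics as a programming error.
 */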
/*
* Control Register Access -- Read/Write 0000_0020
*/
void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_clrq_relaxed(&ptr->tio.cp_control, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_clrq_relaxed(&ptr->pic.p_wid_control, bits);
break;
default:
panic
("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_setq_relaxed(&ptr->tio.cp_control, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_setq_relaxed(&ptr->pic.p_wid_control, bits);
break;
default:
panic
("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
*/
u64 pcireg_tflush_get(struct pcibus_info *pcibus_info)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = __sn_readq_relaxed(&ptr->tio.cp_tflush);
break;
case PCIBR_BRIDGETYPE_PIC:
ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush);
break;
default:
panic
("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
ptr);
}
}
/* Read of the Target Flush should always return zero */
if (ret != 0)
panic("pcireg_tflush_get:Target Flush failed\n");
return ret;
}
/*
* Interrupt Status Register Access -- Read Only 0000_0100
*/
u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = __sn_readq_relaxed(&ptr->tio.cp_int_status);
break;
case PCIBR_BRIDGETYPE_PIC:
ret = __sn_readq_relaxed(&ptr->pic.p_int_status);
break;
default:
panic
("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
ptr);
}
}
return ret;
}
/*
* Interrupt Enable Register Access -- Read/Write 0000_0108
*/
void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_clrq_relaxed(&ptr->pic.p_int_enable, bits);
break;
default:
panic
("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_setq_relaxed(&ptr->tio.cp_int_enable, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_setq_relaxed(&ptr->pic.p_int_enable, bits);
break;
default:
panic
("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
*/
void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
u64 addr)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n],
TIOCP_HOST_INTR_ADDR);
__sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n],
(addr & TIOCP_HOST_INTR_ADDR));
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n],
PIC_HOST_INTR_ADDR);
__sn_setq_relaxed(&ptr->pic.p_int_addr[int_n],
(addr & PIC_HOST_INTR_ADDR));
break;
default:
panic
("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
*/
void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
writeq(1, &ptr->tio.cp_force_pin[int_n]);
break;
case PCIBR_BRIDGETYPE_PIC:
writeq(1, &ptr->pic.p_force_pin[int_n]);
break;
default:
panic
("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
*/
u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret =
__sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]);
break;
case PCIBR_BRIDGETYPE_PIC:
ret =
__sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]);
break;
default:
panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", ptr);
}
}
/* Read of the Write Buffer Flush should always return zero */
return ret;
}
void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
u64 val)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]);
break;
case PCIBR_BRIDGETYPE_PIC:
writeq(val, &ptr->pic.p_int_ate_ram[ate_index]);
break;
default:
panic
("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 __iomem *ret = NULL;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = &ptr->tio.cp_int_ate_ram[ate_index];
break;
case PCIBR_BRIDGETYPE_PIC:
ret = &ptr->pic.p_int_ate_ram[ate_index];
break;
default:
panic
("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
ptr);
}
}
return ret;
}