first support for v2 irq core

Ralph Metzler 2016-04-15 18:08:51 +02:00
parent 92d8b37839
commit 8f30246ff1
5 changed files with 329 additions and 192 deletions
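For orientation: this commit moves interrupt dispatch from one flat handler[] array to per-link tables indexed as handler[link][bit], and adds a second-generation ("v2") interrupt register block with its own top-level handler. A minimal sketch of what the IRQ_HANDLE()/LINK_IRQ_HANDLE() macros in the first file expand to per bit; the helper name dispatch_link_irq is hypothetical, the fields are the driver's own:

/* Sketch: dispatch one interrupt bit of one link via the new two-dimensional tables. */
static void dispatch_link_irq(struct ddb *dev, u32 link, u32 bit, u32 status)
{
        if ((status & (1UL << bit)) && dev->handler[link][bit])
                dev->handler[link][bit](dev->handler_data[link][bit]);
}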

View File

@@ -54,6 +54,10 @@ static int xo2_speed = 2;
module_param(xo2_speed, int, 0444);
MODULE_PARM_DESC(xo2_speed, "default transfer speed for xo2 based duoflex, 0=55,1=75,2=90,3=104 MBit/s, default=2, use attribute to change for individual cards");
static int alt_dma = 0;
module_param(alt_dma, int, 0444);
MODULE_PARM_DESC(alt_dma, "use alternative DMA buffer handling");
#define DDB_MAX_ADAPTER 64
static struct ddb *ddbs[DDB_MAX_ADAPTER];
@@ -203,7 +207,6 @@ static int ddb_redirect(u32 i, u32 p)
/****************************************************************************/
/****************************************************************************/
#ifdef DDB_ALT_DMA
static void dma_free(struct pci_dev *pdev, struct ddb_dma *dma, int dir)
{
int i;
@@ -212,54 +215,16 @@ static void dma_free(struct pci_dev *pdev, struct ddb_dma *dma, int dir)
return;
for (i = 0; i < dma->num; i++) {
if (dma->vbuf[i]) {
dma_unmap_single(&pdev->dev, dma->pbuf[i],
dma->size,
dir ? DMA_TO_DEVICE :
DMA_FROM_DEVICE);
kfree(dma->vbuf[i]);
dma->vbuf[i] = NULL;
}
}
}
static int dma_alloc(struct pci_dev *pdev, struct ddb_dma *dma, int dir)
{
int i;
if (!dma)
return 0;
for (i = 0; i < dma->num; i++) {
dma->vbuf[i] = kmalloc(dma->size, __GFP_REPEAT);
if (!dma->vbuf[i])
return -ENOMEM;
dma->pbuf[i] = dma_map_single(&pdev->dev, dma->vbuf[i],
dma->size,
dir ? DMA_TO_DEVICE :
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, dma->pbuf[i])) {
kfree(dma->vbuf[i]);
return -ENOMEM;
}
}
return 0;
}
#else
static void dma_free(struct pci_dev *pdev, struct ddb_dma *dma, int dir)
{
int i;
if (!dma)
return;
for (i = 0; i < dma->num; i++) {
if (dma->vbuf[i]) {
#if 0
pci_free_consistent(pdev, dma->size,
dma->vbuf[i], dma->pbuf[i]);
#else
dma_free_coherent(&pdev->dev, dma->size,
dma->vbuf[i], dma->pbuf[i]);
#endif
if (alt_dma) {
dma_unmap_single(&pdev->dev, dma->pbuf[i],
dma->size,
dir ? DMA_TO_DEVICE :
DMA_FROM_DEVICE);
kfree(dma->vbuf[i]);
} else
dma_free_coherent(&pdev->dev, dma->size,
dma->vbuf[i],
dma->pbuf[i]);
dma->vbuf[i] = 0;
}
}
@@ -272,19 +237,30 @@ static int dma_alloc(struct pci_dev *pdev, struct ddb_dma *dma, int dir)
if (!dma)
return 0;
for (i = 0; i < dma->num; i++) {
#if 0
dma->vbuf[i] = pci_alloc_consistent(pdev, dma->size,
&dma->pbuf[i]);
#else
dma->vbuf[i] = dma_alloc_coherent(&pdev->dev, dma->size,
&dma->pbuf[i], GFP_KERNEL);
#endif
if (!dma->vbuf[i])
return -ENOMEM;
if (alt_dma) {
dma->vbuf[i] = kmalloc(dma->size, __GFP_REPEAT);
if (!dma->vbuf[i])
return -ENOMEM;
dma->pbuf[i] = dma_map_single(&pdev->dev,
dma->vbuf[i],
dma->size,
dir ? DMA_TO_DEVICE :
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, dma->pbuf[i])) {
kfree(dma->vbuf[i]);
return -ENOMEM;
}
} else {
dma->vbuf[i] = dma_alloc_coherent(&pdev->dev,
dma->size,
&dma->pbuf[i],
GFP_KERNEL);
if (!dma->vbuf[i])
return -ENOMEM;
}
}
return 0;
}
#endif
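The two strategies selected by alt_dma differ in who guarantees coherency: dma_alloc_coherent() hands out a buffer the CPU and the device see consistently, while the kmalloc()+dma_map_single() path creates a streaming mapping that must be synced around every access — which is why the alt_dma checks reappear around dma_sync_single_for_cpu()/_for_device() in ddb_output_write(), ddb_input_read() and input_write_dvb() below. A condensed sketch of both halves of that contract, assuming the usual kernel DMA headers (<linux/dma-mapping.h>, <linux/slab.h>); the names alloc_one_buf, cpu_reads_buf and cpu_wrote_buf are hypothetical, and GFP_KERNEL stands in for the driver's __GFP_REPEAT:

/* Sketch: allocate one DMA buffer either way; dir is DMA_TO_DEVICE or DMA_FROM_DEVICE. */
static int alloc_one_buf(struct device *dev, size_t size, bool streaming,
                         enum dma_data_direction dir,
                         void **vbuf, dma_addr_t *pbuf)
{
        if (streaming) {                        /* alt_dma != 0 */
                *vbuf = kmalloc(size, GFP_KERNEL);
                if (!*vbuf)
                        return -ENOMEM;
                *pbuf = dma_map_single(dev, *vbuf, size, dir);
                if (dma_mapping_error(dev, *pbuf)) {
                        kfree(*vbuf);
                        return -ENOMEM;
                }
        } else {                                /* default: coherent buffer */
                *vbuf = dma_alloc_coherent(dev, size, pbuf, GFP_KERNEL);
                if (!*vbuf)
                        return -ENOMEM;
        }
        return 0;
}

/* Streaming mappings only: hand the buffer to the CPU before reading it ... */
static void cpu_reads_buf(struct device *dev, dma_addr_t pbuf, size_t size)
{
        dma_sync_single_for_cpu(dev, pbuf, size, DMA_FROM_DEVICE);
}

/* ... and back to the device after the CPU has filled it. */
static void cpu_wrote_buf(struct device *dev, dma_addr_t pbuf, size_t size)
{
        dma_sync_single_for_device(dev, pbuf, size, DMA_TO_DEVICE);
}

Coherent buffers need neither call, which is why both sync sites in the following hunks are guarded by if (alt_dma).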
static int ddb_buffers_alloc(struct ddb *dev)
{
@@ -586,12 +562,11 @@ static ssize_t ddb_output_write(struct ddb_output *output,
output->dma->coff,
buf, len))
return -EIO;
#ifdef DDB_ALT_DMA
dma_sync_single_for_device(dev->dev,
output->dma->pbuf[
output->dma->cbuf],
output->dma->size, DMA_TO_DEVICE);
#endif
if (alt_dma)
dma_sync_single_for_device(dev->dev,
output->dma->pbuf[
output->dma->cbuf],
output->dma->size, DMA_TO_DEVICE);
left -= len;
buf += len;
output->dma->coff += len;
@@ -719,11 +694,10 @@ static size_t ddb_input_read(struct ddb_input *input,
free = input->dma->size - input->dma->coff;
if (free > left)
free = left;
#ifdef DDB_ALT_DMA
dma_sync_single_for_cpu(dev->dev,
input->dma->pbuf[input->dma->cbuf],
input->dma->size, DMA_FROM_DEVICE);
#endif
if (alt_dma)
dma_sync_single_for_cpu(dev->dev,
input->dma->pbuf[input->dma->cbuf],
input->dma->size, DMA_FROM_DEVICE);
ret = copy_to_user(buf, input->dma->vbuf[input->dma->cbuf] +
input->dma->coff, free);
if (ret)
@@ -2242,6 +2216,15 @@ static void ddb_port_probe(struct ddb_port *port)
port->class = DDB_PORT_MOD;
return;
}
if (dev->link[l].info->type == DDB_OCTOPRO_HDIN) {
if (port->nr == 0) {
dev->link[l].info->type = DDB_OCTOPUS;
port->name = "HDIN";
port->class = DDB_PORT_LOOP;
}
return;
}
if (dev->link[l].info->type == DDB_OCTOPUS_MAX) {
port->name = "DUAL DVB-S2 MAX";
@@ -2838,10 +2821,9 @@ static void input_write_dvb(struct ddb_input *input,
/*pr_err("Overflow dma %d\n", dma->nr);*/
ack = 1;
}
#ifdef DDB_ALT_DMA
dma_sync_single_for_cpu(dev->dev, dma2->pbuf[dma->cbuf],
dma2->size, DMA_FROM_DEVICE);
#endif
if (alt_dma)
dma_sync_single_for_cpu(dev->dev, dma2->pbuf[dma->cbuf],
dma2->size, DMA_FROM_DEVICE);
dvb_dmx_swfilter_packets(&dvb->demux,
dma2->vbuf[dma->cbuf],
dma2->size / 188);
@@ -2967,8 +2949,8 @@ static void ddb_input_init(struct ddb_port *port, int nr, int pnr,
struct ddb_input *input = &dev->input[anr];
if (dev->has_dma) {
dev->handler[dma_nr + 8] = input_handler;
dev->handler_data[dma_nr + 8] = (unsigned long) input;
dev->handler[0][dma_nr + 8] = input_handler;
dev->handler_data[0][dma_nr + 8] = (unsigned long) input;
}
port->input[pnr] = input;
input->nr = nr;
@@ -2990,8 +2972,8 @@ static void ddb_output_init(struct ddb_port *port, int nr, int dma_nr)
struct ddb_output *output = &dev->output[nr];
if (dev->has_dma) {
dev->handler[dma_nr + 8] = output_handler;
dev->handler_data[dma_nr + 8] = (unsigned long) output;
dev->handler[0][dma_nr + 8] = output_handler;
dev->handler_data[0][dma_nr + 8] = (unsigned long) output;
}
port->output = output;
output->nr = nr;
@@ -3115,9 +3097,9 @@ static void ddb_ports_init(struct ddb *dev)
break;
case DDB_MOD:
ddb_output_init(port, i, i);
dev->handler[i + 18] =
dev->handler[0][i + 18] =
ddbridge_mod_rate_handler;
dev->handler_data[i + 18] =
dev->handler_data[0][i + 18] =
(unsigned long) &dev->output[i];
break;
default:
@@ -3158,8 +3140,8 @@ static void ddb_ports_release(struct ddb *dev)
/****************************************************************************/
#define IRQ_HANDLE(_nr) \
do { if ((s & (1UL << _nr)) && dev->handler[_nr]) \
dev->handler[_nr](dev->handler_data[_nr]); } \
do { if ((s & (1UL << _nr)) && dev->handler[0][_nr]) \
dev->handler[0][_nr](dev->handler_data[0][_nr]); } \
while (0)
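The macro indexes the same [link][bit] pairs that the init code fills in: row 0 is the local device (ddb_input_init()/ddb_output_init() above), while GTL links and per-link I2C land in the higher rows (see LINK_IRQ_HANDLE() below and ddb_i2c_init() in the second file). A small registration sketch with hypothetical names (register_input_handler, my_input_handler):

/* Sketch: hook a handler into interrupt bit `bit` of link `link`. */
static void my_input_handler(unsigned long data)
{
        struct ddb_input *input = (struct ddb_input *) data;

        (void) input;   /* a real handler would drain input->dma buffers here */
}

static void register_input_handler(struct ddb *dev, u32 link, u32 bit,
                                   struct ddb_input *input)
{
        dev->handler_data[link][bit] = (unsigned long) input;
        dev->handler[link][bit] = my_input_handler;
}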
static void irq_handle_msg(struct ddb *dev, u32 s)
@@ -3272,6 +3254,46 @@ static irqreturn_t irq_handler(int irq, void *dev_id)
return ret;
}
static irqreturn_t irq_handle_v2_n(struct ddb *dev, u32 n)
{
u32 reg = INTERRUPT_V2_STATUS + 4 + 4 * n;
u32 s = ddbreadl(dev, reg);
u32 off = n * 32;
if (!s)
return IRQ_NONE;
ddbwritel(dev, s, reg);
if ((s & 0x000000ff)) {
IRQ_HANDLE(0 + off);
IRQ_HANDLE(1 + off);
IRQ_HANDLE(2 + off);
IRQ_HANDLE(3 + off);
IRQ_HANDLE(4 + off);
IRQ_HANDLE(5 + off);
IRQ_HANDLE(6 + off);
IRQ_HANDLE(7 + off);
}
return IRQ_HANDLED;
}
static irqreturn_t irq_handler_v2(int irq, void *dev_id)
{
struct ddb *dev = (struct ddb *) dev_id;
u32 s = ddbreadl(dev, INTERRUPT_V2_STATUS);
int ret = IRQ_HANDLED;
if (!s)
return IRQ_NONE;
do {
if (s & 0x80000000)
return IRQ_NONE;
if (s & 0x00000001)
irq_handle_v2_n(dev, 0);
} while ((s = ddbreadl(dev, INTERRUPT_V2_STATUS)));
return ret;
}
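The v2 scheme splits interrupts into 32-bit banks: the summary word at INTERRUPT_V2_STATUS says which banks have something pending (the handler bails out with IRQ_NONE when bit 31 is set or the word is zero), and bit b of bank n maps onto handler slot n*32 + b, so bank 0 lands in the same handler[0][0..31] slots the v1 path uses. A sketch of that arithmetic; v2_slot() and v2_summary_is_ours() are hypothetical helpers:

/* Sketch: which handler[0][...] slot services bit `bit` of status bank `bank`. */
static inline u32 v2_slot(u32 bank, u32 bit)
{
        return bank * 32 + bit;         /* irq_handle_v2_n() uses off = n * 32 */
}

/* Sketch: a zero summary or bit 31 set means "nothing for us". */
static inline bool v2_summary_is_ours(u32 summary)
{
        return summary && !(summary & 0x80000000);
}

Only bank 0, and only its lowest eight bits, are actually dispatched in this first version.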
#ifdef DDB_TEST_THREADED
static irqreturn_t irq_thread(int irq, void *dev_id)
{
@@ -4492,9 +4514,9 @@ static void ddb_device_destroy(struct ddb *dev)
device_destroy(&ddb_class, MKDEV(ddb_major, dev->nr));
}
#define LINK_IRQ_HANDLE(_nr) \
do { if ((s & (1UL << _nr)) && dev->handler[_nr + off]) \
dev->handler[_nr + off](dev->handler_data[_nr + off]); } \
#define LINK_IRQ_HANDLE(_l, _nr) \
do { if ((s & (1UL << _nr)) && dev->handler[_l][_nr]) \
dev->handler[_l][_nr](dev->handler_data[_l][_nr]); } \
while (0)
static void gtl_link_handler(unsigned long priv)
@@ -4510,18 +4532,19 @@ static void link_tasklet(unsigned long data)
{
struct ddb_link *link = (struct ddb_link *) data;
struct ddb *dev = link->dev;
u32 s, off = 32 * link->nr, tag = DDB_LINK_TAG(link->nr);
u32 s, tag = DDB_LINK_TAG(link->nr);
u32 l = link->nr;
s = ddbreadl(dev, tag | INTERRUPT_STATUS);
pr_info("gtl_irq %08x = %08x\n", tag | INTERRUPT_STATUS, s);
if (!s)
return;
ddbwritel(dev, s, tag | INTERRUPT_ACK);
LINK_IRQ_HANDLE(0);
LINK_IRQ_HANDLE(1);
LINK_IRQ_HANDLE(2);
LINK_IRQ_HANDLE(3);
LINK_IRQ_HANDLE(l, 0);
LINK_IRQ_HANDLE(l, 1);
LINK_IRQ_HANDLE(l, 2);
LINK_IRQ_HANDLE(l, 3);
}
static void gtl_irq_handler(unsigned long priv)
@@ -4529,14 +4552,14 @@ static void gtl_irq_handler(unsigned long priv)
struct ddb_link *link = (struct ddb_link *) priv;
#if 1
struct ddb *dev = link->dev;
u32 s, off = 32 * link->nr, tag = DDB_LINK_TAG(link->nr);
u32 s, l = link->nr, tag = DDB_LINK_TAG(link->nr);
while ((s = ddbreadl(dev, tag | INTERRUPT_STATUS))) {
ddbwritel(dev, s, tag | INTERRUPT_ACK);
LINK_IRQ_HANDLE(0);
LINK_IRQ_HANDLE(1);
LINK_IRQ_HANDLE(2);
LINK_IRQ_HANDLE(3);
LINK_IRQ_HANDLE(l, 0);
LINK_IRQ_HANDLE(l, 1);
LINK_IRQ_HANDLE(l, 2);
LINK_IRQ_HANDLE(l, 3);
}
#else
tasklet_schedule(&link->tasklet);
@@ -4648,8 +4671,8 @@ static int ddb_gtl_init_link(struct ddb *dev, u32 l)
ddbwritel(dev, 1, 0x1a0);
dev->handler_data[11] = (unsigned long) link;
dev->handler[11] = gtl_irq_handler;
dev->handler_data[0][11] = (unsigned long) link;
dev->handler[0][11] = gtl_irq_handler;
pr_info("GTL %s\n", dev->link[l].info->name);
pr_info("GTL HW %08x REGMAP %08x\n",
@ -4669,8 +4692,8 @@ static int ddb_gtl_init(struct ddb *dev)
{
u32 l;
dev->handler_data[10] = (unsigned long) dev;
dev->handler[10] = gtl_link_handler;
dev->handler_data[0][10] = (unsigned long) dev;
dev->handler[0][10] = gtl_link_handler;
for (l = 1; l < dev->link[0].info->regmap->gtl->num + 1; l++)
ddb_gtl_init_link(dev, l);

View File

@@ -272,8 +272,8 @@ static int ddb_i2c_init(struct ddb *dev)
if (!(dev->link[l].info->i2c_mask & (1 << i)))
continue;
i2c = &dev->i2c[num];
dev->handler_data[i + l * 32] = (unsigned long) i2c;
dev->handler[i + l * 32] = i2c_handler;
dev->handler_data[l][i] = (unsigned long) i2c;
dev->handler[l][i] = i2c_handler;
stat = ddb_i2c_add(dev, i2c, regmap, l, i, num);
if (stat)
break;

View File

@@ -105,6 +105,26 @@
#define INTMASK_TSOUTPUT4 (0x00080000)
#define INTERRUPT_V2_CONTROL (INTERRUPT_BASE + 0x00)
#define INTERRUPT_V2_ENABLE_1 (INTERRUPT_BASE + 0x04)
#define INTERRUPT_V2_ENABLE_2 (INTERRUPT_BASE + 0x08)
#define INTERRUPT_V2_ENABLE_3 (INTERRUPT_BASE + 0x0c)
#define INTERRUPT_V2_ENABLE_4 (INTERRUPT_BASE + 0x10)
#define INTERRUPT_V2_ENABLE_5 (INTERRUPT_BASE + 0x14)
#define INTERRUPT_V2_ENABLE_6 (INTERRUPT_BASE + 0x18)
#define INTERRUPT_V2_ENABLE_7 (INTERRUPT_BASE + 0x1c)
#define INTERRUPT_V2_STATUS (INTERRUPT_BASE + 0x20)
#define INTERRUPT_V2_STATUS_1 (INTERRUPT_BASE + 0x04)
#define INTERRUPT_V2_STATUS_2 (INTERRUPT_BASE + 0x08)
#define INTERRUPT_V2_STATUS_3 (INTERRUPT_BASE + 0x0c)
#define INTERRUPT_V2_STATUS_4 (INTERRUPT_BASE + 0x10)
#define INTERRUPT_V2_STATUS_5 (INTERRUPT_BASE + 0x14)
#define INTERRUPT_V2_STATUS_6 (INTERRUPT_BASE + 0x18)
#define INTERRUPT_V2_STATUS_7 (INTERRUPT_BASE + 0x1c)
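How the new block is driven can be read off the other files in this commit: INTERRUPT_V2_CONTROL gates the whole block (0x0f to open, 0 to close), each ENABLE_n register unmasks one 32-bit bank, and the per-bank status words are read relative to INTERRUPT_V2_STATUS. A small sketch of the status access as irq_handle_v2_n() performs it; ddbreadl()/ddbwritel() are the driver's register accessors and the write-back is how the handler acknowledges the bits:

/* Sketch: read and acknowledge the status word of bank n (n = 0..7). */
static u32 v2_read_ack_bank(struct ddb *dev, u32 n)
{
        u32 reg = INTERRUPT_V2_STATUS + 4 + 4 * n;
        u32 s = ddbreadl(dev, reg);

        if (s)
                ddbwritel(dev, s, reg); /* write the set bits back, as the handler does */
        return s;
}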
/* Modulator registers */

View File

@@ -24,7 +24,6 @@
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*/
/*#define DDB_ALT_DMA*/
#define DDB_USE_WORK
/*#define DDB_TEST_THREADED*/
@@ -58,6 +57,29 @@ static void ddb_unmap(struct ddb *dev)
vfree(dev);
}
static void __devexit ddb_irq_exit(struct ddb *dev)
{
if ((dev->link[0].ids.regmapid & 0xffff0000) == 0x00020000) {
/* ddbwritel(dev, 0x00000000, INTERRUPT_V2_CONTROL); */
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_1);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_2);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_3);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_4);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_5);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_6);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_7);
} else {
ddbwritel(dev, 0, INTERRUPT_ENABLE);
ddbwritel(dev, 0, MSI1_ENABLE);
}
if (dev->msi == 2)
free_irq(dev->pdev->irq + 1, dev);
free_irq(dev->pdev->irq, dev);
#ifdef CONFIG_PCI_MSI
if (dev->msi)
pci_disable_msi(dev->pdev);
#endif
}
static void __devexit ddb_remove(struct pci_dev *pdev)
{
@@ -70,16 +92,7 @@ static void __devexit ddb_remove(struct pci_dev *pdev)
if (dev->link[0].info->ns_num)
ddbwritel(dev, 0, ETHER_CONTROL);
ddbwritel(dev, 0, INTERRUPT_ENABLE);
ddbwritel(dev, 0, MSI1_ENABLE);
if (dev->msi == 2)
free_irq(dev->pdev->irq + 1, dev);
free_irq(dev->pdev->irq, dev);
#ifdef CONFIG_PCI_MSI
if (dev->msi)
pci_disable_msi(dev->pdev);
#endif
ddb_irq_exit(dev);
ddb_ports_release(dev);
ddb_buffers_free(dev);
@@ -93,12 +106,138 @@ static void __devexit ddb_remove(struct pci_dev *pdev)
#define __devinitdata
#endif
static int __devinit ddb_irq_msi(struct ddb *dev, int nr)
{
int stat = 0;
#ifdef CONFIG_PCI_MSI
if (msi && pci_msi_enabled()) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
stat = pci_enable_msi_range(dev->pdev, 1, nr);
if (stat >= 1) {
dev->msi = stat;
pr_info("DDBridge: using %d MSI interrupt(s)\n",
dev->msi);
} else
pr_info("DDBridge: MSI not available.\n");
#else
stat = pci_enable_msi_block(dev->pdev, nr);
if (stat == 0) {
dev->msi = 2;
pr_info("DDBridge: using 2 MSI interrupts\n");
}
if (stat == 1)
stat = pci_enable_msi(dev->pdev);
if (stat < 0) {
pr_info("DDBridge: MSI not available.\n");
} else {
dev->msi++;
}
#endif
}
#endif
return stat;
}
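On kernels from 3.15 on, pci_enable_msi_range(pdev, 1, nr) either allocates between 1 and nr vectors and returns the count, or returns a negative error; the pre-3.15 pci_enable_msi_block()/pci_enable_msi() branch arrives at the same dev->msi count (0 meaning plain INTx). A sketch of that contract, reduced to the modern path; msi_vectors_or_none() is a hypothetical helper:

/* Sketch: try for up to `want` MSI vectors, report how many we got (0 = INTx). */
static int msi_vectors_or_none(struct pci_dev *pdev, int want)
{
        int got = pci_enable_msi_range(pdev, 1, want);

        return got < 0 ? 0 : got;
}

Callers only look at the count: ddb_irq_init2() asks for one vector, ddb_irq_init() for two, and both drop IRQF_SHARED once any MSI vector is in use.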
static int __devinit ddb_irq_init2(struct ddb *dev)
{
int stat;
int irq_flag = IRQF_SHARED;
pr_info("init type 2 IRQ hardware block\n");
ddbwritel(dev, 0x00000000, INTERRUPT_V2_CONTROL);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_1);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_2);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_3);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_4);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_5);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_6);
ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_7);
ddb_irq_msi(dev, 1);
if (dev->msi)
irq_flag = 0;
stat = request_irq(dev->pdev->irq, irq_handler_v2,
irq_flag, "ddbridge", (void *) dev);
if (stat < 0)
return stat;
ddbwritel(dev, 0x0f, INTERRUPT_V2_CONTROL);
ddbwritel(dev, 0xffffffff, INTERRUPT_V2_ENABLE_1);
ddbwritel(dev, 0xffffffff, INTERRUPT_V2_ENABLE_2);
ddbwritel(dev, 0xffffffff, INTERRUPT_V2_ENABLE_3);
ddbwritel(dev, 0xffffffff, INTERRUPT_V2_ENABLE_4);
ddbwritel(dev, 0xffffffff, INTERRUPT_V2_ENABLE_5);
ddbwritel(dev, 0xffffffff, INTERRUPT_V2_ENABLE_6);
ddbwritel(dev, 0xffffffff, INTERRUPT_V2_ENABLE_7);
return stat;
}
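ddb_irq_init2() follows the usual bring-up order: mask everything at the device, hook the interrupt handler, and only then open the control gate and unmask the banks, so nothing can fire before a handler is in place. The same shape, reduced to one enable register (bring_up_v2_irq() is a hypothetical helper):

/* Sketch: mask -> request_irq -> unmask, the order used above. */
static int bring_up_v2_irq(struct ddb *dev, unsigned long irq_flag)
{
        int stat;

        ddbwritel(dev, 0x00000000, INTERRUPT_V2_CONTROL);  /* gate closed */
        ddbwritel(dev, 0x00000000, INTERRUPT_V2_ENABLE_1); /* bank masked */
        stat = request_irq(dev->pdev->irq, irq_handler_v2,
                           irq_flag, "ddbridge", (void *) dev);
        if (stat < 0)
                return stat;
        ddbwritel(dev, 0x0f, INTERRUPT_V2_CONTROL);        /* gate open */
        ddbwritel(dev, 0xffffffff, INTERRUPT_V2_ENABLE_1); /* bank unmasked */
        return 0;
}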
static int __devinit ddb_irq_init(struct ddb *dev)
{
int stat;
int irq_flag = IRQF_SHARED;
if ((dev->link[0].ids.regmapid & 0xffff0000) == 0x00020000)
return ddb_irq_init2(dev);
ddbwritel(dev, 0x00000000, INTERRUPT_ENABLE);
ddbwritel(dev, 0x00000000, MSI1_ENABLE);
ddbwritel(dev, 0x00000000, MSI2_ENABLE);
ddbwritel(dev, 0x00000000, MSI3_ENABLE);
ddbwritel(dev, 0x00000000, MSI4_ENABLE);
ddbwritel(dev, 0x00000000, MSI5_ENABLE);
ddbwritel(dev, 0x00000000, MSI6_ENABLE);
ddbwritel(dev, 0x00000000, MSI7_ENABLE);
ddb_irq_msi(dev, 2);
if (dev->msi)
irq_flag = 0;
#ifdef CONFIG_PCI_MSI
if (dev->msi == 2) {
stat = request_irq(dev->pdev->irq, irq_handler0,
irq_flag, "ddbridge", (void *) dev);
if (stat < 0)
return stat;
stat = request_irq(dev->pdev->irq + 1, irq_handler1,
irq_flag, "ddbridge", (void *) dev);
if (stat < 0) {
free_irq(dev->pdev->irq, dev);
return stat;
}
} else
#endif
{
#ifdef DDB_TEST_THREADED
stat = request_threaded_irq(dev->pdev->irq, irq_handler,
irq_thread,
irq_flag,
"ddbridge", (void *) dev);
#else
stat = request_irq(dev->pdev->irq, irq_handler,
irq_flag, "ddbridge", (void *) dev);
#endif
if (stat < 0)
return stat;
}
/*ddbwritel(dev, 0xffffffff, INTERRUPT_ACK);*/
if (dev->msi == 2) {
ddbwritel(dev, 0x0fffff00, INTERRUPT_ENABLE);
ddbwritel(dev, 0x0000000f, MSI1_ENABLE);
} else {
ddbwritel(dev, 0x0fffff0f, INTERRUPT_ENABLE);
ddbwritel(dev, 0x00000000, MSI1_ENABLE);
}
return stat;
}
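Which of the two init paths runs is decided purely by the register-map version reported by the hardware, the same test ddb_irq_exit() uses for teardown. As a sketch (ddb_has_v2_irq() is a hypothetical name; the value checked is the one this commit introduces):

/* Sketch: regmap id 0x0002xxxx selects the v2 interrupt block. */
static bool ddb_has_v2_irq(struct ddb *dev)
{
        return (dev->link[0].ids.regmapid & 0xffff0000) == 0x00020000;
}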
static int __devinit ddb_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct ddb *dev;
int stat = 0;
int irq_flag = IRQF_SHARED;
if (pci_enable_device(pdev) < 0)
return -ENODEV;
@@ -151,69 +290,6 @@ static int __devinit ddb_probe(struct pci_dev *pdev,
ddbwritel(dev, 0x00, TS_OUTPUT_CONTROL(i));
usleep_range(5000, 6000);
}
ddbwritel(dev, 0x00000000, INTERRUPT_ENABLE);
ddbwritel(dev, 0x00000000, MSI1_ENABLE);
ddbwritel(dev, 0x00000000, MSI2_ENABLE);
ddbwritel(dev, 0x00000000, MSI3_ENABLE);
ddbwritel(dev, 0x00000000, MSI4_ENABLE);
ddbwritel(dev, 0x00000000, MSI5_ENABLE);
ddbwritel(dev, 0x00000000, MSI6_ENABLE);
ddbwritel(dev, 0x00000000, MSI7_ENABLE);
#ifdef CONFIG_PCI_MSI
if (msi && pci_msi_enabled()) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
stat = pci_enable_msi_range(dev->pdev, 1, 2);
if (stat >= 1) {
dev->msi = stat;
pr_info("DDBridge: using %d MSI interrupt(s)\n",
dev->msi);
irq_flag = 0;
} else
pr_info("DDBridge: MSI not available.\n");
#else
stat = pci_enable_msi_block(dev->pdev, 2);
if (stat == 0) {
dev->msi = 2;
pr_info("DDBridge: using 2 MSI interrupts\n");
}
if (stat == 1)
stat = pci_enable_msi(dev->pdev);
if (stat < 0) {
pr_info("DDBridge: MSI not available.\n");
} else {
irq_flag = 0;
dev->msi++;
}
#endif
}
if (dev->msi == 2) {
stat = request_irq(dev->pdev->irq, irq_handler0,
irq_flag, "ddbridge", (void *) dev);
if (stat < 0)
goto fail0;
stat = request_irq(dev->pdev->irq + 1, irq_handler1,
irq_flag, "ddbridge", (void *) dev);
if (stat < 0) {
free_irq(dev->pdev->irq, dev);
goto fail0;
}
} else
#endif
{
#ifdef DDB_TEST_THREADED
stat = request_threaded_irq(dev->pdev->irq, irq_handler,
irq_thread,
irq_flag,
"ddbridge", (void *) dev);
#else
stat = request_irq(dev->pdev->irq, irq_handler,
irq_flag, "ddbridge", (void *) dev);
#endif
if (stat < 0)
goto fail0;
}
ddbwritel(dev, 0, DMA_BASE_READ);
if (dev->link[0].info->type != DDB_MOD)
ddbwritel(dev, 0, DMA_BASE_WRITE);
@@ -223,22 +299,14 @@ static int __devinit ddb_probe(struct pci_dev *pdev,
dev->link[0].info->port_num = 4;
}
/*ddbwritel(dev, 0xffffffff, INTERRUPT_ACK);*/
if (dev->msi == 2) {
ddbwritel(dev, 0x0fffff00, INTERRUPT_ENABLE);
ddbwritel(dev, 0x0000000f, MSI1_ENABLE);
} else {
ddbwritel(dev, 0x0fffff0f, INTERRUPT_ENABLE);
ddbwritel(dev, 0x00000000, MSI1_ENABLE);
}
stat = ddb_irq_init(dev);
if (stat < 0)
goto fail0;
if (ddb_init(dev) == 0)
return 0;
ddbwritel(dev, 0, INTERRUPT_ENABLE);
ddbwritel(dev, 0, MSI1_ENABLE);
free_irq(dev->pdev->irq, dev);
if (dev->msi == 2)
free_irq(dev->pdev->irq + 1, dev);
ddb_irq_exit(dev);
fail0:
pr_err("fail0\n");
if (dev->msi)
@@ -456,6 +524,24 @@ static struct ddb_info ddb_octopus_net = {
.mdio_num = 1,
};
static struct ddb_info ddb_octopro_hdin = {
.type = DDB_OCTOPRO_HDIN,
.name = "Digital Devices OctopusNet Pro HDIN",
.regmap = &octopus_map,
.port_num = 1,
.i2c_mask = 0x00,
.mdio_num = 1,
};
static struct ddb_info ddb_octopro = {
.type = DDB_OCTOPRO,
.name = "Digital Devices OctopusNet Pro",
.regmap = &octopus_map,
.port_num = 1,
.i2c_mask = 0x00,
.mdio_num = 1,
};
/****************************************************************************/
/****************************************************************************/
/****************************************************************************/
@@ -493,7 +579,13 @@ static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
DDB_ID(DDVID, 0x0013, DDVID, 0x0043, ddb_ci_s2_pro),
DDB_ID(DDVID, 0x0201, DDVID, 0x0001, ddb_mod),
DDB_ID(DDVID, 0x0201, DDVID, 0x0002, ddb_mod),
/* testing on OctopusNet Pro */
DDB_ID(DDVID, 0x0320, PCI_ANY_ID, PCI_ANY_ID, ddb_octopus_net),
DDB_ID(DDVID, 0x0321, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
DDB_ID(DDVID, 0x0322, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
DDB_ID(DDVID, 0x0323, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
DDB_ID(DDVID, 0x0328, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
DDB_ID(DDVID, 0x0329, PCI_ANY_ID, PCI_ANY_ID, ddb_octopro_hdin),
/* in case sub-ids got deleted in flash */
DDB_ID(DDVID, 0x0003, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
DDB_ID(DDVID, 0x0005, PCI_ANY_ID, PCI_ANY_ID, ddb_none),

View File

@@ -148,6 +148,8 @@ struct ddb_info {
#define DDB_OCTONET 4
#define DDB_OCTOPUS_MAX 5
#define DDB_OCTOPUS_MAX_CT 6
#define DDB_OCTOPRO 7
#define DDB_OCTOPRO_HDIN 8
char *name;
u32 i2c_mask;
u8 port_num;
@@ -415,8 +417,8 @@ struct ddb {
struct dvb_adapter adap[DDB_MAX_INPUT];
struct ddb_dma dma[DDB_MAX_INPUT + DDB_MAX_OUTPUT];
void (*handler[128])(unsigned long);
unsigned long handler_data[128];
void (*handler[4][128])(unsigned long);
unsigned long handler_data[4][128];
struct device *ddb_dev;
u32 ddb_dev_users;