add idl4k kernel firmware version 1.13.0.105

Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions


@@ -0,0 +1,9 @@
obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
en_resources.o en_netdev.o


@@ -0,0 +1,436 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include "mlx4.h"
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
u32 obj;
spin_lock(&bitmap->lock);
obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
if (obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
obj = find_first_zero_bit(bitmap->table, bitmap->max);
}
if (obj < bitmap->max) {
set_bit(obj, bitmap->table);
bitmap->last = (obj + 1);
if (bitmap->last == bitmap->max)
bitmap->last = 0;
obj |= bitmap->top;
} else
obj = -1;
spin_unlock(&bitmap->lock);
return obj;
}
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
mlx4_bitmap_free_range(bitmap, obj, 1);
}
static unsigned long find_aligned_range(unsigned long *bitmap,
u32 start, u32 nbits,
int len, int align)
{
unsigned long end, i;
again:
start = ALIGN(start, align);
while ((start < nbits) && test_bit(start, bitmap))
start += align;
if (start >= nbits)
return -1;
end = start+len;
if (end > nbits)
return -1;
for (i = start + 1; i < end; i++) {
if (test_bit(i, bitmap)) {
start = i + 1;
goto again;
}
}
return start;
}
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
u32 obj, i;
if (likely(cnt == 1 && align == 1))
return mlx4_bitmap_alloc(bitmap);
spin_lock(&bitmap->lock);
obj = find_aligned_range(bitmap->table, bitmap->last,
bitmap->max, cnt, align);
if (obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
obj = find_aligned_range(bitmap->table, 0, bitmap->max,
cnt, align);
}
if (obj < bitmap->max) {
for (i = 0; i < cnt; i++)
set_bit(obj + i, bitmap->table);
if (obj == bitmap->last) {
bitmap->last = (obj + cnt);
if (bitmap->last >= bitmap->max)
bitmap->last = 0;
}
obj |= bitmap->top;
} else
obj = -1;
spin_unlock(&bitmap->lock);
return obj;
}
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
u32 i;
obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
for (i = 0; i < cnt; i++)
clear_bit(obj + i, bitmap->table);
bitmap->last = min(bitmap->last, obj);
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
spin_unlock(&bitmap->lock);
}
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top)
{
int i;
/* num must be a power of 2 */
if (num != roundup_pow_of_two(num))
return -EINVAL;
bitmap->last = 0;
bitmap->top = 0;
bitmap->max = num - reserved_top;
bitmap->mask = mask;
bitmap->reserved_top = reserved_top;
spin_lock_init(&bitmap->lock);
bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
sizeof (long), GFP_KERNEL);
if (!bitmap->table)
return -ENOMEM;
for (i = 0; i < reserved_bot; ++i)
set_bit(i, bitmap->table);
return 0;
}
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
kfree(bitmap->table);
}
/*
* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0. If the
* requested size is > max_direct, we split the allocation into
* multiple pages, so we don't require too much contiguous memory.
*/
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
struct mlx4_buf *buf)
{
dma_addr_t t;
if (size <= max_direct) {
buf->nbufs = 1;
buf->npages = 1;
buf->page_shift = get_order(size) + PAGE_SHIFT;
buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
size, &t, GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
buf->direct.map = t;
while (t & ((1 << buf->page_shift) - 1)) {
--buf->page_shift;
buf->npages *= 2;
}
memset(buf->direct.buf, 0, size);
} else {
int i;
buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list,
GFP_KERNEL);
if (!buf->page_list)
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
&t, GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
buf->page_list[i].map = t;
memset(buf->page_list[i].buf, 0, PAGE_SIZE);
}
if (BITS_PER_LONG == 64) {
struct page **pages;
pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
if (!pages)
goto err_free;
for (i = 0; i < buf->nbufs; ++i)
pages[i] = virt_to_page(buf->page_list[i].buf);
buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
kfree(pages);
if (!buf->direct.buf)
goto err_free;
}
}
return 0;
err_free:
mlx4_buf_free(dev, size, buf);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
int i;
if (buf->nbufs == 1)
dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
buf->direct.map);
else {
if (BITS_PER_LONG == 64)
vunmap(buf->direct.buf);
for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
struct mlx4_db_pgdir *pgdir;
pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
if (!pgdir)
return NULL;
bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
pgdir->bits[0] = pgdir->order0;
pgdir->bits[1] = pgdir->order1;
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
&pgdir->db_dma, GFP_KERNEL);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
}
return pgdir;
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
struct mlx4_db *db, int order)
{
int o;
int i;
for (o = order; o <= 1; ++o) {
i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
if (i < MLX4_DB_PER_PAGE >> o)
goto found;
}
return -ENOMEM;
found:
clear_bit(i, pgdir->bits[o]);
i <<= o;
if (o > order)
set_bit(i ^ 1, pgdir->bits[order]);
db->u.pgdir = pgdir;
db->index = i;
db->db = pgdir->db_page + db->index;
db->dma = pgdir->db_dma + db->index * 4;
db->order = order;
return 0;
}
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
int ret = 0;
mutex_lock(&priv->pgdir_mutex);
list_for_each_entry(pgdir, &priv->pgdir_list, list)
if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
goto out;
pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
if (!pgdir) {
ret = -ENOMEM;
goto out;
}
list_add(&pgdir->list, &priv->pgdir_list);
/* This should never fail -- we just allocated an empty page: */
WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
out:
mutex_unlock(&priv->pgdir_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int o;
int i;
mutex_lock(&priv->pgdir_mutex);
o = db->order;
i = db->index;
if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
clear_bit(i ^ 1, db->u.pgdir->order0);
++o;
}
i >>= o;
set_bit(i, db->u.pgdir->bits[o]);
if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
kfree(db->u.pgdir);
}
mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
int size, int max_direct)
{
int err;
err = mlx4_db_alloc(dev, &wqres->db, 1);
if (err)
return err;
*wqres->db.db = 0;
err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
if (err)
goto err_db;
err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
&wqres->mtt);
if (err)
goto err_buf;
err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
if (err)
goto err_mtt;
return 0;
err_mtt:
mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
mlx4_buf_free(dev, size, &wqres->buf);
err_db:
mlx4_db_free(dev, &wqres->db);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
int size)
{
mlx4_mtt_cleanup(dev, &wqres->mtt);
mlx4_buf_free(dev, size, &wqres->buf);
mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
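
The allocator above (mlx4_bitmap_alloc) hands out object numbers round-robin: the search starts at 'last', wraps back to bit 0 when it runs off the end, and each wrap advances a rotating 'top' prefix that gets OR'ed into the returned number so reused slots still yield distinct IDs. A minimal standalone model of that search-and-wrap behaviour, in plain userspace C with hypothetical sizes, no locking and no reserved bits (not part of this commit):

#include <stdio.h>
#include <stdint.h>

#define NBITS 8u			/* hypothetical pool size (power of two) */

struct toy_bitmap {
	uint8_t  used[NBITS];
	uint32_t last, top, max, mask;
};

static uint32_t toy_alloc(struct toy_bitmap *b)
{
	uint32_t obj;

	/* first pass: search from 'last' to the end of the table */
	for (obj = b->last; obj < b->max && b->used[obj]; ++obj)
		;
	if (obj >= b->max) {
		/* wrap: advance the rotating prefix and retry from bit 0 */
		b->top = (b->top + b->max) & b->mask;
		for (obj = 0; obj < b->max && b->used[obj]; ++obj)
			;
	}
	if (obj >= b->max)
		return (uint32_t)-1;		/* pool exhausted */

	b->used[obj] = 1;
	b->last = (obj + 1 == b->max) ? 0 : obj + 1;
	return obj | b->top;			/* fold in the prefix */
}

int main(void)
{
	struct toy_bitmap b = { .max = NBITS, .mask = 0xff };
	uint32_t i;

	for (i = 0; i < NBITS; ++i)		/* fill the pool: 0x00 .. 0x07 */
		printf("alloc -> 0x%02x\n", toy_alloc(&b));

	b.used[3] = 0;				/* free object 3 */
	printf("alloc -> 0x%02x\n", toy_alloc(&b));	/* 0x03, no wrap needed */

	b.used[1] = 0;				/* free object 1 */
	/* everything at or after 'last' is busy, so the search wraps and the
	 * advanced 'top' prefix shows up in the result: prints 0x09 */
	printf("alloc -> 0x%02x\n", toy_alloc(&b));
	return 0;
}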


@@ -0,0 +1,156 @@
/*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/workqueue.h>
#include "mlx4.h"
enum {
MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
};
static DEFINE_SPINLOCK(catas_lock);
static LIST_HEAD(catas_list);
static struct work_struct catas_work;
static int internal_err_reset = 1;
module_param(internal_err_reset, int, 0644);
MODULE_PARM_DESC(internal_err_reset,
"Reset device on internal errors if non-zero (default 1)");
static void dump_err_buf(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
mlx4_err(dev, "Internal error detected:\n");
for (i = 0; i < priv->fw.catas_size; ++i)
mlx4_err(dev, " buf[%02x]: %08x\n",
i, swab32(readl(priv->catas_err.map + i)));
}
static void poll_catas(unsigned long dev_ptr)
{
struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
struct mlx4_priv *priv = mlx4_priv(dev);
if (readl(priv->catas_err.map)) {
dump_err_buf(dev);
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
if (internal_err_reset) {
spin_lock(&catas_lock);
list_add(&priv->catas_err.list, &catas_list);
spin_unlock(&catas_lock);
queue_work(mlx4_wq, &catas_work);
}
} else
mod_timer(&priv->catas_err.timer,
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
}
static void catas_reset(struct work_struct *work)
{
struct mlx4_priv *priv, *tmppriv;
struct mlx4_dev *dev;
LIST_HEAD(tlist);
int ret;
spin_lock_irq(&catas_lock);
list_splice_init(&catas_list, &tlist);
spin_unlock_irq(&catas_lock);
list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
struct pci_dev *pdev = priv->dev.pdev;
ret = mlx4_restart_one(priv->dev.pdev);
/* 'priv' is no longer valid */
if (ret)
printk(KERN_ERR "mlx4 %s: Reset failed (%d)\n",
pci_name(pdev), ret);
else {
dev = pci_get_drvdata(pdev);
mlx4_dbg(dev, "Reset succeeded\n");
}
}
}
void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
unsigned long addr;
INIT_LIST_HEAD(&priv->catas_err.list);
init_timer(&priv->catas_err.timer);
priv->catas_err.map = NULL;
addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
priv->fw.catas_offset;
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
if (!priv->catas_err.map) {
mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
addr);
return;
}
priv->catas_err.timer.data = (unsigned long) dev;
priv->catas_err.timer.function = poll_catas;
priv->catas_err.timer.expires =
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
add_timer(&priv->catas_err.timer);
}
void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
del_timer_sync(&priv->catas_err.timer);
if (priv->catas_err.map)
iounmap(priv->catas_err.map);
spin_lock_irq(&catas_lock);
list_del(&priv->catas_err.list);
spin_unlock_irq(&catas_lock);
}
void __init mlx4_catas_init(void)
{
INIT_WORK(&catas_work, catas_reset);
}
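
The polling logic above only rearms its timer on the healthy path: poll_catas() reads the error buffer, and on a non-zero word it dumps the buffer and queues a reset work item instead of rescheduling itself. A rough userspace model of that poll-then-rearm shape, with a simulated error source standing in for the mapped error buffer (illustrative only, not part of this commit):

#include <stdio.h>
#include <unistd.h>

#define POLL_INTERVAL_SEC 5	/* mirrors MLX4_CATAS_POLL_INTERVAL = 5 * HZ */

/* stand-in for readl(priv->catas_err.map): pretend an error appears on poll 3 */
static unsigned int read_err_word(int poll_count)
{
	return poll_count == 3 ? 0xdeadbeefu : 0u;
}

int main(void)
{
	for (int polls = 1; ; polls++) {
		unsigned int err = read_err_word(polls);

		if (err) {
			/* error path: report and hand off to a reset worker;
			 * the timer is deliberately not rearmed */
			printf("poll %d: internal error 0x%08x, scheduling reset\n",
			       polls, err);
			break;
		}
		printf("poll %d: healthy, polling again in %d s\n",
		       polls, POLL_INTERVAL_SEC);
		sleep(POLL_INTERVAL_SEC);
	}
	return 0;
}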


@@ -0,0 +1,442 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/mlx4/cmd.h>
#include <asm/io.h>
#include "mlx4.h"
#define CMD_POLL_TOKEN 0xffff
enum {
/* command completed successfully: */
CMD_STAT_OK = 0x00,
/* Internal error (such as a bus error) occurred while processing command: */
CMD_STAT_INTERNAL_ERR = 0x01,
/* Operation/command not supported or opcode modifier not supported: */
CMD_STAT_BAD_OP = 0x02,
/* Parameter not supported or parameter out of range: */
CMD_STAT_BAD_PARAM = 0x03,
/* System not enabled or bad system state: */
CMD_STAT_BAD_SYS_STATE = 0x04,
/* Attempt to access reserved or unallocated resource: */
CMD_STAT_BAD_RESOURCE = 0x05,
/* Requested resource is currently executing a command, or is otherwise busy: */
CMD_STAT_RESOURCE_BUSY = 0x06,
/* Required capability exceeds device limits: */
CMD_STAT_EXCEED_LIM = 0x08,
/* Resource is not in the appropriate state or ownership: */
CMD_STAT_BAD_RES_STATE = 0x09,
/* Index out of range: */
CMD_STAT_BAD_INDEX = 0x0a,
/* FW image corrupted: */
CMD_STAT_BAD_NVMEM = 0x0b,
/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
CMD_STAT_ICM_ERROR = 0x0c,
/* Attempt to modify a QP/EE which is not in the presumed state: */
CMD_STAT_BAD_QP_STATE = 0x10,
/* Bad segment parameters (Address/Size): */
CMD_STAT_BAD_SEG_PARAM = 0x20,
/* Memory Region has Memory Windows bound to it: */
CMD_STAT_REG_BOUND = 0x21,
/* HCA local attached memory not present: */
CMD_STAT_LAM_NOT_PRE = 0x22,
/* Bad management packet (silently discarded): */
CMD_STAT_BAD_PKT = 0x30,
/* More outstanding CQEs in CQ than new CQ size: */
CMD_STAT_BAD_SIZE = 0x40,
/* Multi Function device support required: */
CMD_STAT_MULTI_FUNC_REQ = 0x50,
};
enum {
HCR_IN_PARAM_OFFSET = 0x00,
HCR_IN_MODIFIER_OFFSET = 0x08,
HCR_OUT_PARAM_OFFSET = 0x0c,
HCR_TOKEN_OFFSET = 0x14,
HCR_STATUS_OFFSET = 0x18,
HCR_OPMOD_SHIFT = 12,
HCR_T_BIT = 21,
HCR_E_BIT = 22,
HCR_GO_BIT = 23
};
enum {
GO_BIT_TIMEOUT_MSECS = 10000
};
struct mlx4_cmd_context {
struct completion done;
int result;
int next;
u64 out_param;
u16 token;
};
static int mlx4_status_to_errno(u8 status)
{
static const int trans_table[] = {
[CMD_STAT_INTERNAL_ERR] = -EIO,
[CMD_STAT_BAD_OP] = -EPERM,
[CMD_STAT_BAD_PARAM] = -EINVAL,
[CMD_STAT_BAD_SYS_STATE] = -ENXIO,
[CMD_STAT_BAD_RESOURCE] = -EBADF,
[CMD_STAT_RESOURCE_BUSY] = -EBUSY,
[CMD_STAT_EXCEED_LIM] = -ENOMEM,
[CMD_STAT_BAD_RES_STATE] = -EBADF,
[CMD_STAT_BAD_INDEX] = -EBADF,
[CMD_STAT_BAD_NVMEM] = -EFAULT,
[CMD_STAT_ICM_ERROR] = -ENFILE,
[CMD_STAT_BAD_QP_STATE] = -EINVAL,
[CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
[CMD_STAT_REG_BOUND] = -EBUSY,
[CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
[CMD_STAT_BAD_PKT] = -EINVAL,
[CMD_STAT_BAD_SIZE] = -ENOMEM,
[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
};
if (status >= ARRAY_SIZE(trans_table) ||
(status != CMD_STAT_OK && trans_table[status] == 0))
return -EIO;
return trans_table[status];
}
static int cmd_pending(struct mlx4_dev *dev)
{
u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
return (status & swab32(1 << HCR_GO_BIT)) ||
(mlx4_priv(dev)->cmd.toggle ==
!!(status & swab32(1 << HCR_T_BIT)));
}
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
u32 in_modifier, u8 op_modifier, u16 op, u16 token,
int event)
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
u32 __iomem *hcr = cmd->hcr;
int ret = -EAGAIN;
unsigned long end;
mutex_lock(&cmd->hcr_mutex);
end = jiffies;
if (event)
end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
while (cmd_pending(dev)) {
if (time_after_eq(jiffies, end))
goto out;
cond_resched();
}
/*
* We use writel (instead of something like memcpy_toio)
* because writes of less than 32 bits to the HCR don't work
* (and some architectures such as ia64 implement memcpy_toio
* in terms of writeb).
*/
__raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
__raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
__raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
__raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
/* __raw_writel may not order writes. */
wmb();
__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
(cmd->toggle << HCR_T_BIT) |
(event ? (1 << HCR_E_BIT) : 0) |
(op_modifier << HCR_OPMOD_SHIFT) |
op), hcr + 6);
/*
* Make sure that our HCR writes don't get mixed in with
* writes from another CPU starting a FW command.
*/
mmiowb();
cmd->toggle = cmd->toggle ^ 1;
ret = 0;
out:
mutex_unlock(&cmd->hcr_mutex);
return ret;
}
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
{
struct mlx4_priv *priv = mlx4_priv(dev);
void __iomem *hcr = priv->cmd.hcr;
int err = 0;
unsigned long end;
down(&priv->cmd.poll_sem);
err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
if (err)
goto out;
end = msecs_to_jiffies(timeout) + jiffies;
while (cmd_pending(dev) && time_before(jiffies, end))
cond_resched();
if (cmd_pending(dev)) {
err = -ETIMEDOUT;
goto out;
}
if (out_is_imm)
*out_param =
(u64) be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
(u64) be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
out:
up(&priv->cmd.poll_sem);
return err;
}
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_context *context =
&priv->cmd.context[token & priv->cmd.token_mask];
/* previously timed out command completing at long last */
if (token != context->token)
return;
context->result = mlx4_status_to_errno(status);
context->out_param = out_param;
complete(&context->done);
}
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
struct mlx4_cmd_context *context;
int err = 0;
down(&cmd->event_sem);
spin_lock(&cmd->context_lock);
BUG_ON(cmd->free_head < 0);
context = &cmd->context[cmd->free_head];
context->token += cmd->token_mask + 1;
cmd->free_head = context->next;
spin_unlock(&cmd->context_lock);
init_completion(&context->done);
mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, context->token, 1);
if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
err = -EBUSY;
goto out;
}
err = context->result;
if (err)
goto out;
if (out_is_imm)
*out_param = context->out_param;
out:
spin_lock(&cmd->context_lock);
context->next = cmd->free_head;
cmd->free_head = context - cmd->context;
spin_unlock(&cmd->context_lock);
up(&cmd->event_sem);
return err;
}
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
{
if (mlx4_priv(dev)->cmd.use_events)
return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
in_modifier, op_modifier, op, timeout);
else
return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
int mlx4_cmd_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
mutex_init(&priv->cmd.hcr_mutex);
sema_init(&priv->cmd.poll_sem, 1);
priv->cmd.use_events = 0;
priv->cmd.toggle = 1;
priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
MLX4_HCR_SIZE);
if (!priv->cmd.hcr) {
mlx4_err(dev, "Couldn't map command register.");
return -ENOMEM;
}
priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
MLX4_MAILBOX_SIZE,
MLX4_MAILBOX_SIZE, 0);
if (!priv->cmd.pool) {
iounmap(priv->cmd.hcr);
return -ENOMEM;
}
return 0;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
pci_pool_destroy(priv->cmd.pool);
iounmap(priv->cmd.hcr);
}
/*
* Switch to using events to issue FW commands (can only be called
* after event queue for command events has been initialized).
*/
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
priv->cmd.context = kmalloc(priv->cmd.max_cmds *
sizeof (struct mlx4_cmd_context),
GFP_KERNEL);
if (!priv->cmd.context)
return -ENOMEM;
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
priv->cmd.context[i].next = i + 1;
}
priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
priv->cmd.free_head = 0;
sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
spin_lock_init(&priv->cmd.context_lock);
for (priv->cmd.token_mask = 1;
priv->cmd.token_mask < priv->cmd.max_cmds;
priv->cmd.token_mask <<= 1)
; /* nothing */
--priv->cmd.token_mask;
priv->cmd.use_events = 1;
down(&priv->cmd.poll_sem);
return 0;
}
/*
* Switch back to polling (used when shutting down the device)
*/
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
priv->cmd.use_events = 0;
for (i = 0; i < priv->cmd.max_cmds; ++i)
down(&priv->cmd.event_sem);
kfree(priv->cmd.context);
up(&priv->cmd.poll_sem);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
struct mlx4_cmd_mailbox *mailbox;
mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
if (!mailbox)
return ERR_PTR(-ENOMEM);
mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
&mailbox->dma);
if (!mailbox->buf) {
kfree(mailbox);
return ERR_PTR(-ENOMEM);
}
return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
{
if (!mailbox)
return;
pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
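
Two small idioms above are worth calling out: mlx4_status_to_errno() translates firmware status codes through a sparse table and treats any hole as -EIO, so an unknown status can never be mistaken for success, and mlx4_cmd_use_events() sizes its token mask as the smallest power of two covering max_cmds, minus one. A standalone sketch of both, using a trimmed-down status list and a hypothetical command count (not part of this commit):

#include <stdio.h>
#include <errno.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum {
	STAT_OK           = 0x00,
	STAT_INTERNAL_ERR = 0x01,
	STAT_BAD_OP       = 0x02,
	STAT_BAD_INDEX    = 0x0a,
};

static int status_to_errno(unsigned char status)
{
	static const int table[] = {
		[STAT_INTERNAL_ERR] = -EIO,
		[STAT_BAD_OP]       = -EPERM,
		[STAT_BAD_INDEX]    = -EBADF,
	};

	/* out of range, or a non-OK status with no table entry: degrade to -EIO */
	if (status >= ARRAY_SIZE(table) ||
	    (status != STAT_OK && table[status] == 0))
		return -EIO;
	return table[status];
}

int main(void)
{
	unsigned int max_cmds = 12, token_mask;	/* hypothetical queue depth */

	/* smallest power of two >= max_cmds, minus one: 0x0f for 12 commands */
	for (token_mask = 1; token_mask < max_cmds; token_mask <<= 1)
		; /* nothing */
	--token_mask;

	printf("token_mask = 0x%x\n", token_mask);
	printf("status 0x02 -> %d, 0x07 -> %d, 0x00 -> %d\n",
	       status_to_errno(0x02), status_to_errno(0x07),
	       status_to_errno(0x00));
	return 0;
}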


@@ -0,0 +1,318 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/hardirq.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
#include "mlx4.h"
#include "icm.h"
struct mlx4_cq_context {
__be32 flags;
u16 reserved1[3];
__be16 page_offset;
__be32 logsize_usrpage;
__be16 cq_period;
__be16 cq_max_count;
u8 reserved2[3];
u8 comp_eqn;
u8 log_page_size;
u8 reserved3[2];
u8 mtt_base_addr_h;
__be32 mtt_base_addr_l;
__be32 last_notified_index;
__be32 solicit_producer_index;
__be32 consumer_index;
__be32 producer_index;
u32 reserved4[2];
__be64 db_rec_addr;
};
#define MLX4_CQ_STATUS_OK ( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_CQ_FLAG_CC ( 1 << 18)
#define MLX4_CQ_FLAG_OI ( 1 << 17)
#define MLX4_CQ_STATE_ARMED ( 9 << 8)
#define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8)
#define MLX4_EQ_STATE_FIRED (10 << 8)
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
if (!cq) {
mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
++cq->arm_sn;
cq->comp(cq);
}
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
spin_lock(&cq_table->lock);
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
if (cq)
atomic_inc(&cq->refcount);
spin_unlock(&cq_table->lock);
if (!cq) {
mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
cq->event(cq, event_type);
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
MLX4_CMD_TIME_CLASS_A);
}
static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num, u32 opmod)
{
return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
MLX4_CMD_TIME_CLASS_A);
}
static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
MLX4_CMD_TIME_CLASS_A);
}
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
u16 count, u16 period)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_cq_context *cq_context;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
memset(cq_context, 0, sizeof *cq_context);
cq_context->cq_max_count = cpu_to_be16(count);
cq_context->cq_period = cpu_to_be16(period);
err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
int entries, struct mlx4_mtt *mtt)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_cq_context *cq_context;
u64 mtt_addr;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
memset(cq_context, 0, sizeof *cq_context);
cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
cq_context->log_page_size = mtt->page_shift - 12;
mtt_addr = mlx4_mtt_addr(dev, mtt);
cq_context->mtt_base_addr_h = mtt_addr >> 32;
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
unsigned vector, int collapsed)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_cq_context *cq_context;
u64 mtt_addr;
int err;
if (vector >= dev->caps.num_comp_vectors)
return -EINVAL;
cq->vector = vector;
cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
if (cq->cqn == -1)
return -ENOMEM;
err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
if (err)
goto err_out;
err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
if (err)
goto err_put;
spin_lock_irq(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
spin_unlock_irq(&cq_table->lock);
if (err)
goto err_cmpt_put;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_radix;
}
cq_context = mailbox->buf;
memset(cq_context, 0, sizeof *cq_context);
cq_context->flags = cpu_to_be32(!!collapsed << 18);
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
cq_context->mtt_base_addr_h = mtt_addr >> 32;
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
cq_context->db_rec_addr = cpu_to_be64(db_rec);
err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
goto err_radix;
cq->cons_index = 0;
cq->arm_sn = 1;
cq->uar = uar;
atomic_set(&cq->refcount, 1);
init_completion(&cq->free);
return 0;
err_radix:
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock_irq(&cq_table->lock);
err_cmpt_put:
mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
err_put:
mlx4_table_put(dev, &cq_table->table, cq->cqn);
err_out:
mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
int err;
err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
synchronize_irq(priv->eq_table.eq[cq->vector].irq);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock_irq(&cq_table->lock);
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
mlx4_table_put(dev, &cq_table->table, cq->cqn);
mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
int err;
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
if (err)
return err;
return 0;
}
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
/* Nothing to do to clean up radix_tree */
mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}
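
Teardown in mlx4_cq_free() relies on a refcount plus a completion: the event paths take a reference while they hold the CQ, and the free path drops the initial reference and then sleeps until the last in-flight handler lets go. A rough userspace model of that handshake, substituting C11 atomics, a condition variable and a worker thread for the kernel primitives (compile with -pthread; illustrative only, not part of this commit):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct toy_cq {
	atomic_int	refcount;	/* starts at 1: the owner's reference */
	pthread_mutex_t	lock;
	pthread_cond_t	freed;		/* stands in for struct completion */
	int		done;
};

static void cq_put(struct toy_cq *cq)
{
	if (atomic_fetch_sub(&cq->refcount, 1) == 1) {	/* last reference gone */
		pthread_mutex_lock(&cq->lock);
		cq->done = 1;
		pthread_cond_signal(&cq->freed);
		pthread_mutex_unlock(&cq->lock);
	}
}

static void *event_handler(void *arg)	/* models an in-flight mlx4_cq_event() */
{
	struct toy_cq *cq = arg;

	usleep(100 * 1000);		/* ... deliver the event ... */
	cq_put(cq);			/* release the reference the lookup took */
	return NULL;
}

int main(void)
{
	struct toy_cq cq = { .refcount = 1, .done = 0 };
	pthread_t t;

	pthread_mutex_init(&cq.lock, NULL);
	pthread_cond_init(&cq.freed, NULL);

	/* model the event path's lookup: reference taken before dispatch */
	atomic_fetch_add(&cq.refcount, 1);
	pthread_create(&t, NULL, event_handler, &cq);

	/* model mlx4_cq_free(): drop the initial ref, then wait for handlers */
	cq_put(&cq);
	pthread_mutex_lock(&cq.lock);
	while (!cq.done)
		pthread_cond_wait(&cq.freed, &cq.lock);
	pthread_mutex_unlock(&cq.lock);

	pthread_join(t, NULL);
	printf("CQ released only after the last reference was dropped\n");
	return 0;
}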


@@ -0,0 +1,154 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cmd.h>
#include "mlx4_en.h"
static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
{
return;
}
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
struct mlx4_en_cq *cq,
int entries, int ring, enum cq_type mode)
{
struct mlx4_en_dev *mdev = priv->mdev;
int err;
cq->size = entries;
if (mode == RX) {
cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
cq->vector = ring % mdev->dev->caps.num_comp_vectors;
} else {
cq->buf_size = sizeof(struct mlx4_cqe);
cq->vector = 0;
}
cq->ring = ring;
cq->is_tx = mode;
spin_lock_init(&cq->lock);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq->buf_size, 2 * PAGE_SIZE);
if (err)
return err;
err = mlx4_en_map_buffer(&cq->wqres.buf);
if (err)
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
else
cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
return err;
}
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
struct mlx4_en_dev *mdev = priv->mdev;
int err;
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
cq->mcq.arm_db = cq->wqres.db.db + 1;
*cq->mcq.set_ci_db = 0;
*cq->mcq.arm_db = 0;
memset(cq->buf, 0, cq->buf_size);
if (!cq->is_tx)
cq->size = priv->rx_ring[cq->ring].actual_size;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
if (err)
return err;
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
if (cq->is_tx) {
init_timer(&cq->timer);
cq->timer.function = mlx4_en_poll_tx_cq;
cq->timer.data = (unsigned long) cq;
} else {
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_enable(&cq->napi);
}
return 0;
}
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
struct mlx4_en_dev *mdev = priv->mdev;
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
cq->buf_size = 0;
cq->buf = NULL;
}
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
struct mlx4_en_dev *mdev = priv->mdev;
if (cq->is_tx)
del_timer(&cq->timer);
else {
napi_disable(&cq->napi);
netif_napi_del(&cq->napi);
}
mlx4_cq_free(mdev->dev, &cq->mcq);
}
/* Set rx cq moderation parameters */
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
cq->moder_cnt, cq->moder_time);
}
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
&priv->mdev->uar_lock);
return 0;
}
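
Note how the CQ vectors are assigned in mlx4_en_create_cq(): RX completion queues are spread across the completion vectors reported by the core with a simple modulo, while every TX CQ shares vector 0 and is drained from a timer rather than NAPI. A tiny illustration of the spreading rule with hypothetical counts (not part of this commit):

#include <stdio.h>

int main(void)
{
	int num_comp_vectors = 4;	/* hypothetical: vectors exposed by mlx4_core */
	int rx_rings = 8;		/* hypothetical: configured RX rings */

	for (int ring = 0; ring < rx_rings; ++ring)
		printf("rx ring %d -> completion vector %d\n",
		       ring, ring % num_comp_vectors);
	return 0;
}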


@@ -0,0 +1,426 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include "mlx4_en.h"
#include "en_port.h"
static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
{
int i;
priv->port_stats.lro_aggregated = 0;
priv->port_stats.lro_flushed = 0;
priv->port_stats.lro_no_desc = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
}
}
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
sprintf(drvinfo->fw_version, "%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
drvinfo->n_stats = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
static u32 mlx4_en_get_tso(struct net_device *dev)
{
return (dev->features & NETIF_F_TSO) != 0;
}
static int mlx4_en_set_tso(struct net_device *dev, u32 data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (data) {
if (!priv->mdev->LSO_support)
return -EPERM;
dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
} else
dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
return 0;
}
static u32 mlx4_en_get_rx_csum(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
return priv->rx_csum;
}
static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
priv->rx_csum = (data != 0);
return 0;
}
static const char main_strings[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
"rx_length_errors", "rx_over_errors", "rx_crc_errors",
"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
"tx_heartbeat_errors", "tx_window_errors",
/* port statistics */
"lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
"rx_csum_good", "rx_csum_none", "tx_chksum_offload",
/* packet statistics */
"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
"rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
"tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
"tx_prio_6", "tx_prio_7",
};
#define NUM_MAIN_STATS 21
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
static u32 mlx4_en_get_msglevel(struct net_device *dev)
{
return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
}
static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
{
((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
}
static void mlx4_en_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
wol->supported = 0;
wol->wolopts = 0;
return;
}
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
}
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, uint64_t *data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int index = 0;
int i;
spin_lock_bh(&priv->stats_lock);
mlx4_en_update_lro_stats(priv);
for (i = 0; i < NUM_MAIN_STATS; i++)
data[index++] = ((unsigned long *) &priv->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++)
data[index++] = ((unsigned long *) &priv->port_stats)[i];
for (i = 0; i < priv->tx_ring_num; i++) {
data[index++] = priv->tx_ring[i].packets;
data[index++] = priv->tx_ring[i].bytes;
}
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i].packets;
data[index++] = priv->rx_ring[i].bytes;
}
for (i = 0; i < NUM_PKT_STATS; i++)
data[index++] = ((unsigned long *) &priv->pkstats)[i];
spin_unlock_bh(&priv->stats_lock);
}
static void mlx4_en_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int index = 0;
int i;
if (stringset != ETH_SS_STATS)
return;
/* Add main counters */
for (i = 0; i < NUM_MAIN_STATS; i++)
strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
for (i = 0; i < NUM_PORT_STATS; i++)
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[i + NUM_MAIN_STATS]);
for (i = 0; i < priv->tx_ring_num; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_bytes", i);
}
for (i = 0; i < priv->rx_ring_num; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
}
for (i = 0; i < NUM_PKT_STATS; i++)
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
}
static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->autoneg = AUTONEG_DISABLE;
cmd->supported = SUPPORTED_10000baseT_Full;
cmd->advertising = ADVERTISED_10000baseT_Full;
if (netif_carrier_ok(dev)) {
cmd->speed = SPEED_10000;
cmd->duplex = DUPLEX_FULL;
} else {
cmd->speed = -1;
cmd->duplex = -1;
}
return 0;
}
static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
if ((cmd->autoneg == AUTONEG_ENABLE) ||
(cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
return -EINVAL;
/* Nothing to change */
return 0;
}
static int mlx4_en_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
coal->tx_coalesce_usecs = 0;
coal->tx_max_coalesced_frames = 0;
coal->rx_coalesce_usecs = priv->rx_usecs;
coal->rx_max_coalesced_frames = priv->rx_frames;
coal->pkt_rate_low = priv->pkt_rate_low;
coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
coal->pkt_rate_high = priv->pkt_rate_high;
coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
coal->rate_sample_interval = priv->sample_interval;
coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
return 0;
}
static int mlx4_en_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int err, i;
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
coal->rx_max_coalesced_frames;
priv->rx_usecs = (coal->rx_coalesce_usecs ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TIME :
coal->rx_coalesce_usecs;
/* Set adaptive coalescing params */
priv->pkt_rate_low = coal->pkt_rate_low;
priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
priv->pkt_rate_high = coal->pkt_rate_high;
priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
priv->sample_interval = coal->rate_sample_interval;
priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
priv->last_moder_time = MLX4_EN_AUTO_CONF;
if (priv->adaptive_rx_coal)
return 0;
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_cq[i].moder_cnt = priv->rx_frames;
priv->rx_cq[i].moder_time = priv->rx_usecs;
err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
if (err)
return err;
}
return 0;
}
static int mlx4_en_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
priv->prof->tx_pause = pause->tx_pause != 0;
priv->prof->rx_pause = pause->rx_pause != 0;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
priv->prof->tx_pause,
priv->prof->tx_ppp,
priv->prof->rx_pause,
priv->prof->rx_ppp);
if (err)
en_err(priv, "Failed setting pause params\n");
return err;
}
static void mlx4_en_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
pause->tx_pause = priv->prof->tx_pause;
pause->rx_pause = priv->prof->rx_pause;
}
static int mlx4_en_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
u32 rx_size, tx_size;
int port_up = 0;
int err = 0;
if (param->rx_jumbo_pending || param->rx_mini_pending)
return -EINVAL;
rx_size = roundup_pow_of_two(param->rx_pending);
rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
tx_size = roundup_pow_of_two(param->tx_pending);
tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
if (rx_size == priv->prof->rx_ring_size &&
tx_size == priv->prof->tx_ring_size)
return 0;
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev);
}
mlx4_en_free_resources(priv);
priv->prof->tx_ring_size = tx_size;
priv->prof->rx_ring_size = rx_size;
err = mlx4_en_alloc_resources(priv);
if (err) {
en_err(priv, "Failed reallocating port resources\n");
goto out;
}
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
en_err(priv, "Failed starting port\n");
}
out:
mutex_unlock(&mdev->state_lock);
return err;
}
static void mlx4_en_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
memset(param, 0, sizeof(*param));
param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
}
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
.set_settings = mlx4_en_set_settings,
#ifdef NETIF_F_TSO
.get_tso = mlx4_en_get_tso,
.set_tso = mlx4_en_set_tso,
#endif
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_link = ethtool_op_get_link,
.get_rx_csum = mlx4_en_get_rx_csum,
.set_rx_csum = mlx4_en_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
.get_strings = mlx4_en_get_strings,
.get_sset_count = mlx4_en_get_sset_count,
.get_ethtool_stats = mlx4_en_get_ethtool_stats,
.get_wol = mlx4_en_get_wol,
.get_msglevel = mlx4_en_get_msglevel,
.set_msglevel = mlx4_en_set_msglevel,
.get_coalesce = mlx4_en_get_coalesce,
.set_coalesce = mlx4_en_set_coalesce,
.get_pauseparam = mlx4_en_get_pauseparam,
.set_pauseparam = mlx4_en_set_pauseparam,
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
.get_flags = ethtool_op_get_flags,
.set_flags = ethtool_op_set_flags,
};
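
mlx4_en_set_ringparam() normalizes the requested ring sizes before deciding whether a restart is needed: round the request up to a power of two, clamp it into the supported window, and only tear the port down when the result differs from the active profile. A standalone sketch of that normalization step, with made-up limits in place of the MLX4_EN_MIN/MAX constants (not part of this commit):

#include <stdio.h>
#include <stdint.h>

#define MIN_RING_SIZE  128u	/* hypothetical lower bound */
#define MAX_RING_SIZE 4096u	/* hypothetical upper bound */

static uint32_t roundup_pow_of_two(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static uint32_t normalize_ring_size(uint32_t requested)
{
	uint32_t size = roundup_pow_of_two(requested);

	if (size < MIN_RING_SIZE)
		size = MIN_RING_SIZE;
	if (size > MAX_RING_SIZE)
		size = MAX_RING_SIZE;
	return size;
}

int main(void)
{
	uint32_t req[] = { 100, 1000, 1024, 9000 };
	unsigned int i;

	for (i = 0; i < sizeof(req) / sizeof(req[0]); ++i)
		printf("requested %4u -> ring size %u\n",
		       req[i], normalize_ring_size(req[i]));
	return 0;
}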


@@ -0,0 +1,282 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include "mlx4_en.h"
MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
static const char mlx4_en_version[] =
DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
#define MLX4_EN_PARM_INT(X, def_val, desc) \
static unsigned int X = def_val;\
module_param(X , uint, 0444); \
MODULE_PARM_DESC(X, desc);
/*
* Device scope module parameters
*/
/* Use an XOR rather than a Toeplitz hash function for RSS */
MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
"Number of LRO sessions per ring or disabled (0)");
/* Priority pausing */
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
" Per priority bit mask");
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
" Per priority bit mask");
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
struct mlx4_en_profile *params = &mdev->profile;
int i;
params->rss_xor = (rss_xor != 0);
params->rss_mask = rss_mask & 0x1f;
params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
params->prof[i].rx_pause = 1;
params->prof[i].rx_ppp = pfcrx;
params->prof[i].tx_pause = 1;
params->prof[i].tx_ppp = pfctx;
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
}
return 0;
}
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
enum mlx4_dev_event event, int port)
{
struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
struct mlx4_en_priv *priv;
if (!mdev->pndev[port])
return;
priv = netdev_priv(mdev->pndev[port]);
switch (event) {
case MLX4_DEV_EVENT_PORT_UP:
case MLX4_DEV_EVENT_PORT_DOWN:
/* To prevent races, we poll the link state in a separate
task rather than changing it here */
priv->link_state = event;
queue_work(mdev->workqueue, &priv->linkstate_task);
break;
case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
mlx4_err(mdev, "Internal error detected, restarting device\n");
break;
default:
mlx4_warn(mdev, "Unhandled event: %d\n", event);
}
}
static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
{
struct mlx4_en_dev *mdev = endev_ptr;
int i;
mutex_lock(&mdev->state_lock);
mdev->device_up = false;
mutex_unlock(&mdev->state_lock);
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
mlx4_mr_free(dev, &mdev->mr);
mlx4_uar_free(dev, &mdev->priv_uar);
mlx4_pd_free(dev, mdev->priv_pdn);
kfree(mdev);
}
static void *mlx4_en_add(struct mlx4_dev *dev)
{
static int mlx4_en_version_printed;
struct mlx4_en_dev *mdev;
int i;
int err;
if (!mlx4_en_version_printed) {
printk(KERN_INFO "%s", mlx4_en_version);
mlx4_en_version_printed++;
}
mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
if (!mdev) {
dev_err(&dev->pdev->dev, "Device struct alloc failed, "
"aborting.\n");
err = -ENOMEM;
goto err_free_res;
}
if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
goto err_free_dev;
if (mlx4_uar_alloc(dev, &mdev->priv_uar))
goto err_pd;
mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
if (!mdev->uar_map)
goto err_uar;
spin_lock_init(&mdev->uar_lock);
mdev->dev = dev;
mdev->dma_device = &(dev->pdev->dev);
mdev->pdev = dev->pdev;
mdev->device_up = false;
mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
if (!mdev->LSO_support)
mlx4_warn(mdev, "LSO not supported, please upgrade to later "
"FW version to enable LSO\n");
if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
0, 0, &mdev->mr)) {
mlx4_err(mdev, "Failed allocating memory region\n");
goto err_uar;
}
if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
mlx4_err(mdev, "Failed enabling memory region\n");
goto err_mr;
}
/* Build device profile according to supplied module parameters */
err = mlx4_en_get_profile(mdev);
if (err) {
mlx4_err(mdev, "Bad module parameters, aborting.\n");
goto err_mr;
}
/* Configure which ports to start according to module parameters */
mdev->port_cnt = 0;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
mdev->port_cnt++;
/* If we did not receive an explicit number of Rx rings, default to
* the number of completion vectors populated by the mlx4_core */
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
mlx4_info(mdev, "Using %d tx rings for port:%d\n",
mdev->profile.prof[i].tx_ring_num, i);
mdev->profile.prof[i].rx_ring_num = min_t(int,
roundup_pow_of_two(dev->caps.num_comp_vectors),
MAX_RX_RINGS);
mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
mdev->profile.prof[i].rx_ring_num, i);
}
/* Create our own workqueue for reset/multicast tasks
* Note: we cannot use the shared workqueue because of deadlocks caused
* by the rtnl lock */
mdev->workqueue = create_singlethread_workqueue("mlx4_en");
if (!mdev->workqueue) {
err = -ENOMEM;
goto err_mr;
}
/* At this stage all non-port specific tasks are complete:
* mark the card state as up */
mutex_init(&mdev->state_lock);
mdev->device_up = true;
/* Setup ports */
/* Create a netdev for each port */
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
mlx4_info(mdev, "Activating port:%d\n", i);
if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
mdev->pndev[i] = NULL;
}
return mdev;
err_mr:
mlx4_mr_free(dev, &mdev->mr);
err_uar:
mlx4_uar_free(dev, &mdev->priv_uar);
err_pd:
mlx4_pd_free(dev, mdev->priv_pdn);
err_free_dev:
kfree(mdev);
err_free_res:
return NULL;
}
static struct mlx4_interface mlx4_en_interface = {
.add = mlx4_en_add,
.remove = mlx4_en_remove,
.event = mlx4_en_event,
};
static int __init mlx4_en_init(void)
{
return mlx4_register_interface(&mlx4_en_interface);
}
static void __exit mlx4_en_cleanup(void)
{
mlx4_unregister_interface(&mlx4_en_interface);
}
module_init(mlx4_en_init);
module_exit(mlx4_en_cleanup);

File diff suppressed because it is too large


@@ -0,0 +1,242 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/if_vlan.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include "en_port.h"
#include "mlx4_en.h"
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
u64 mac, u64 clear, u8 mode)
{
return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
}
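/* The VLAN filter mailbox holds one bit per possible VLAN ID (128 words x
 * 32 bits = 4096 IDs), packed 32 IDs per word starting from the last
 * entry; a zeroed filter blocks all VLANs. */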
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_vlan_fltr_mbox *filter;
int i;
int j;
int index = 0;
u32 entry;
int err = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
filter = mailbox->buf;
if (grp) {
memset(filter, 0, sizeof *filter);
for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
entry = 0;
for (j = 0; j < 32; j++)
if (vlan_group_get_device(grp, index++))
entry |= 1 << j;
filter->entry[i] = cpu_to_be32(entry);
}
} else {
/* When no vlans are configured we block all vlans */
memset(filter, 0, sizeof(*filter));
}
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
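/* Pause configuration: the enable flag for global pause lives in bit 7 of
 * the pptx/pprx bytes and is forced off whenever the corresponding
 * per-priority flow control mask (pfctx/pfcrx) is non-zero. */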
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_general_context *context;
int err;
u32 in_mod;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
memset(context, 0, sizeof *context);
context->flags = SET_PORT_GEN_ALL_VALID;
context->mtu = cpu_to_be16(mtu);
context->pptx = (pptx * (!pfctx)) << 7;
context->pfctx = pfctx;
context->pprx = (pprx * (!pfcrx)) << 7;
context->pfcrx = pfcrx;
in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_rqp_calc_context *context;
int err;
u32 in_mod;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
context->intra_no_vlan = 0;
context->no_vlan = MLX4_NO_VLAN_IDX;
context->intra_vlan_miss = 0;
context->vlan_miss = MLX4_VLAN_MISS_IDX;
in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
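/* Dump the port counters: the input modifier carries the port number in
 * its low byte and the 'reset' flag in the next byte. Software ring
 * counters are folded in under stats_lock, then the big-endian firmware
 * counters are translated into netdev and per-priority statistics. */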
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
struct net_device_stats *stats = &priv->stats;
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
int i;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
if (err)
goto out;
mlx4_en_stats = mailbox->buf;
spin_lock_bh(&priv->stats_lock);
stats->rx_packets = 0;
stats->rx_bytes = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i].packets;
stats->rx_bytes += priv->rx_ring[i].bytes;
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
for (i = 0; i <= priv->tx_ring_num; i++) {
stats->tx_packets += priv->tx_ring[i].packets;
stats->tx_bytes += priv->tx_ring[i].bytes;
}
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
be32_to_cpu(mlx4_en_stats->RdropLength) +
be32_to_cpu(mlx4_en_stats->RJBBR) +
be32_to_cpu(mlx4_en_stats->RCRC) +
be32_to_cpu(mlx4_en_stats->RRUNT);
stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
be64_to_cpu(mlx4_en_stats->MCAST_novlan);
stats->collisions = 0;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
stats->rx_frame_errors = 0;
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
stats->tx_aborted_errors = 0;
stats->tx_carrier_errors = 0;
stats->tx_fifo_errors = 0;
stats->tx_heartbeat_errors = 0;
stats->tx_window_errors = 0;
priv->pkstats.broadcast =
be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
spin_unlock_bh(&priv->stats_lock);
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return err;
}


@@ -0,0 +1,570 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _MLX4_EN_PORT_H_
#define _MLX4_EN_PORT_H_
#define SET_PORT_GEN_ALL_VALID 0x7
#define SET_PORT_PROMISC_SHIFT 31
enum {
MLX4_CMD_SET_VLAN_FLTR = 0x47,
MLX4_CMD_SET_MCAST_FLTR = 0x48,
MLX4_CMD_DUMP_ETH_STATS = 0x49,
};
struct mlx4_set_port_general_context {
u8 reserved[3];
u8 flags;
u16 reserved2;
__be16 mtu;
u8 pptx;
u8 pfctx;
u16 reserved3;
u8 pprx;
u8 pfcrx;
u16 reserved4;
};
struct mlx4_set_port_rqp_calc_context {
__be32 base_qpn;
__be32 flags;
u8 reserved[3];
u8 mac_miss;
u8 intra_no_vlan;
u8 no_vlan;
u8 intra_vlan_miss;
u8 vlan_miss;
u8 reserved2[3];
u8 no_vlan_prio;
__be32 promisc;
__be32 mcast;
};
#define VLAN_FLTR_SIZE 128
struct mlx4_set_vlan_fltr_mbox {
__be32 entry[VLAN_FLTR_SIZE];
};
enum {
MLX4_MCAST_CONFIG = 0,
MLX4_MCAST_DISABLE = 1,
MLX4_MCAST_ENABLE = 2,
};
struct mlx4_en_stat_out_mbox {
/* Received frames with a length of 64 octets */
__be64 R64_prio_0;
__be64 R64_prio_1;
__be64 R64_prio_2;
__be64 R64_prio_3;
__be64 R64_prio_4;
__be64 R64_prio_5;
__be64 R64_prio_6;
__be64 R64_prio_7;
__be64 R64_novlan;
/* Received frames with a length of 127 octets */
__be64 R127_prio_0;
__be64 R127_prio_1;
__be64 R127_prio_2;
__be64 R127_prio_3;
__be64 R127_prio_4;
__be64 R127_prio_5;
__be64 R127_prio_6;
__be64 R127_prio_7;
__be64 R127_novlan;
/* Received frames with a length of 255 octets */
__be64 R255_prio_0;
__be64 R255_prio_1;
__be64 R255_prio_2;
__be64 R255_prio_3;
__be64 R255_prio_4;
__be64 R255_prio_5;
__be64 R255_prio_6;
__be64 R255_prio_7;
__be64 R255_novlan;
/* Received frames with a length of 511 octets */
__be64 R511_prio_0;
__be64 R511_prio_1;
__be64 R511_prio_2;
__be64 R511_prio_3;
__be64 R511_prio_4;
__be64 R511_prio_5;
__be64 R511_prio_6;
__be64 R511_prio_7;
__be64 R511_novlan;
/* Received frames with a length of 1023 octets */
__be64 R1023_prio_0;
__be64 R1023_prio_1;
__be64 R1023_prio_2;
__be64 R1023_prio_3;
__be64 R1023_prio_4;
__be64 R1023_prio_5;
__be64 R1023_prio_6;
__be64 R1023_prio_7;
__be64 R1023_novlan;
/* Received frames with a length of 1518 octets */
__be64 R1518_prio_0;
__be64 R1518_prio_1;
__be64 R1518_prio_2;
__be64 R1518_prio_3;
__be64 R1518_prio_4;
__be64 R1518_prio_5;
__be64 R1518_prio_6;
__be64 R1518_prio_7;
__be64 R1518_novlan;
/* Received frames with a length of 1522 octets */
__be64 R1522_prio_0;
__be64 R1522_prio_1;
__be64 R1522_prio_2;
__be64 R1522_prio_3;
__be64 R1522_prio_4;
__be64 R1522_prio_5;
__be64 R1522_prio_6;
__be64 R1522_prio_7;
__be64 R1522_novlan;
/* Received frames with a length of 1548 octets */
__be64 R1548_prio_0;
__be64 R1548_prio_1;
__be64 R1548_prio_2;
__be64 R1548_prio_3;
__be64 R1548_prio_4;
__be64 R1548_prio_5;
__be64 R1548_prio_6;
__be64 R1548_prio_7;
__be64 R1548_novlan;
/* Received frames with a length of 1548 < octets < MTU */
__be64 R2MTU_prio_0;
__be64 R2MTU_prio_1;
__be64 R2MTU_prio_2;
__be64 R2MTU_prio_3;
__be64 R2MTU_prio_4;
__be64 R2MTU_prio_5;
__be64 R2MTU_prio_6;
__be64 R2MTU_prio_7;
__be64 R2MTU_novlan;
/* Received frames with a length of MTU< octets and good CRC */
__be64 RGIANT_prio_0;
__be64 RGIANT_prio_1;
__be64 RGIANT_prio_2;
__be64 RGIANT_prio_3;
__be64 RGIANT_prio_4;
__be64 RGIANT_prio_5;
__be64 RGIANT_prio_6;
__be64 RGIANT_prio_7;
__be64 RGIANT_novlan;
/* Received broadcast frames with good CRC */
__be64 RBCAST_prio_0;
__be64 RBCAST_prio_1;
__be64 RBCAST_prio_2;
__be64 RBCAST_prio_3;
__be64 RBCAST_prio_4;
__be64 RBCAST_prio_5;
__be64 RBCAST_prio_6;
__be64 RBCAST_prio_7;
__be64 RBCAST_novlan;
/* Received multicast frames with good CRC */
__be64 MCAST_prio_0;
__be64 MCAST_prio_1;
__be64 MCAST_prio_2;
__be64 MCAST_prio_3;
__be64 MCAST_prio_4;
__be64 MCAST_prio_5;
__be64 MCAST_prio_6;
__be64 MCAST_prio_7;
__be64 MCAST_novlan;
/* Received unicast not short or GIANT frames with good CRC */
__be64 RTOTG_prio_0;
__be64 RTOTG_prio_1;
__be64 RTOTG_prio_2;
__be64 RTOTG_prio_3;
__be64 RTOTG_prio_4;
__be64 RTOTG_prio_5;
__be64 RTOTG_prio_6;
__be64 RTOTG_prio_7;
__be64 RTOTG_novlan;
/* Count of total octets of received frames, includes framing characters */
__be64 RTTLOCT_prio_0;
/* Count of total octets of received frames, not including framing
characters */
__be64 RTTLOCT_NOFRM_prio_0;
/* Count of Total number of octets received
(only for frames without errors) */
__be64 ROCT_prio_0;
__be64 RTTLOCT_prio_1;
__be64 RTTLOCT_NOFRM_prio_1;
__be64 ROCT_prio_1;
__be64 RTTLOCT_prio_2;
__be64 RTTLOCT_NOFRM_prio_2;
__be64 ROCT_prio_2;
__be64 RTTLOCT_prio_3;
__be64 RTTLOCT_NOFRM_prio_3;
__be64 ROCT_prio_3;
__be64 RTTLOCT_prio_4;
__be64 RTTLOCT_NOFRM_prio_4;
__be64 ROCT_prio_4;
__be64 RTTLOCT_prio_5;
__be64 RTTLOCT_NOFRM_prio_5;
__be64 ROCT_prio_5;
__be64 RTTLOCT_prio_6;
__be64 RTTLOCT_NOFRM_prio_6;
__be64 ROCT_prio_6;
__be64 RTTLOCT_prio_7;
__be64 RTTLOCT_NOFRM_prio_7;
__be64 ROCT_prio_7;
__be64 RTTLOCT_novlan;
__be64 RTTLOCT_NOFRM_novlan;
__be64 ROCT_novlan;
/* Count of Total received frames including bad frames */
__be64 RTOT_prio_0;
/* Count of Total number of received frames with 802.1Q encapsulation */
__be64 R1Q_prio_0;
__be64 reserved1;
__be64 RTOT_prio_1;
__be64 R1Q_prio_1;
__be64 reserved2;
__be64 RTOT_prio_2;
__be64 R1Q_prio_2;
__be64 reserved3;
__be64 RTOT_prio_3;
__be64 R1Q_prio_3;
__be64 reserved4;
__be64 RTOT_prio_4;
__be64 R1Q_prio_4;
__be64 reserved5;
__be64 RTOT_prio_5;
__be64 R1Q_prio_5;
__be64 reserved6;
__be64 RTOT_prio_6;
__be64 R1Q_prio_6;
__be64 reserved7;
__be64 RTOT_prio_7;
__be64 R1Q_prio_7;
__be64 reserved8;
__be64 RTOT_novlan;
__be64 R1Q_novlan;
__be64 reserved9;
/* Total number of Successfully Received Control Frames */
__be64 RCNTL;
__be64 reserved10;
__be64 reserved11;
__be64 reserved12;
/* Count of received frames with a length/type field value between 46
(42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged frames),
inclusive */
__be64 RInRangeLengthErr;
/* Count of received frames with length/type field between 1501 and 1535
decimal, inclusive */
__be64 ROutRangeLengthErr;
/* Count of received frames that are longer than max allowed size for
802.3 frames (1518/1522) */
__be64 RFrmTooLong;
/* Count frames received with PCS error */
__be64 PCS;
/* Transmit frames with a length of 64 octets */
__be64 T64_prio_0;
__be64 T64_prio_1;
__be64 T64_prio_2;
__be64 T64_prio_3;
__be64 T64_prio_4;
__be64 T64_prio_5;
__be64 T64_prio_6;
__be64 T64_prio_7;
__be64 T64_novlan;
__be64 T64_loopbk;
/* Transmit frames with a length of 65 to 127 octets. */
__be64 T127_prio_0;
__be64 T127_prio_1;
__be64 T127_prio_2;
__be64 T127_prio_3;
__be64 T127_prio_4;
__be64 T127_prio_5;
__be64 T127_prio_6;
__be64 T127_prio_7;
__be64 T127_novlan;
__be64 T127_loopbk;
/* Transmit frames with a length of 128 to 255 octets */
__be64 T255_prio_0;
__be64 T255_prio_1;
__be64 T255_prio_2;
__be64 T255_prio_3;
__be64 T255_prio_4;
__be64 T255_prio_5;
__be64 T255_prio_6;
__be64 T255_prio_7;
__be64 T255_novlan;
__be64 T255_loopbk;
/* Transmit frames with a length of 256 to 511 octets */
__be64 T511_prio_0;
__be64 T511_prio_1;
__be64 T511_prio_2;
__be64 T511_prio_3;
__be64 T511_prio_4;
__be64 T511_prio_5;
__be64 T511_prio_6;
__be64 T511_prio_7;
__be64 T511_novlan;
__be64 T511_loopbk;
/* Transmit frames with a length of 512 to 1023 octets */
__be64 T1023_prio_0;
__be64 T1023_prio_1;
__be64 T1023_prio_2;
__be64 T1023_prio_3;
__be64 T1023_prio_4;
__be64 T1023_prio_5;
__be64 T1023_prio_6;
__be64 T1023_prio_7;
__be64 T1023_novlan;
__be64 T1023_loopbk;
/* Transmit frames with a length of 1024 to 1518 octets */
__be64 T1518_prio_0;
__be64 T1518_prio_1;
__be64 T1518_prio_2;
__be64 T1518_prio_3;
__be64 T1518_prio_4;
__be64 T1518_prio_5;
__be64 T1518_prio_6;
__be64 T1518_prio_7;
__be64 T1518_novlan;
__be64 T1518_loopbk;
/* Counts transmit frames with a length of 1519 to 1522 bytes */
__be64 T1522_prio_0;
__be64 T1522_prio_1;
__be64 T1522_prio_2;
__be64 T1522_prio_3;
__be64 T1522_prio_4;
__be64 T1522_prio_5;
__be64 T1522_prio_6;
__be64 T1522_prio_7;
__be64 T1522_novlan;
__be64 T1522_loopbk;
/* Transmit frames with a length of 1523 to 1548 octets */
__be64 T1548_prio_0;
__be64 T1548_prio_1;
__be64 T1548_prio_2;
__be64 T1548_prio_3;
__be64 T1548_prio_4;
__be64 T1548_prio_5;
__be64 T1548_prio_6;
__be64 T1548_prio_7;
__be64 T1548_novlan;
__be64 T1548_loopbk;
/* Counts transmit frames with a length of 1549 to MTU bytes */
__be64 T2MTU_prio_0;
__be64 T2MTU_prio_1;
__be64 T2MTU_prio_2;
__be64 T2MTU_prio_3;
__be64 T2MTU_prio_4;
__be64 T2MTU_prio_5;
__be64 T2MTU_prio_6;
__be64 T2MTU_prio_7;
__be64 T2MTU_novlan;
__be64 T2MTU_loopbk;
/* Transmit frames with a length greater than MTU octets and a good CRC. */
__be64 TGIANT_prio_0;
__be64 TGIANT_prio_1;
__be64 TGIANT_prio_2;
__be64 TGIANT_prio_3;
__be64 TGIANT_prio_4;
__be64 TGIANT_prio_5;
__be64 TGIANT_prio_6;
__be64 TGIANT_prio_7;
__be64 TGIANT_novlan;
__be64 TGIANT_loopbk;
/* Transmit broadcast frames with a good CRC */
__be64 TBCAST_prio_0;
__be64 TBCAST_prio_1;
__be64 TBCAST_prio_2;
__be64 TBCAST_prio_3;
__be64 TBCAST_prio_4;
__be64 TBCAST_prio_5;
__be64 TBCAST_prio_6;
__be64 TBCAST_prio_7;
__be64 TBCAST_novlan;
__be64 TBCAST_loopbk;
/* Transmit multicast frames with a good CRC */
__be64 TMCAST_prio_0;
__be64 TMCAST_prio_1;
__be64 TMCAST_prio_2;
__be64 TMCAST_prio_3;
__be64 TMCAST_prio_4;
__be64 TMCAST_prio_5;
__be64 TMCAST_prio_6;
__be64 TMCAST_prio_7;
__be64 TMCAST_novlan;
__be64 TMCAST_loopbk;
/* Transmit good frames that are neither broadcast nor multicast */
__be64 TTOTG_prio_0;
__be64 TTOTG_prio_1;
__be64 TTOTG_prio_2;
__be64 TTOTG_prio_3;
__be64 TTOTG_prio_4;
__be64 TTOTG_prio_5;
__be64 TTOTG_prio_6;
__be64 TTOTG_prio_7;
__be64 TTOTG_novlan;
__be64 TTOTG_loopbk;
/* total octets of transmitted frames, including framing characters */
__be64 TTTLOCT_prio_0;
/* total octets of transmitted frames, not including framing characters */
__be64 TTTLOCT_NOFRM_prio_0;
/* ifOutOctets */
__be64 TOCT_prio_0;
__be64 TTTLOCT_prio_1;
__be64 TTTLOCT_NOFRM_prio_1;
__be64 TOCT_prio_1;
__be64 TTTLOCT_prio_2;
__be64 TTTLOCT_NOFRM_prio_2;
__be64 TOCT_prio_2;
__be64 TTTLOCT_prio_3;
__be64 TTTLOCT_NOFRM_prio_3;
__be64 TOCT_prio_3;
__be64 TTTLOCT_prio_4;
__be64 TTTLOCT_NOFRM_prio_4;
__be64 TOCT_prio_4;
__be64 TTTLOCT_prio_5;
__be64 TTTLOCT_NOFRM_prio_5;
__be64 TOCT_prio_5;
__be64 TTTLOCT_prio_6;
__be64 TTTLOCT_NOFRM_prio_6;
__be64 TOCT_prio_6;
__be64 TTTLOCT_prio_7;
__be64 TTTLOCT_NOFRM_prio_7;
__be64 TOCT_prio_7;
__be64 TTTLOCT_novlan;
__be64 TTTLOCT_NOFRM_novlan;
__be64 TOCT_novlan;
__be64 TTTLOCT_loopbk;
__be64 TTTLOCT_NOFRM_loopbk;
__be64 TOCT_loopbk;
/* Total frames transmitted with a good CRC that are not aborted */
__be64 TTOT_prio_0;
/* Total number of frames transmitted with 802.1Q encapsulation */
__be64 T1Q_prio_0;
__be64 reserved13;
__be64 TTOT_prio_1;
__be64 T1Q_prio_1;
__be64 reserved14;
__be64 TTOT_prio_2;
__be64 T1Q_prio_2;
__be64 reserved15;
__be64 TTOT_prio_3;
__be64 T1Q_prio_3;
__be64 reserved16;
__be64 TTOT_prio_4;
__be64 T1Q_prio_4;
__be64 reserved17;
__be64 TTOT_prio_5;
__be64 T1Q_prio_5;
__be64 reserved18;
__be64 TTOT_prio_6;
__be64 T1Q_prio_6;
__be64 reserved19;
__be64 TTOT_prio_7;
__be64 T1Q_prio_7;
__be64 reserved20;
__be64 TTOT_novlan;
__be64 T1Q_novlan;
__be64 reserved21;
__be64 TTOT_loopbk;
__be64 T1Q_loopbk;
__be64 reserved22;
/* Received frames with a length greater than MTU octets and a bad CRC */
__be32 RJBBR;
/* Received frames with a bad CRC that are not runts, jabbers,
or alignment errors */
__be32 RCRC;
/* Received frames with SFD with a length of less than 64 octets and a
bad CRC */
__be32 RRUNT;
/* Received frames with a length less than 64 octets and a good CRC */
__be32 RSHORT;
/* Total Number of Received Packets Dropped */
__be32 RDROP;
/* Drop due to overflow */
__be32 RdropOvflw;
/* Drop due to length error */
__be32 RdropLength;
/* Total of good frames. Does not include frames received with
frame-too-long, FCS, or length errors */
__be32 RTOTFRMS;
/* Total dropped Xmited packets */
__be32 TDROP;
};
#endif


@@ -0,0 +1,101 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/vmalloc.h>
#include <linux/mlx4/qp.h>
#include "mlx4_en.h"
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn,
struct mlx4_qp_context *context)
{
struct mlx4_en_dev *mdev = priv->mdev;
memset(context, 0, sizeof *context);
context->flags = cpu_to_be32(7 << 16 | rss << 13);
context->pd = cpu_to_be32(mdev->priv_pdn);
context->mtu_msgmax = 0xff;
if (!is_tx && !rss)
context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
if (is_tx)
context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
else
context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
context->usr_page = cpu_to_be32(mdev->priv_uar.index);
context->local_qpn = cpu_to_be32(qpn);
context->pri_path.ackto = 1 & 0x07;
context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
context->pri_path.counter_index = 0xff;
context->cqn_send = cpu_to_be32(cqn);
context->cqn_recv = cpu_to_be32(cqn);
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
}
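/* On 64-bit kernels, or when the buffer was allocated as a single chunk,
 * the direct buffer pointer is already usable; only the multi-chunk 32-bit
 * case needs an explicit vmap of the per-page list. */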
int mlx4_en_map_buffer(struct mlx4_buf *buf)
{
struct page **pages;
int i;
if (BITS_PER_LONG == 64 || buf->nbufs == 1)
return 0;
pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
if (!pages)
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i)
pages[i] = virt_to_page(buf->page_list[i].buf);
buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
kfree(pages);
if (!buf->direct.buf)
return -ENOMEM;
return 0;
}
void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
{
if (BITS_PER_LONG == 64 || buf->nbufs == 1)
return;
vunmap(buf->direct.buf);
}
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
return;
}


@@ -0,0 +1,940 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include "mlx4_en.h"
static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
void **ip_hdr, void **tcpudp_hdr,
u64 *hdr_flags, void *priv)
{
*mac_hdr = page_address(frags->page) + frags->page_offset;
*ip_hdr = *mac_hdr + ETH_HLEN;
*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
*hdr_flags = LRO_IPV4 | LRO_TCP;
return 0;
}
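/* Per-fragment page allocator: every fragment slot keeps a high-order page
 * and carves fixed strides out of it. The current page is handed to the
 * descriptor, and a fresh page is allocated only once the last usable
 * offset has been reached; otherwise the same page is reference-counted
 * and the offset advanced by one stride. */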
static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
struct skb_frag_struct *skb_frags,
struct mlx4_en_rx_alloc *ring_alloc,
int i)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
struct page *page;
dma_addr_t dma;
if (page_alloc->offset == frag_info->last_offset) {
/* Allocate new page */
page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
if (!page)
return -ENOMEM;
skb_frags[i].page = page_alloc->page;
skb_frags[i].page_offset = page_alloc->offset;
page_alloc->page = page;
page_alloc->offset = frag_info->frag_align;
} else {
page = page_alloc->page;
get_page(page);
skb_frags[i].page = page;
skb_frags[i].page_offset = page_alloc->offset;
page_alloc->offset += frag_info->frag_stride;
}
dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
skb_frags[i].page_offset, frag_info->frag_size,
PCI_DMA_FROMDEVICE);
rx_desc->data[i].addr = cpu_to_be64(dma);
return 0;
}
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
struct mlx4_en_rx_alloc *page_alloc;
int i;
for (i = 0; i < priv->num_frags; i++) {
page_alloc = &ring->page_alloc[i];
page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
MLX4_EN_ALLOC_ORDER);
if (!page_alloc->page)
goto out;
page_alloc->offset = priv->frag_info[i].frag_align;
en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
i, page_alloc->page);
}
return 0;
out:
while (i--) {
page_alloc = &ring->page_alloc[i];
put_page(page_alloc->page);
page_alloc->page = NULL;
}
return -ENOMEM;
}
static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
struct mlx4_en_rx_alloc *page_alloc;
int i;
for (i = 0; i < priv->num_frags; i++) {
page_alloc = &ring->page_alloc[i];
en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
i, page_count(page_alloc->page));
put_page(page_alloc->page);
page_alloc->page = NULL;
}
}
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
struct skb_frag_struct *skb_frags = ring->rx_info +
(index << priv->log_rx_info);
int possible_frags;
int i;
/* Set size and memtype fields */
for (i = 0; i < priv->num_frags; i++) {
skb_frags[i].size = priv->frag_info[i].frag_size;
rx_desc->data[i].byte_count =
cpu_to_be32(priv->frag_info[i].frag_size);
rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
}
/* If the number of used fragments does not fill up the ring stride,
* remaining (unused) fragments must be padded with null address/size
* and a special memory key */
possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
for (i = priv->num_frags; i < possible_frags; i++) {
rx_desc->data[i].byte_count = 0;
rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
rx_desc->data[i].addr = 0;
}
}
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
struct skb_frag_struct *skb_frags = ring->rx_info +
(index << priv->log_rx_info);
int i;
for (i = 0; i < priv->num_frags; i++)
if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
goto err;
return 0;
err:
while (i--)
put_page(skb_frags[i].page);
return -ENOMEM;
}
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct skb_frag_struct *skb_frags;
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
dma_addr_t dma;
int nr;
skb_frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
dma = be64_to_cpu(rx_desc->data[nr].addr);
en_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
PCI_DMA_FROMDEVICE);
put_page(skb_frags[nr].page);
}
}
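/* Post the initial receive buffers on every ring. If allocation fails
 * before MLX4_EN_MIN_RX_SIZE descriptors have been posted the whole call
 * fails; otherwise all rings are shrunk to the largest power of two that
 * could actually be filled. */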
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
struct mlx4_en_rx_ring *ring;
int ring_ind;
int buf_ind;
int new_size;
for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = &priv->rx_ring[ring_ind];
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
en_err(priv, "Failed to allocate "
"enough rx buffers\n");
return -ENOMEM;
} else {
new_size = rounddown_pow_of_two(ring->actual_size);
en_warn(priv, "Only %d buffers allocated "
"reducing ring size to %d",
ring->actual_size, new_size);
goto reduce_rings;
}
}
ring->actual_size++;
ring->prod++;
}
}
return 0;
reduce_rings:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = &priv->rx_ring[ring_ind];
while (ring->actual_size > new_size) {
ring->actual_size--;
ring->prod--;
mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
}
ring->size_mask = ring->actual_size - 1;
}
return 0;
}
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
int index;
en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
ring->cons, ring->prod);
/* Unmap and free Rx buffers */
BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
while (ring->cons != ring->prod) {
index = ring->cons & ring->size_mask;
en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
mlx4_en_free_rx_desc(priv, ring, index);
++ring->cons;
}
}
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
int err;
int tmp;
ring->prod = 0;
ring->cons = 0;
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct skb_frag_struct));
ring->rx_info = vmalloc(tmp);
if (!ring->rx_info) {
en_err(priv, "Failed allocating rx_info ring\n");
return -ENOMEM;
}
en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
ring->rx_info, tmp);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
ring->buf_size, 2 * PAGE_SIZE);
if (err)
goto err_ring;
err = mlx4_en_map_buffer(&ring->wqres.buf);
if (err) {
en_err(priv, "Failed to map RX buffer\n");
goto err_hwq;
}
ring->buf = ring->wqres.buf.direct.buf;
/* Configure the LRO manager */
memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
ring->lro.dev = priv->dev;
ring->lro.features = LRO_F_NAPI;
ring->lro.frag_align_pad = NET_IP_ALIGN;
ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
ring->lro.max_desc = mdev->profile.num_lro;
ring->lro.max_aggr = MAX_SKB_FRAGS;
ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
sizeof(struct net_lro_desc),
GFP_KERNEL);
if (!ring->lro.lro_arr) {
en_err(priv, "Failed to allocate lro array\n");
goto err_map;
}
ring->lro.get_frag_header = mlx4_en_get_frag_header;
return 0;
err_map:
mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_ring:
vfree(ring->rx_info);
ring->rx_info = NULL;
return err;
}
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
struct mlx4_en_rx_ring *ring;
int i;
int ring_ind;
int err;
int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * priv->num_frags);
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = &priv->rx_ring[ring_ind];
ring->prod = 0;
ring->cons = 0;
ring->actual_size = 0;
ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
ring->stride = stride;
if (ring->stride <= TXBB_SIZE)
ring->buf += TXBB_SIZE;
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride;
memset(ring->buf, 0, ring->buf_size);
mlx4_en_update_rx_prod_db(ring);
/* Initialize all descriptors */
for (i = 0; i < ring->size; i++)
mlx4_en_init_rx_desc(priv, ring, i);
/* Initialize page allocators */
err = mlx4_en_init_allocator(priv, ring);
if (err) {
en_err(priv, "Failed initializing ring allocator\n");
ring_ind--;
goto err_allocator;
}
}
err = mlx4_en_fill_rx_buffers(priv);
if (err)
goto err_buffers;
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = &priv->rx_ring[ring_ind];
mlx4_en_update_rx_prod_db(ring);
}
return 0;
err_buffers:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
ring_ind = priv->rx_ring_num - 1;
err_allocator:
while (ring_ind >= 0) {
mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
ring_ind--;
}
return err;
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
struct mlx4_en_dev *mdev = priv->mdev;
kfree(ring->lro.lro_arr);
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
mlx4_en_destroy_allocator(priv, ring);
}
/* Unmap a completed descriptor and free unused pages */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
struct skb_frag_struct *skb_frags,
struct skb_frag_struct *skb_frags_rx,
struct mlx4_en_rx_alloc *page_alloc,
int length)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_frag_info *frag_info;
int nr;
dma_addr_t dma;
/* Collect used fragments while replacing them in the HW descriptors */
for (nr = 0; nr < priv->num_frags; nr++) {
frag_info = &priv->frag_info[nr];
if (length <= frag_info->frag_prefix_size)
break;
/* Save page reference in skb */
skb_frags_rx[nr].page = skb_frags[nr].page;
skb_frags_rx[nr].size = skb_frags[nr].size;
skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
dma = be64_to_cpu(rx_desc->data[nr].addr);
/* Allocate a replacement page */
if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
goto fail;
/* Unmap buffer */
pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
PCI_DMA_FROMDEVICE);
}
/* Adjust size of last fragment to match actual length */
if (nr > 0)
skb_frags_rx[nr - 1].size = length -
priv->frag_info[nr - 1].frag_prefix_size;
return nr;
fail:
/* Drop all accumulated fragments (which have already been replaced in
* the descriptor) of this packet; remaining fragments are reused... */
while (nr > 0) {
nr--;
put_page(skb_frags_rx[nr].page);
}
return 0;
}
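/* Build an skb for a received packet. Short packets are copied in full
 * into the linear part of the skb (with a DMA sync around the copy);
 * larger packets have only the headers copied while the page fragments
 * are attached to the skb and replaced in the descriptor. */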
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
struct skb_frag_struct *skb_frags,
struct mlx4_en_rx_alloc *page_alloc,
unsigned int length)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct sk_buff *skb;
void *va;
int used_frags;
dma_addr_t dma;
skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
if (!skb) {
en_dbg(RX_ERR, priv, "Failed allocating skb\n");
return NULL;
}
skb->dev = priv->dev;
skb_reserve(skb, NET_IP_ALIGN);
skb->len = length;
skb->truesize = length + sizeof(struct sk_buff);
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
* synch buffers for the copy */
dma = be64_to_cpu(rx_desc->data[0].addr);
dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
length, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, va, length);
dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
length, DMA_FROM_DEVICE);
skb->tail += length;
} else {
/* Move relevant fragments to skb */
used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
skb_shinfo(skb)->frags,
page_alloc, length);
if (unlikely(!used_frags)) {
kfree_skb(skb);
return NULL;
}
skb_shinfo(skb)->nr_frags = used_frags;
/* Copy headers into the skb linear buffer */
memcpy(skb->data, va, HEADER_COPY_SIZE);
skb->tail += HEADER_COPY_SIZE;
/* Skip headers in first fragment */
skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
/* Adjust size of first fragment */
skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
skb->data_len = length - HEADER_COPY_SIZE;
}
return skb;
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
struct skb_frag_struct *skb_frags;
struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
int index;
int nr;
unsigned int length;
int polled = 0;
int ip_summed;
if (!priv->port_up)
return 0;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
* reading 'cqe->index' */
index = cq->mcq.cons_index & ring->size_mask;
cqe = &cq->buf[index];
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cq->mcq.cons_index & cq->size)) {
skb_frags = ring->rx_info + (index << priv->log_rx_info);
rx_desc = ring->buf + (index << ring->log_stride);
/*
* make sure we read the CQE after we read the ownership bit
*/
rmb();
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
en_err(priv, "CQE completed in error - vendor "
"syndrom:%d syndrom:%d\n",
((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
((struct mlx4_err_cqe *) cqe)->syndrome);
goto next;
}
if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
goto next;
}
/*
* Packet is OK - process it.
*/
length = be32_to_cpu(cqe->byte_cnt);
ring->bytes += length;
ring->packets++;
if (likely(priv->rx_csum)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
(cqe->checksum == cpu_to_be16(0xffff))) {
priv->port_stats.rx_chksum_good++;
/* This packet is eligible for LRO if it is:
* - DIX Ethernet (type interpretation)
* - TCP/IP (v4)
* - without IP options
* - not an IP fragment */
if (mlx4_en_can_lro(cqe->status) &&
dev->features & NETIF_F_LRO) {
nr = mlx4_en_complete_rx_desc(
priv, rx_desc,
skb_frags, lro_frags,
ring->page_alloc, length);
if (!nr)
goto next;
if (priv->vlgrp && (cqe->vlan_my_qpn &
cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
lro_vlan_hwaccel_receive_frags(
&ring->lro, lro_frags,
length, length,
priv->vlgrp,
be16_to_cpu(cqe->sl_vid),
NULL, 0);
} else
lro_receive_frags(&ring->lro,
lro_frags,
length,
length,
NULL, 0);
goto next;
}
/* LRO not possible, complete processing here */
ip_summed = CHECKSUM_UNNECESSARY;
INC_PERF_COUNTER(priv->pstats.lro_misses);
} else {
ip_summed = CHECKSUM_NONE;
priv->port_stats.rx_chksum_none++;
}
} else {
ip_summed = CHECKSUM_NONE;
priv->port_stats.rx_chksum_none++;
}
skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
ring->page_alloc, length);
if (!skb) {
priv->stats.rx_dropped++;
goto next;
}
skb->ip_summed = ip_summed;
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
/* Push it up the stack */
if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK)) {
vlan_hwaccel_receive_skb(skb, priv->vlgrp,
be16_to_cpu(cqe->sl_vid));
} else
netif_receive_skb(skb);
next:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = &cq->buf[index];
if (++polled == budget) {
/* We are here because we reached the NAPI budget -
* flush only pending LRO sessions */
lro_flush_all(&ring->lro);
goto out;
}
}
/* If CQ is empty flush all LRO sessions unconditionally */
lro_flush_all(&ring->lro);
out:
AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
mlx4_cq_set_ci(&cq->mcq);
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
ring->cons = cq->mcq.cons_index;
ring->prod += polled; /* Polled descriptors were reallocated in place */
mlx4_en_update_rx_prod_db(ring);
return polled;
}
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
if (priv->port_up)
napi_schedule(&cq->napi);
else
mlx4_en_arm_cq(priv, cq);
}
/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
int done;
done = mlx4_en_process_rx_cq(dev, cq, budget);
/* If we used up all the quota - we're probably not done yet... */
if (done == budget)
INC_PERF_COUNTER(priv->pstats.napi_quota);
else {
/* Done for now */
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
}
return done;
}
/* Calculate the last offset position that accommodates a full fragment
 * (assuming fragment size = stride - align) */
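/* That is: the largest offset of the form align + k * stride at which a
 * fragment of (stride - align) bytes still ends inside the allocation. */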
static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
{
u16 res = MLX4_EN_ALLOC_SIZE % stride;
u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
"res:%d offset:%d\n", stride, align, res, offset);
return offset;
}
static int frag_sizes[] = {
FRAG_SZ0,
FRAG_SZ1,
FRAG_SZ2,
FRAG_SZ3
};
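/* Split the effective MTU (MTU plus Ethernet, VLAN and LLC/SNAP headers)
 * across the fixed fragment sizes above. The first fragment is offset by
 * NET_IP_ALIGN and every fragment's stride is rounded up to the cache
 * line size. */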
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
int buf_size = 0;
int i = 0;
while (buf_size < eff_mtu) {
priv->frag_info[i].frag_size =
(eff_mtu > buf_size + frag_sizes[i]) ?
frag_sizes[i] : eff_mtu - buf_size;
priv->frag_info[i].frag_prefix_size = buf_size;
if (!i) {
priv->frag_info[i].frag_align = NET_IP_ALIGN;
priv->frag_info[i].frag_stride =
ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
} else {
priv->frag_info[i].frag_align = 0;
priv->frag_info[i].frag_stride =
ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
}
priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
priv, priv->frag_info[i].frag_stride,
priv->frag_info[i].frag_align);
buf_size += priv->frag_info[i].frag_size;
i++;
}
priv->num_frags = i;
priv->rx_skb_size = eff_mtu;
priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
"num_frags:%d):\n", eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d "
"stride:%d last_offset:%d\n", i,
priv->frag_info[i].frag_size,
priv->frag_info[i].frag_prefix_size,
priv->frag_info[i].frag_align,
priv->frag_info[i].frag_stride,
priv->frag_info[i].last_offset);
}
}
/* RSS related functions */
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
struct mlx4_en_rx_ring *ring,
enum mlx4_qp_state *state,
struct mlx4_qp *qp)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_qp_context *context;
int err = 0;
context = kmalloc(sizeof *context , GFP_KERNEL);
if (!context) {
en_err(priv, "Failed to allocate qp context\n");
return -ENOMEM;
}
err = mlx4_qp_alloc(mdev->dev, qpn, qp);
if (err) {
en_err(priv, "Failed to allocate qp #%x\n", qpn);
goto out;
}
qp->event = mlx4_en_sqp_event;
memset(context, 0, sizeof *context);
mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 0, 0,
qpn, ring->cqn, context);
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
if (err) {
mlx4_qp_remove(mdev->dev, qp);
mlx4_qp_free(mdev->dev, qp);
}
mlx4_en_update_rx_prod_db(ring);
out:
kfree(context);
return err;
}
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
struct mlx4_qp_context context;
struct mlx4_en_rss_context *rss_context;
void *ptr;
int rss_xor = mdev->profile.rss_xor;
u8 rss_mask = mdev->profile.rss_mask;
int i, qpn;
int err = 0;
int good_qps = 0;
en_dbg(DRV, priv, "Configuring rss steering\n");
err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
priv->rx_ring_num,
&rss_map->base_qpn);
if (err) {
en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
return err;
}
for (i = 0; i < priv->rx_ring_num; i++) {
qpn = rss_map->base_qpn + i;
err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
&rss_map->state[i],
&rss_map->qps[i]);
if (err)
goto rss_err;
++good_qps;
}
/* Configure RSS indirection qp */
err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
if (err) {
en_err(priv, "Failed to reserve range for RSS "
"indirection qp\n");
goto rss_err;
}
err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
goto reserve_err;
}
rss_map->indir_qp.event = mlx4_en_sqp_event;
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
priv->rx_ring[0].cqn, &context);
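/* The RSS fields are written into the indirection QP context at a fixed
 * 0x3c byte offset. */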
ptr = ((void *) &context) + 0x3c;
rss_context = (struct mlx4_en_rss_context *) ptr;
rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
rss_context->hash_fn = rss_xor & 0x3;
rss_context->flags = rss_mask << 2;
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
if (err)
goto indir_err;
return 0;
indir_err:
mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
reserve_err:
mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
rss_err:
for (i = 0; i < good_qps; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
}
mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
return err;
}
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
int i;
mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
for (i = 0; i < priv->rx_ring_num; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
}
mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}


@@ -0,0 +1,776 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include "mlx4_en.h"
enum {
MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
};
static int inline_thold __read_mostly = MAX_INLINE;
module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring, u32 size,
u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
int tmp;
int err;
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
inline_thold = min(inline_thold, MAX_INLINE);
spin_lock_init(&ring->comp_lock);
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = vmalloc(tmp);
if (!ring->tx_info) {
en_err(priv, "Failed allocating tx_info ring\n");
return -ENOMEM;
}
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
ring->tx_info, tmp);
ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
if (!ring->bounce_buf) {
en_err(priv, "Failed allocating bounce buffer\n");
err = -ENOMEM;
goto err_tx;
}
ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
2 * PAGE_SIZE);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
}
err = mlx4_en_map_buffer(&ring->wqres.buf);
if (err) {
en_err(priv, "Failed to map TX buffer\n");
goto err_hwq_res;
}
ring->buf = ring->wqres.buf.direct.buf;
en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
"buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
if (err) {
en_err(priv, "Failed reserving qp for tx ring.\n");
goto err_map;
}
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
if (err) {
en_err(priv, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
}
ring->qp.event = mlx4_en_sqp_event;
return 0;
err_reserve:
mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
err_tx:
vfree(ring->tx_info);
ring->tx_info = NULL;
return err;
}
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring)
{
struct mlx4_en_dev *mdev = priv->mdev;
en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
mlx4_qp_remove(mdev->dev, &ring->qp);
mlx4_qp_free(mdev->dev, &ring->qp);
mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
vfree(ring->tx_info);
ring->tx_info = NULL;
}
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int cq)
{
struct mlx4_en_dev *mdev = priv->mdev;
int err;
ring->cqn = cq;
ring->prod = 0;
ring->cons = 0xffffffff;
ring->last_nr_txbb = 1;
ring->poll_cnt = 0;
ring->blocked = 0;
memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
memset(ring->buf, 0, ring->buf_size);
ring->qp_state = MLX4_QP_STATE_RST;
ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
ring->cqn, &ring->context);
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
&ring->qp, &ring->qp_state);
return err;
}
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring)
{
struct mlx4_en_dev *mdev = priv->mdev;
mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
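/* Release a completed send descriptor: unmap its data segments (taking
 * care of ring wraparound) and stamp every STAMP_STRIDE bytes of the freed
 * TXBBs with STAMP_VAL plus the expected owner bit, flipping that bit when
 * the stamp pointer wraps past the end of the ring. */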
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u8 owner)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
struct sk_buff *skb = tx_info->skb;
struct skb_frag_struct *frag;
void *end = ring->buf + ring->buf_size;
int frags = skb_shinfo(skb)->nr_frags;
int i;
__be32 *ptr = (__be32 *)tx_desc;
__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
/* Optimize the common case when there are no wraparounds */
if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
if (!tx_info->inl) {
if (tx_info->linear) {
pci_unmap_single(mdev->pdev,
(dma_addr_t) be64_to_cpu(data->addr),
be32_to_cpu(data->byte_count),
PCI_DMA_TODEVICE);
++data;
}
for (i = 0; i < frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
pci_unmap_page(mdev->pdev,
(dma_addr_t) be64_to_cpu(data[i].addr),
frag->size, PCI_DMA_TODEVICE);
}
}
/* Stamp the freed descriptor */
for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
*ptr = stamp;
ptr += STAMP_DWORDS;
}
} else {
if (!tx_info->inl) {
if ((void *) data >= end) {
data = (struct mlx4_wqe_data_seg *)
(ring->buf + ((void *) data - end));
}
if (tx_info->linear) {
pci_unmap_single(mdev->pdev,
(dma_addr_t) be64_to_cpu(data->addr),
be32_to_cpu(data->byte_count),
PCI_DMA_TODEVICE);
++data;
}
for (i = 0; i < frags; i++) {
/* Check for wraparound before unmapping */
if ((void *) data >= end)
data = (struct mlx4_wqe_data_seg *) ring->buf;
frag = &skb_shinfo(skb)->frags[i];
pci_unmap_page(mdev->pdev,
(dma_addr_t) be64_to_cpu(data->addr),
frag->size, PCI_DMA_TODEVICE);
++data;
}
}
/* Stamp the freed descriptor */
for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
*ptr = stamp;
ptr += STAMP_DWORDS;
if ((void *) ptr >= end) {
ptr = ring->buf;
stamp ^= cpu_to_be32(0x80000000);
}
}
}
dev_kfree_skb_any(skb);
return tx_info->nr_txbb;
}
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int cnt = 0;
/* Skip last polled descriptor */
ring->cons += ring->last_nr_txbb;
en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
ring->cons, ring->prod);
if ((u32) (ring->prod - ring->cons) > ring->size) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Tx consumer passed producer!\n");
return 0;
}
while (ring->cons != ring->prod) {
ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
ring->cons & ring->size_mask,
!!(ring->cons & ring->size));
ring->cons += ring->last_nr_txbb;
cnt++;
}
if (cnt)
en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
return cnt;
}
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
struct mlx4_cqe *cqe = cq->buf;
u16 index;
u16 new_index;
u32 txbbs_skipped = 0;
u32 cq_last_sav;
/* index always points to the first TXBB of the last polled descriptor */
index = ring->cons & ring->size_mask;
new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
if (index == new_index)
return;
if (!priv->port_up)
return;
/*
* We use a two-stage loop:
* - the first samples the HW-updated CQE
* - the second frees TXBBs until the last sample
* This lets us amortize CQE cache misses, while still polling the CQ
* until it is quiescent.
*/
cq_last_sav = mcq->cons_index;
do {
do {
/* Skip over last polled CQE */
index = (index + ring->last_nr_txbb) & ring->size_mask;
txbbs_skipped += ring->last_nr_txbb;
/* Poll next CQE */
ring->last_nr_txbb = mlx4_en_free_tx_desc(
priv, ring, index,
!!((ring->cons + txbbs_skipped) &
ring->size));
++mcq->cons_index;
} while (index != new_index);
new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
} while (index != new_index);
AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
(u32) (mcq->cons_index - cq_last_sav));
/*
* To prevent CQ overflow we first update CQ consumer and only then
* the ring consumer.
*/
mlx4_cq_set_ci(mcq);
wmb();
ring->cons += txbbs_skipped;
/* Wakeup Tx queue if this ring stopped it */
if (unlikely(ring->blocked)) {
if ((u32) (ring->prod - ring->cons) <=
ring->size - HEADROOM - MAX_DESC_TXBBS) {
ring->blocked = 0;
netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
priv->port_stats.wake_queue++;
}
}
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
if (!spin_trylock(&ring->comp_lock))
return;
mlx4_en_process_tx_cq(cq->dev, cq);
mod_timer(&cq->timer, jiffies + 1);
spin_unlock(&ring->comp_lock);
}
void mlx4_en_poll_tx_cq(unsigned long data)
{
struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
u32 inflight;
INC_PERF_COUNTER(priv->pstats.tx_poll);
if (!spin_trylock_irq(&ring->comp_lock)) {
mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
return;
}
mlx4_en_process_tx_cq(cq->dev, cq);
inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
/* If there are still packets in flight and the timer has not already
* been scheduled by the Tx routine then schedule it here to guarantee
* completion processing of these packets */
if (inflight && priv->port_up)
mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
spin_unlock_irq(&ring->comp_lock);
}
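/* Copy a descriptor that was built in the bounce buffer back into the ring.
 * The wrapped tail (start of the ring) is copied first, then the head at
 * 'index'; both loops run backwards and issue a wmb() at each TXBB boundary,
 * and dword 0 of the first TXBB (owner/opcode) is left for the caller to
 * write last. */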
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
u32 index,
unsigned int desc_size)
{
u32 copy = (ring->size - index) * TXBB_SIZE;
int i;
for (i = desc_size - copy - 4; i >= 0; i -= 4) {
if ((i & (TXBB_SIZE - 1)) == 0)
wmb();
*((u32 *) (ring->buf + i)) =
*((u32 *) (ring->bounce_buf + copy + i));
}
for (i = copy - 4; i >= 4 ; i -= 4) {
if ((i & (TXBB_SIZE - 1)) == 0)
wmb();
*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
*((u32 *) (ring->bounce_buf + i));
}
/* Return real descriptor location */
return ring->buf + index * TXBB_SIZE;
}
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
unsigned long flags;
/* If we don't have a pending timer, set one up to catch our recent
post in case the interface becomes idle */
if (!timer_pending(&cq->timer))
mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
mlx4_en_process_tx_cq(priv->dev, cq);
spin_unlock_irqrestore(&ring->comp_lock, flags);
}
}
static void *get_frag_ptr(struct sk_buff *skb)
{
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
struct page *page = frag->page;
void *ptr;
ptr = page_address(page);
if (unlikely(!ptr))
return NULL;
return ptr + frag->page_offset;
}
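/* Decide whether the skb payload should be inlined into the descriptor:
 * only small (<= inline_thold), non-GSO skbs with at most one page
 * fragment qualify; everything else is DMA-mapped as usual. */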
static int is_inline(struct sk_buff *skb, void **pfrag)
{
void *ptr;
if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
if (skb_shinfo(skb)->nr_frags == 1) {
ptr = get_frag_ptr(skb);
if (unlikely(!ptr))
return 0;
if (pfrag)
*pfrag = ptr;
return 1;
} else if (unlikely(skb_shinfo(skb)->nr_frags))
return 0;
else
return 1;
}
return 0;
}
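/* Size of an inline descriptor: if the inlined data fits below
 * MLX4_INLINE_ALIGN it needs a single inline segment header; otherwise it
 * is split in two and a second header must be accounted for. */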
static int inline_size(struct sk_buff *skb)
{
if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
<= MLX4_INLINE_ALIGN)
return ALIGN(skb->len + CTRL_SIZE +
sizeof(struct mlx4_wqe_inline_seg), 16);
else
return ALIGN(skb->len + CTRL_SIZE + 2 *
sizeof(struct mlx4_wqe_inline_seg), 16);
}
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
int *lso_header_size)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int real_size;
if (skb_is_gso(skb)) {
*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
ALIGN(*lso_header_size + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
/* We add a segment for the skb linear buffer only if
* it contains data */
if (*lso_header_size < skb_headlen(skb))
real_size += DS_SIZE;
else {
if (netif_msg_tx_err(priv))
en_warn(priv, "Non-linear headers\n");
return 0;
}
}
} else {
*lso_header_size = 0;
if (!is_inline(skb, NULL))
real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
else
real_size = inline_size(skb);
}
return real_size;
}
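/* Build an inline send WQE. When the payload does not fit in the first
 * inline segment (spc bytes), it is split across two segments; the second
 * segment's byte_count (inline flag in bit 31) is written only after a
 * wmb(), so the copied data is visible before the segment is marked valid. */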
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
if (skb->len <= spc) {
inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
if (skb_shinfo(skb)->nr_frags)
memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
skb_shinfo(skb)->frags[0].size);
} else {
inl->byte_count = cpu_to_be32(1 << 31 | spc);
if (skb_headlen(skb) <= spc) {
skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
if (skb_headlen(skb) < spc) {
memcpy(((void *)(inl + 1)) + skb_headlen(skb),
fragptr, spc - skb_headlen(skb));
fragptr += spc - skb_headlen(skb);
}
inl = (void *) (inl + 1) + spc;
memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
} else {
skb_copy_from_linear_data(skb, inl + 1, spc);
inl = (void *) (inl + 1) + spc;
skb_copy_from_linear_data_offset(skb, spc, inl + 1,
skb_headlen(skb) - spc);
if (skb_shinfo(skb)->nr_frags)
memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
fragptr, skb_shinfo(skb)->frags[0].size);
}
wmb();
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 vlan_tag = 0;
/* If we support per priority flow control and the packet contains
* a vlan tag, send the packet to the TX ring assigned to that priority
*/
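/* (The 802.1p priority lives in bits 15:13 of the VLAN tag, hence the
 * shift by 13 below.) */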
if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) {
vlan_tag = vlan_tx_tag_get(skb);
return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
}
return skb_tx_hash(dev, skb);
}
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *ring;
struct mlx4_en_cq *cq;
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
struct skb_frag_struct *frag;
struct mlx4_en_tx_info *tx_info;
int tx_ind = 0;
int nr_txbb;
int desc_size;
int real_size;
dma_addr_t dma;
u32 index;
__be32 op_own;
u16 vlan_tag = 0;
int i;
int lso_header_size;
void *fragptr;
real_size = get_real_size(skb, dev, &lso_header_size);
if (unlikely(!real_size))
goto tx_drop;
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
nr_txbb = desc_size / TXBB_SIZE;
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
goto tx_drop;
}
tx_ind = skb->queue_mapping;
ring = &priv->tx_ring[tx_ind];
if (priv->vlgrp && vlan_tx_tag_present(skb))
vlan_tag = vlan_tx_tag_get(skb);
/* Check available TXBBs and 2K spare for prefetch */
if (unlikely(((int)(ring->prod - ring->cons)) >
ring->size - HEADROOM - MAX_DESC_TXBBS)) {
/* Every full Tx ring stops its queue */
netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
ring->blocked = 1;
priv->port_stats.queue_stopped++;
/* Use interrupts to find out when queue opened */
cq = &priv->tx_cq[tx_ind];
mlx4_en_arm_cq(priv, cq);
return NETDEV_TX_BUSY;
}
/* Track current inflight packets for performance analysis */
AVG_PERF_COUNTER(priv->pstats.inflight_avg,
(u32) (ring->prod - ring->cons - 1));
/* Packet is good - grab an index and transmit it */
index = ring->prod & ring->size_mask;
/* See if we have enough space for whole descriptor TXBB for setting
* SW ownership on next descriptor; if not, use a bounce buffer. */
if (likely(index + nr_txbb <= ring->size))
tx_desc = ring->buf + index * TXBB_SIZE;
else
tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
/* Save skb in tx_info ring */
tx_info = &ring->tx_info[index];
tx_info->skb = skb;
tx_info->nr_txbb = nr_txbb;
/* Prepare the ctrl segment, apart from opcode+ownership which depend on
 * whether LSO is used */
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
MLX4_WQE_CTRL_SOLICITED);
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
MLX4_WQE_CTRL_TCP_UDP_CSUM);
priv->port_stats.tx_chksum_offload++;
}
/* Handle LSO (TSO) packets */
if (lso_header_size) {
/* Mark opcode as LSO */
op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
/* Fill in the LSO prefix */
tx_desc->lso.mss_hdr_size = cpu_to_be32(
skb_shinfo(skb)->gso_size << 16 | lso_header_size);
/* Copy headers;
* note that we already verified that it is linear */
memcpy(tx_desc->lso.header, skb->data, lso_header_size);
data = ((void *) &tx_desc->lso +
ALIGN(lso_header_size + 4, DS_SIZE));
priv->port_stats.tso_packets++;
i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
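/* i = number of TSO segments; the LSO header is replicated on the wire
 * for every segment, so account for the extra (i - 1) copies in the
 * byte counter. */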
ring->bytes += skb->len + (i - 1) * lso_header_size;
ring->packets += i;
} else {
/* Normal (Non LSO) packet */
op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
data = &tx_desc->data;
ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
ring->packets++;
}
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
/* valid only for non-inline segments */
tx_info->data_offset = (void *) data - (void *) tx_desc;
tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
if (!is_inline(skb, &fragptr)) {
/* Map fragments */
for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
frag = &skb_shinfo(skb)->frags[i];
dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
data->addr = cpu_to_be64(dma);
data->lkey = cpu_to_be32(mdev->mr.key);
wmb();
data->byte_count = cpu_to_be32(frag->size);
--data;
}
/* Map linear part */
if (tx_info->linear) {
dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
data->addr = cpu_to_be64(dma);
data->lkey = cpu_to_be32(mdev->mr.key);
wmb();
data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
}
tx_info->inl = 0;
} else {
build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
tx_info->inl = 1;
}
ring->prod += nr_txbb;
/* If we used a bounce buffer then copy descriptor back into place */
if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
/* Run destructor before passing skb to HW */
if (likely(!skb_shared(skb)))
skb_orphan(skb);
/* Ensure the new descriptor hits memory
* before setting ownership of this descriptor to HW */
wmb();
tx_desc->ctrl.owner_opcode = op_own;
/* Ring doorbell! */
wmb();
writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
/* Poll CQ here */
mlx4_en_xmit_poll(priv, tx_ind);
return NETDEV_TX_OK;
tx_drop:
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
return NETDEV_TX_OK;
}


@@ -0,0 +1,700 @@
/*
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "fw.h"
enum {
MLX4_IRQNAME_SIZE = 64
};
enum {
MLX4_NUM_ASYNC_EQE = 0x100,
MLX4_NUM_SPARE_EQE = 0x80,
MLX4_EQ_ENTRY_SIZE = 0x20
};
/*
* Must be packed because start is 64 bits but only aligned to 32 bits.
*/
struct mlx4_eq_context {
__be32 flags;
u16 reserved1[3];
__be16 page_offset;
u8 log_eq_size;
u8 reserved2[4];
u8 eq_period;
u8 reserved3;
u8 eq_max_count;
u8 reserved4[3];
u8 intr;
u8 log_page_size;
u8 reserved5[2];
u8 mtt_base_addr_h;
__be32 mtt_base_addr_l;
u32 reserved6[2];
__be32 consumer_index;
__be32 producer_index;
u32 reserved7[4];
};
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
#define MLX4_EQ_OWNER_HW ( 1 << 24)
#define MLX4_EQ_FLAG_EC ( 1 << 18)
#define MLX4_EQ_FLAG_OI ( 1 << 17)
#define MLX4_EQ_STATE_ARMED ( 9 << 8)
#define MLX4_EQ_STATE_FIRED (10 << 8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)
#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \
(1ull << MLX4_EVENT_TYPE_COMM_EST) | \
(1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \
(1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \
(1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
(1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
(1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
(1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
(1ull << MLX4_EVENT_TYPE_CMD))
struct mlx4_eqe {
u8 reserved1;
u8 type;
u8 reserved2;
u8 subtype;
union {
u32 raw[6];
struct {
__be32 cqn;
} __attribute__((packed)) comp;
struct {
u16 reserved1;
__be16 token;
u32 reserved2;
u8 reserved3[3];
u8 status;
__be64 out_param;
} __attribute__((packed)) cmd;
struct {
__be32 qpn;
} __attribute__((packed)) qp;
struct {
__be32 srqn;
} __attribute__((packed)) srq;
struct {
__be32 cqn;
u32 reserved1;
u8 reserved2[3];
u8 syndrome;
} __attribute__((packed)) cq_err;
struct {
u32 reserved1[2];
__be32 port;
} __attribute__((packed)) port_change;
} event;
u8 reserved3[3];
u8 owner;
} __attribute__((packed));
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
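/* EQ doorbell layout (as used here): consumer index in the low 24 bits,
 * bit 31 requests another event notification. */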
__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
req_not << 31),
eq->doorbell);
/* We still want ordering, just not swabbing, so add a barrier */
mb();
}
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
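/* The HW toggles the EQE owner bit on every pass around the queue; an
 * entry belongs to software when its owner bit matches the parity of
 * cons_index (the cons_index & nent test below). */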
struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
struct mlx4_eqe *eqe;
int cqn;
int eqes_found = 0;
int set_ci = 0;
int port;
while ((eqe = next_eqe_sw(eq))) {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
rmb();
switch (eqe->type) {
case MLX4_EVENT_TYPE_COMP:
cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
mlx4_cq_completion(dev, cqn);
break;
case MLX4_EVENT_TYPE_PATH_MIG:
case MLX4_EVENT_TYPE_COMM_EST:
case MLX4_EVENT_TYPE_SQ_DRAINED:
case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
eqe->type);
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
eqe->type);
break;
case MLX4_EVENT_TYPE_CMD:
mlx4_cmd_event(dev,
be16_to_cpu(eqe->event.cmd.token),
eqe->event.cmd.status,
be64_to_cpu(eqe->event.cmd.out_param));
break;
case MLX4_EVENT_TYPE_PORT_CHANGE:
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
} else {
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 0;
}
break;
case MLX4_EVENT_TYPE_CQ_ERROR:
mlx4_warn(dev, "CQ %s on CQN %06x\n",
eqe->event.cq_err.syndrome == 1 ?
"overrun" : "access violation",
be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
eqe->type);
break;
case MLX4_EVENT_TYPE_EQ_OVERFLOW:
mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
break;
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
break;
}
++eq->cons_index;
eqes_found = 1;
++set_ci;
/*
* The HCA will think the queue has overflowed if we
* don't tell it we've been processing events. We
* create our EQs with MLX4_NUM_SPARE_EQE extra
* entries, so we must update our consumer index at
* least that often.
*/
if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
eq_set_ci(eq, 0);
set_ci = 0;
}
}
eq_set_ci(eq, 1);
return eqes_found;
}
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
struct mlx4_dev *dev = dev_ptr;
struct mlx4_priv *priv = mlx4_priv(dev);
int work = 0;
int i;
writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
return IRQ_RETVAL(work);
}
static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
struct mlx4_eq *eq = eq_ptr;
struct mlx4_dev *dev = eq->dev;
mlx4_eq_int(dev, eq);
/* MSI-X vectors always belong to us */
return IRQ_HANDLED;
}
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
int eq_num)
{
return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
MLX4_CMD_TIME_CLASS_A);
}
static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
MLX4_CMD_TIME_CLASS_A);
}
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
/*
* Each UAR holds 4 EQ doorbells. To figure out how many UARs
* we need to map, take the difference of highest index and
* the lowest index we'll use and add 1.
*/
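/* E.g. (illustrative numbers) with 32 reserved EQs and 4 completion
 * vectors we use EQNs 32..36, i.e. UARs 8 and 9, and the formula below
 * yields 37/4 - 32/4 + 1 = 2. */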
return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
dev->caps.reserved_eqs / 4 + 1;
}
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int index;
index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
if (!priv->eq_table.uar_map[index]) {
priv->eq_table.uar_map[index] =
ioremap(pci_resource_start(dev->pdev, 2) +
((eq->eqn / 4) << PAGE_SHIFT),
PAGE_SIZE);
if (!priv->eq_table.uar_map[index]) {
mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
eq->eqn);
return NULL;
}
}
return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
u8 intr, struct mlx4_eq *eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_eq_context *eq_context;
int npages;
u64 *dma_list = NULL;
dma_addr_t t;
u64 mtt_addr;
int err = -ENOMEM;
int i;
eq->dev = dev;
eq->nent = roundup_pow_of_two(max(nent, 2));
npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
eq->page_list = kmalloc(npages * sizeof *eq->page_list,
GFP_KERNEL);
if (!eq->page_list)
goto err_out;
for (i = 0; i < npages; ++i)
eq->page_list[i].buf = NULL;
dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
if (!dma_list)
goto err_out_free;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
goto err_out_free;
eq_context = mailbox->buf;
for (i = 0; i < npages; ++i) {
eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
PAGE_SIZE, &t, GFP_KERNEL);
if (!eq->page_list[i].buf)
goto err_out_free_pages;
dma_list[i] = t;
eq->page_list[i].map = t;
memset(eq->page_list[i].buf, 0, PAGE_SIZE);
}
eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
if (eq->eqn == -1)
goto err_out_free_pages;
eq->doorbell = mlx4_get_eq_uar(dev, eq);
if (!eq->doorbell) {
err = -ENOMEM;
goto err_out_free_eq;
}
err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
if (err)
goto err_out_free_eq;
err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
if (err)
goto err_out_free_mtt;
memset(eq_context, 0, sizeof *eq_context);
eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
MLX4_EQ_STATE_ARMED);
eq_context->log_eq_size = ilog2(eq->nent);
eq_context->intr = intr;
eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
eq_context->mtt_base_addr_h = mtt_addr >> 32;
eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
if (err) {
mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
goto err_out_free_mtt;
}
kfree(dma_list);
mlx4_free_cmd_mailbox(dev, mailbox);
eq->cons_index = 0;
return err;
err_out_free_mtt:
mlx4_mtt_cleanup(dev, &eq->mtt);
err_out_free_eq:
mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
err_out_free_pages:
for (i = 0; i < npages; ++i)
if (eq->page_list[i].buf)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
eq->page_list[i].buf,
eq->page_list[i].map);
mlx4_free_cmd_mailbox(dev, mailbox);
err_out_free:
kfree(eq->page_list);
kfree(dma_list);
err_out:
return err;
}
static void mlx4_free_eq(struct mlx4_dev *dev,
struct mlx4_eq *eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
int err;
int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
int i;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return;
err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
if (err)
mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
if (0) {
mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
if (i % 4 == 0)
printk("[%02x] ", i * 4);
printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
if ((i + 1) % 4 == 0)
printk("\n");
}
}
mlx4_mtt_cleanup(dev, &eq->mtt);
for (i = 0; i < npages; ++i)
pci_free_consistent(dev->pdev, PAGE_SIZE,
eq->page_list[i].buf,
eq->page_list[i].map);
kfree(eq->page_list);
mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
mlx4_free_cmd_mailbox(dev, mailbox);
}
static void mlx4_free_irqs(struct mlx4_dev *dev)
{
struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
int i;
if (eq_table->have_irq)
free_irq(dev->pdev->irq, dev);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
kfree(eq_table->irq_names);
}
static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
if (!priv->clr_base) {
mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
return -ENOMEM;
}
return 0;
}
static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
iounmap(priv->clr_base);
}
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
sizeof *priv->eq_table.eq, GFP_KERNEL);
if (!priv->eq_table.eq)
return -ENOMEM;
return 0;
}
void mlx4_free_eq_table(struct mlx4_dev *dev)
{
kfree(mlx4_priv(dev)->eq_table.eq);
}
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
int i;
priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
				 sizeof *priv->eq_table.uar_map, GFP_KERNEL);
if (!priv->eq_table.uar_map) {
err = -ENOMEM;
goto err_out_free;
}
err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
if (err)
goto err_out_free;
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
priv->eq_table.uar_map[i] = NULL;
err = mlx4_map_clr_int(dev);
if (err)
goto err_out_bitmap;
priv->eq_table.clr_mask =
swab32(1 << (priv->eq_table.inta_pin & 31));
priv->eq_table.clr_int = priv->clr_base +
(priv->eq_table.inta_pin < 32 ? 4 : 0);
priv->eq_table.irq_names =
kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
GFP_KERNEL);
if (!priv->eq_table.irq_names) {
err = -ENOMEM;
goto err_out_bitmap;
}
for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
(dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
&priv->eq_table.eq[i]);
if (err) {
--i;
goto err_out_unmap;
}
}
err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
(dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
&priv->eq_table.eq[dev->caps.num_comp_vectors]);
if (err)
goto err_out_comp;
if (dev->flags & MLX4_FLAG_MSI_X) {
const char *eq_name;
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
if (i < dev->caps.num_comp_vectors) {
snprintf(priv->eq_table.irq_names +
i * MLX4_IRQNAME_SIZE,
MLX4_IRQNAME_SIZE,
"mlx4-comp-%d@pci:%s", i,
pci_name(dev->pdev));
} else {
snprintf(priv->eq_table.irq_names +
i * MLX4_IRQNAME_SIZE,
MLX4_IRQNAME_SIZE,
"mlx4-async@pci:%s",
pci_name(dev->pdev));
}
eq_name = priv->eq_table.irq_names +
i * MLX4_IRQNAME_SIZE;
err = request_irq(priv->eq_table.eq[i].irq,
mlx4_msi_x_interrupt, 0, eq_name,
priv->eq_table.eq + i);
if (err)
goto err_out_async;
priv->eq_table.eq[i].have_irq = 1;
}
} else {
snprintf(priv->eq_table.irq_names,
MLX4_IRQNAME_SIZE,
DRV_NAME "@pci:%s",
pci_name(dev->pdev));
err = request_irq(dev->pdev->irq, mlx4_interrupt,
IRQF_SHARED, priv->eq_table.irq_names, dev);
if (err)
goto err_out_async;
priv->eq_table.have_irq = 1;
}
err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
if (err)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
eq_set_ci(&priv->eq_table.eq[i], 1);
return 0;
err_out_async:
mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
err_out_comp:
i = dev->caps.num_comp_vectors - 1;
err_out_unmap:
while (i >= 0) {
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
--i;
}
mlx4_unmap_clr_int(dev);
mlx4_free_irqs(dev);
err_out_bitmap:
mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
err_out_free:
kfree(priv->eq_table.uar_map);
return err;
}
void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
mlx4_free_irqs(dev);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
mlx4_unmap_clr_int(dev);
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
if (priv->eq_table.uar_map[i])
iounmap(priv->eq_table.uar_map[i]);
mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
kfree(priv->eq_table.uar_map);
}


@@ -0,0 +1,891 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx4/cmd.h>
#include <linux/cache.h>
#include "fw.h"
#include "icm.h"
enum {
MLX4_COMMAND_INTERFACE_MIN_REV = 2,
MLX4_COMMAND_INTERFACE_MAX_REV = 3,
MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3,
};
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
static int enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
#define MLX4_GET(dest, source, offset) \
do { \
void *__p = (char *) (source) + (offset); \
switch (sizeof (dest)) { \
case 1: (dest) = *(u8 *) __p; break; \
case 2: (dest) = be16_to_cpup(__p); break; \
case 4: (dest) = be32_to_cpup(__p); break; \
case 8: (dest) = be64_to_cpup(__p); break; \
default: __buggy_use_of_MLX4_GET(); \
} \
} while (0)
#define MLX4_PUT(dest, source, offset) \
do { \
void *__d = ((char *) (dest) + (offset)); \
switch (sizeof(source)) { \
case 1: *(u8 *) __d = (source); break; \
case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
default: __buggy_use_of_MLX4_PUT(); \
} \
} while (0)
static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
{
static const char *fname[] = {
[ 0] = "RC transport",
[ 1] = "UC transport",
[ 2] = "UD transport",
[ 3] = "XRC transport",
[ 4] = "reliable multicast",
[ 5] = "FCoIB support",
[ 6] = "SRQ support",
[ 7] = "IPoIB checksum offload",
[ 8] = "P_Key violation counter",
[ 9] = "Q_Key violation counter",
[10] = "VMM",
[12] = "DPDP",
[16] = "MW support",
[17] = "APM support",
[18] = "Atomic ops support",
[19] = "Raw multicast support",
[20] = "Address vector port checking support",
[21] = "UD multicast support",
[24] = "Demand paging support",
[25] = "Router support"
};
int i;
mlx4_dbg(dev, "DEV_CAP flags:\n");
for (i = 0; i < ARRAY_SIZE(fname); ++i)
if (fname[i] && (flags & (1 << i)))
mlx4_dbg(dev, " %s\n", fname[i]);
}
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *inbox;
int err = 0;
#define MOD_STAT_CFG_IN_SIZE 0x100
#define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET 0x003
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
MLX4_CMD_TIME_CLASS_A);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u8 field;
u16 size;
u16 stat_rate;
int err;
int i;
#define QUERY_DEV_CAP_OUT_SIZE 0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
#define QUERY_DEV_CAP_BF_OFFSET 0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
MLX4_CMD_TIME_CLASS_A);
if (err)
goto out;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
dev_cap->reserved_qps = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
dev_cap->max_qps = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
dev_cap->reserved_srqs = 1 << (field >> 4);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
dev_cap->max_srqs = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
dev_cap->max_cq_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
dev_cap->reserved_cqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
dev_cap->max_cqs = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
dev_cap->max_mpts = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
dev_cap->reserved_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
dev_cap->max_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
dev_cap->reserved_mtts = 1 << (field >> 4);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
dev_cap->max_mrw_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
dev_cap->reserved_mrws = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
dev_cap->max_mtt_seg = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
field &= 0x1f;
if (!field)
dev_cap->max_gso_sz = 0;
else
dev_cap->max_gso_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
dev_cap->max_rdma_global = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
dev_cap->local_ca_ack_delay = field & 0x1f;
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
dev_cap->num_ports = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
dev_cap->max_msg_sz = 1 << (field & 0x1f);
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
dev_cap->reserved_uars = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
dev_cap->min_page_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
if (field & 0x80) {
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
dev_cap->bf_reg_size = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
} else {
dev_cap->bf_reg_size = 0;
mlx4_dbg(dev, "BlueFlame not available\n");
}
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
dev_cap->max_sq_sg = field;
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
dev_cap->max_sq_desc_sz = size;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
dev_cap->max_qp_per_mcg = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
dev_cap->reserved_mgms = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
dev_cap->max_mcgs = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
dev_cap->reserved_pds = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
dev_cap->max_pds = 1 << (field & 0x3f);
MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
dev_cap->rdmarc_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
dev_cap->qpc_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
dev_cap->aux_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
dev_cap->altc_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
dev_cap->eqc_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
dev_cap->cqc_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
dev_cap->srq_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
dev_cap->cmpt_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
dev_cap->mtt_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
dev_cap->dmpt_entry_sz = size;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
dev_cap->max_srq_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
dev_cap->max_qp_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
dev_cap->resize_srq = field & 1;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
dev_cap->max_rq_sg = field;
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
dev_cap->max_rq_desc_sz = size;
MLX4_GET(dev_cap->bmme_flags, outbox,
QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
for (i = 1; i <= dev_cap->num_ports; ++i) {
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
dev_cap->max_vl[i] = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
dev_cap->ib_mtu[i] = field >> 4;
dev_cap->max_port_width[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
dev_cap->max_gids[i] = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
dev_cap->max_pkeys[i] = 1 << (field & 0xf);
}
} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
#define QUERY_PORT_MTU_OFFSET 0x01
#define QUERY_PORT_ETH_MTU_OFFSET 0x02
#define QUERY_PORT_WIDTH_OFFSET 0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
#define QUERY_PORT_MAX_VL_OFFSET 0x0b
#define QUERY_PORT_MAC_OFFSET 0x10
for (i = 1; i <= dev_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
MLX4_CMD_TIME_CLASS_B);
if (err)
goto out;
MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
dev_cap->supported_port_types[i] = field & 3;
MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
dev_cap->ib_mtu[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
dev_cap->max_port_width[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
dev_cap->max_gids[i] = 1 << (field >> 4);
dev_cap->max_pkeys[i] = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
dev_cap->max_vl[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
dev_cap->log_max_macs[i] = field & 0xf;
dev_cap->log_max_vlans[i] = field >> 4;
MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
}
}
mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
dev_cap->bmme_flags, dev_cap->reserved_lkey);
/*
* Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
* we can't use any EQs whose doorbell falls on that page,
* even if the EQ itself isn't reserved.
*/
dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
dev_cap->reserved_eqs);
mlx4_dbg(dev, "Max ICM size %lld MB\n",
(unsigned long long) dev_cap->max_icm_sz >> 20);
mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
dev_cap->reserved_mrws, dev_cap->reserved_mtts);
mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
dev_cap->max_port_width[1]);
mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
dump_dev_cap_flags(dev, dev_cap->flags);
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_icm_iter iter;
__be64 *pages;
int lg;
int nent = 0;
int i;
int err = 0;
int ts = 0, tc = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
pages = mailbox->buf;
for (mlx4_icm_first(icm, &iter);
!mlx4_icm_last(&iter);
mlx4_icm_next(&iter)) {
/*
* We have to pass pages that are aligned to their
* size, so find the least significant 1 in the
* address or size and use that as our log2 size.
*/
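/* E.g. (illustrative) a 256 KB chunk starting at a 1 MB-aligned address
 * gives lg = 18, i.e. the chunk is passed to FW as 256 KB pages. */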
lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
if (lg < MLX4_ICM_PAGE_SHIFT) {
mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
MLX4_ICM_PAGE_SIZE,
(unsigned long long) mlx4_icm_addr(&iter),
mlx4_icm_size(&iter));
err = -EINVAL;
goto out;
}
for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
if (virt != -1) {
pages[nent * 2] = cpu_to_be64(virt);
virt += 1 << lg;
}
pages[nent * 2 + 1] =
cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
(lg - MLX4_ICM_PAGE_SHIFT));
ts += 1 << (lg - 10);
++tc;
if (++nent == MLX4_MAILBOX_SIZE / 16) {
err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
MLX4_CMD_TIME_CLASS_B);
if (err)
goto out;
nent = 0;
}
}
}
if (nent)
err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
if (err)
goto out;
switch (op) {
case MLX4_CMD_MAP_FA:
mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM_AUX:
mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM:
mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
tc, ts, (unsigned long long) virt - (ts << 10));
break;
}
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}
int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
}
int mlx4_RUN_FW(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
}
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
int err = 0;
u64 fw_ver;
u16 cmd_if_rev;
u8 lg;
#define QUERY_FW_OUT_SIZE 0x100
#define QUERY_FW_VER_OFFSET 0x00
#define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
#define QUERY_FW_MAX_CMD_OFFSET 0x0f
#define QUERY_FW_ERR_START_OFFSET 0x30
#define QUERY_FW_ERR_SIZE_OFFSET 0x38
#define QUERY_FW_ERR_BAR_OFFSET 0x3c
#define QUERY_FW_SIZE_OFFSET 0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
MLX4_CMD_TIME_CLASS_A);
if (err)
goto out;
MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
/*
* FW subminor version is at more significant bits than minor
* version, so swap here.
*/
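/* E.g. (illustrative) a raw value of 0x0002_0003_0001 (major 2,
 * subminor 3, minor 1) becomes 0x0002_0001_0003 and is reported
 * as 2.1.003. */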
dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16);
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
mlx4_err(dev, "Installed FW has unsupported "
"command interface revision %d.\n",
cmd_if_rev);
mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
(int) (dev->caps.fw_ver >> 32),
(int) (dev->caps.fw_ver >> 16) & 0xffff,
(int) dev->caps.fw_ver & 0xffff);
mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
err = -ENODEV;
goto out;
}
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
cmd->max_cmds = 1 << lg;
mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
(int) (dev->caps.fw_ver >> 32),
(int) (dev->caps.fw_ver >> 16) & 0xffff,
(int) dev->caps.fw_ver & 0xffff,
cmd_if_rev, cmd->max_cmds);
MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
fw->catas_bar = (fw->catas_bar >> 6) * 2;
mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
(unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
/*
* Round up number of system pages needed in case
* MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
*/
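/* E.g. assuming 4 KB ICM pages on a 64 KB-page system, fw_pages is
 * rounded up to a multiple of 16 and then converted to a count of
 * system pages (divided by 16). */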
fw->fw_pages =
ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
(unsigned long long) fw->clr_int_base, fw->clr_int_bar);
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
static void get_board_id(void *vsd, char *board_id)
{
int i;
#define VSD_OFFSET_SIG1 0x00
#define VSD_OFFSET_SIG2 0xde
#define VSD_OFFSET_MLX_BOARD_ID 0xd0
#define VSD_OFFSET_TS_BOARD_ID 0x20
#define VSD_SIGNATURE_TOPSPIN 0x5ad
memset(board_id, 0, MLX4_BOARD_ID_LEN);
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
} else {
/*
* The board ID is a string but the firmware byte
* swaps each 4-byte word before passing it back to
* us. Therefore we need to swab it before printing.
*/
for (i = 0; i < 4; ++i)
((u32 *) board_id)[i] =
swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
}
}
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
int err;
#define QUERY_ADAPTER_OUT_SIZE 0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
#define QUERY_ADAPTER_VSD_OFFSET 0x20
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
MLX4_CMD_TIME_CLASS_A);
if (err)
goto out;
MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
adapter->board_id);
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *inbox;
int err;
#define INIT_HCA_IN_SIZE 0x200
#define INIT_HCA_VERSION_OFFSET 0x000
#define INIT_HCA_VERSION 2
#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
#define INIT_HCA_FLAGS_OFFSET 0x014
#define INIT_HCA_QPC_OFFSET 0x020
#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
#define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
#define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET 0x0c0
#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_TPT_OFFSET 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET 0x120
#define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
#define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
memset(inbox, 0, INIT_HCA_IN_SIZE);
*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
(ilog2(cache_line_size()) - 4) << 5;
#if defined(__LITTLE_ENDIAN)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
/* Check port for UD address vector: */
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
/* Enable IPoIB checksumming if we can: */
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
/* Enable QoS support if module parameter set */
if (enable_qos)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
/* multicast attributes */
MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
/* TPT attributes */
MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
/* UAR attributes */
MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
if (err)
mlx4_err(dev, "INIT_HCA returns %d\n", err);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *inbox;
int err;
u32 flags;
u16 field;
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE 256
#define INIT_PORT_FLAGS_OFFSET 0x00
#define INIT_PORT_FLAG_SIG (1 << 18)
#define INIT_PORT_FLAG_NG (1 << 17)
#define INIT_PORT_FLAG_G0 (1 << 16)
#define INIT_PORT_VL_SHIFT 4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET 0x04
#define INIT_PORT_MAX_GID_OFFSET 0x06
#define INIT_PORT_MAX_PKEY_OFFSET 0x0a
#define INIT_PORT_GUID0_OFFSET 0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET 0x20
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
memset(inbox, 0, INIT_PORT_IN_SIZE);
flags = 0;
flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
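/*
 * ib_mtu_cap apparently holds the IB MTU enum value (1 = 256 bytes, ...,
 * 5 = 4096 bytes); 128 << value converts it to a byte count.
 */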
field = 128 << dev->caps.ib_mtu_cap[port];
MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
field = dev->caps.gid_table_len[port];
MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
field = dev->caps.pkey_table_len[port];
MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
MLX4_CMD_TIME_CLASS_A);
mlx4_free_cmd_mailbox(dev, mailbox);
} else
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
MLX4_CMD_TIME_CLASS_A);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
}
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
MLX4_CMD_SET_ICM_SIZE,
MLX4_CMD_TIME_CLASS_A);
if (ret)
return ret;
/*
* Round up number of system pages needed in case
* MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
*/
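/*
 * Example: with 64 KB system pages (PAGE_SHIFT 16) each system page holds
 * 16 ICM pages, so an aux_pages count of 20 becomes ALIGN(20, 16) >> 4 = 2
 * system pages.  With 4 KB system pages the value is returned unchanged.
 */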
*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
return 0;
}
int mlx4_NOP(struct mlx4_dev *dev)
{
/* Input modifier of 0x1f means "finish as soon as possible." */
return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
}

View File

@@ -0,0 +1,177 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX4_FW_H
#define MLX4_FW_H
#include "mlx4.h"
#include "icm.h"
struct mlx4_mod_stat_cfg {
u8 log_pg_sz;
u8 log_pg_sz_m;
};
struct mlx4_dev_cap {
int max_srq_sz;
int max_qp_sz;
int reserved_qps;
int max_qps;
int reserved_srqs;
int max_srqs;
int max_cq_sz;
int reserved_cqs;
int max_cqs;
int max_mpts;
int reserved_eqs;
int max_eqs;
int reserved_mtts;
int max_mrw_sz;
int reserved_mrws;
int max_mtt_seg;
int max_requester_per_qp;
int max_responder_per_qp;
int max_rdma_global;
int local_ca_ack_delay;
int num_ports;
u32 max_msg_sz;
int ib_mtu[MLX4_MAX_PORTS + 1];
int max_port_width[MLX4_MAX_PORTS + 1];
int max_vl[MLX4_MAX_PORTS + 1];
int max_gids[MLX4_MAX_PORTS + 1];
int max_pkeys[MLX4_MAX_PORTS + 1];
u64 def_mac[MLX4_MAX_PORTS + 1];
u16 eth_mtu[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
u32 flags;
int reserved_uars;
int uar_size;
int min_page_sz;
int bf_reg_size;
int bf_regs_per_page;
int max_sq_sg;
int max_sq_desc_sz;
int max_rq_sg;
int max_rq_desc_sz;
int max_qp_per_mcg;
int reserved_mgms;
int max_mcgs;
int reserved_pds;
int max_pds;
int qpc_entry_sz;
int rdmarc_entry_sz;
int altc_entry_sz;
int aux_entry_sz;
int srq_entry_sz;
int cqc_entry_sz;
int eqc_entry_sz;
int dmpt_entry_sz;
int cmpt_entry_sz;
int mtt_entry_sz;
int resize_srq;
u32 bmme_flags;
u32 reserved_lkey;
u64 max_icm_sz;
int max_gso_sz;
u8 supported_port_types[MLX4_MAX_PORTS + 1];
u8 log_max_macs[MLX4_MAX_PORTS + 1];
u8 log_max_vlans[MLX4_MAX_PORTS + 1];
};
struct mlx4_adapter {
char board_id[MLX4_BOARD_ID_LEN];
u8 inta_pin;
};
struct mlx4_init_hca_param {
u64 qpc_base;
u64 rdmarc_base;
u64 auxc_base;
u64 altc_base;
u64 srqc_base;
u64 cqc_base;
u64 eqc_base;
u64 mc_base;
u64 dmpt_base;
u64 cmpt_base;
u64 mtt_base;
u16 log_mc_entry_sz;
u16 log_mc_hash_sz;
u8 log_num_qps;
u8 log_num_srqs;
u8 log_num_cqs;
u8 log_num_eqs;
u8 log_rd_per_qp;
u8 log_mc_table_sz;
u8 log_mpt_sz;
u8 log_uar_sz;
};
struct mlx4_init_ib_param {
int port_width;
int vl_cap;
int mtu_cap;
u16 gid_cap;
u16 pkey_cap;
int set_guid0;
u64 guid0;
int set_node_guid;
u64 node_guid;
int set_si_guid;
u64 si_guid;
};
struct mlx4_set_ib_param {
int set_si_guid;
int reset_qkey_viol;
u64 si_guid;
u32 cap_mask;
};
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_FA(struct mlx4_dev *dev);
int mlx4_RUN_FW(struct mlx4_dev *dev);
int mlx4_QUERY_FW(struct mlx4_dev *dev);
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
int mlx4_NOP(struct mlx4_dev *dev);
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
#endif /* MLX4_FW_H */

View File

@@ -0,0 +1,454 @@
/*
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
#include "fw.h"
/*
* We allocate in as big chunks as we can, up to a maximum of 256 KB
* per chunk.
*/
enum {
MLX4_ICM_ALLOC_SIZE = 1 << 18,
MLX4_TABLE_CHUNK_SIZE = 1 << 18
};
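/*
 * An ICM area is kept as a list of chunks.  Each chunk holds up to
 * MLX4_ICM_CHUNK_LEN scatterlist entries, and each entry points to a
 * physically contiguous allocation of at most MLX4_ICM_ALLOC_SIZE bytes.
 */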
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
int i;
if (chunk->nsg > 0)
pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
PCI_DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
__free_pages(sg_page(&chunk->mem[i]),
get_order(chunk->mem[i].length));
}
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
int i;
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i]));
}
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
struct mlx4_icm_chunk *chunk, *tmp;
if (!icm)
return;
list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
if (coherent)
mlx4_free_icm_coherent(dev, chunk);
else
mlx4_free_icm_pages(dev, chunk);
kfree(chunk);
}
kfree(icm);
}
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
struct page *page;
page = alloc_pages(gfp_mask, order);
if (!page)
return -ENOMEM;
sg_set_page(mem, page, PAGE_SIZE << order, 0);
return 0;
}
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
int order, gfp_t gfp_mask)
{
void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
&sg_dma_address(mem), gfp_mask);
if (!buf)
return -ENOMEM;
sg_set_buf(mem, buf, PAGE_SIZE << order);
BUG_ON(mem->offset);
sg_dma_len(mem) = PAGE_SIZE << order;
return 0;
}
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
gfp_t gfp_mask, int coherent)
{
struct mlx4_icm *icm;
struct mlx4_icm_chunk *chunk = NULL;
int cur_order;
int ret;
/* We use sg_set_buf for coherent allocs, which assumes low memory */
BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
if (!icm)
return NULL;
icm->refcount = 0;
INIT_LIST_HEAD(&icm->chunk_list);
cur_order = get_order(MLX4_ICM_ALLOC_SIZE);
while (npages > 0) {
if (!chunk) {
chunk = kmalloc(sizeof *chunk,
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
if (!chunk)
goto fail;
sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
chunk->npages = 0;
chunk->nsg = 0;
list_add_tail(&chunk->list, &icm->chunk_list);
}
while (1 << cur_order > npages)
--cur_order;
if (coherent)
ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
&chunk->mem[chunk->npages],
cur_order, gfp_mask);
else
ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
cur_order, gfp_mask);
if (!ret) {
++chunk->npages;
if (coherent)
++chunk->nsg;
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
if (chunk->nsg <= 0)
goto fail;
}
if (chunk->npages == MLX4_ICM_CHUNK_LEN)
chunk = NULL;
npages -= 1 << cur_order;
} else {
--cur_order;
if (cur_order < 0)
goto fail;
}
}
if (!coherent && chunk) {
chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
if (chunk->nsg <= 0)
goto fail;
}
return icm;
fail:
mlx4_free_icm(dev, icm, coherent);
return NULL;
}
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}
int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
MLX4_CMD_TIME_CLASS_B);
}
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
__be64 *inbox;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
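/* The mailbox carries one (virtual address, DMA address) pair; the input
 * modifier of 1 asks the firmware to map a single page. */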
inbox[0] = cpu_to_be64(virt);
inbox[1] = cpu_to_be64(dma_addr);
err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
if (!err)
mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
(unsigned long long) dma_addr, (unsigned long long) virt);
return err;
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
}
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
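/* Index of the ICM chunk backing this object: each chunk covers
 * MLX4_TABLE_CHUNK_SIZE / obj_size objects. */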
int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
int ret = 0;
mutex_lock(&table->mutex);
if (table->icm[i]) {
++table->icm[i]->refcount;
goto out;
}
table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
__GFP_NOWARN, table->coherent);
if (!table->icm[i]) {
ret = -ENOMEM;
goto out;
}
if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
(u64) i * MLX4_TABLE_CHUNK_SIZE)) {
mlx4_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
ret = -ENOMEM;
goto out;
}
++table->icm[i]->refcount;
out:
mutex_unlock(&table->mutex);
return ret;
}
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
int i;
i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
mutex_lock(&table->mutex);
if (--table->icm[i]->refcount == 0) {
mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
mlx4_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
}
mutex_unlock(&table->mutex);
}
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
{
int idx, offset, dma_offset, i;
struct mlx4_icm_chunk *chunk;
struct mlx4_icm *icm;
struct page *page = NULL;
if (!table->lowmem)
return NULL;
mutex_lock(&table->mutex);
idx = (obj & (table->num_obj - 1)) * table->obj_size;
icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;
if (!icm)
goto out;
list_for_each_entry(chunk, &icm->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
if (dma_handle && dma_offset >= 0) {
if (sg_dma_len(&chunk->mem[i]) > dma_offset)
*dma_handle = sg_dma_address(&chunk->mem[i]) +
dma_offset;
dma_offset -= sg_dma_len(&chunk->mem[i]);
}
/*
* DMA mapping can merge pages but not split them,
* so if we found the page, dma_handle has already
* been assigned to.
*/
if (chunk->mem[i].length > offset) {
page = sg_page(&chunk->mem[i]);
goto out;
}
offset -= chunk->mem[i].length;
}
}
out:
mutex_unlock(&table->mutex);
return page ? lowmem_page_address(page) + offset : NULL;
}
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
int start, int end)
{
int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
int i, err;
for (i = start; i <= end; i += inc) {
err = mlx4_table_get(dev, table, i);
if (err)
goto fail;
}
return 0;
fail:
while (i > start) {
i -= inc;
mlx4_table_put(dev, table, i);
}
return err;
}
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
int start, int end)
{
int i;
for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
mlx4_table_put(dev, table, i);
}
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 virt, int obj_size, int nobj, int reserved,
int use_lowmem, int use_coherent)
{
int obj_per_chunk;
int num_icm;
unsigned chunk_size;
int i;
obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
if (!table->icm)
return -ENOMEM;
table->virt = virt;
table->num_icm = num_icm;
table->num_obj = nobj;
table->obj_size = obj_size;
table->lowmem = use_lowmem;
table->coherent = use_coherent;
mutex_init(&table->mutex);
for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
chunk_size = MLX4_TABLE_CHUNK_SIZE;
if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);
table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
__GFP_NOWARN, use_coherent);
if (!table->icm[i])
goto err;
if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
mlx4_free_icm(dev, table->icm[i], use_coherent);
table->icm[i] = NULL;
goto err;
}
/*
* Add a reference to this ICM chunk so that it never
* gets freed (since it contains reserved firmware objects).
*/
++table->icm[i]->refcount;
}
return 0;
err:
for (i = 0; i < num_icm; ++i)
if (table->icm[i]) {
mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
mlx4_free_icm(dev, table->icm[i], use_coherent);
}
return -ENOMEM;
}
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
int i;
for (i = 0; i < table->num_icm; ++i)
if (table->icm[i]) {
mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
mlx4_free_icm(dev, table->icm[i], table->coherent);
}
kfree(table->icm);
}

View File

@@ -0,0 +1,136 @@
/*
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX4_ICM_H
#define MLX4_ICM_H
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>
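/*
 * Size the per-chunk scatterlist array so that struct mlx4_icm_chunk
 * (list head, two counters and the array) stays within roughly 256 bytes.
 */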
#define MLX4_ICM_CHUNK_LEN \
((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
(sizeof (struct scatterlist)))
enum {
MLX4_ICM_PAGE_SHIFT = 12,
MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
};
struct mlx4_icm_chunk {
struct list_head list;
int npages;
int nsg;
struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
};
struct mlx4_icm {
struct list_head chunk_list;
int refcount;
};
struct mlx4_icm_iter {
struct mlx4_icm *icm;
struct mlx4_icm_chunk *chunk;
int page_idx;
};
struct mlx4_dev;
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
gfp_t gfp_mask, int coherent);
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
int start, int end);
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
int start, int end);
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 virt, int obj_size, int nobj, int reserved,
int use_lowmem, int use_coherent);
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
static inline void mlx4_icm_first(struct mlx4_icm *icm,
struct mlx4_icm_iter *iter)
{
iter->icm = icm;
iter->chunk = list_empty(&icm->chunk_list) ?
NULL : list_entry(icm->chunk_list.next,
struct mlx4_icm_chunk, list);
iter->page_idx = 0;
}
static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
{
return !iter->chunk;
}
static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
{
if (++iter->page_idx >= iter->chunk->nsg) {
if (iter->chunk->list.next == &iter->icm->chunk_list) {
iter->chunk = NULL;
return;
}
iter->chunk = list_entry(iter->chunk->list.next,
struct mlx4_icm_chunk, list);
iter->page_idx = 0;
}
}
static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
{
return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
}
static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
{
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
}
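/*
 * Typical use of the iterator above (sketch; use() stands in for whatever
 * consumes each DMA-mapped region):
 *
 *	struct mlx4_icm_iter iter;
 *
 *	for (mlx4_icm_first(icm, &iter);
 *	     !mlx4_icm_last(&iter);
 *	     mlx4_icm_next(&iter))
 *		use(mlx4_icm_addr(&iter), mlx4_icm_size(&iter));
 */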
int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
#endif /* MLX4_ICM_H */

View File

@@ -0,0 +1,161 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "mlx4.h"
struct mlx4_device_context {
struct list_head list;
struct mlx4_interface *intf;
void *context;
};
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);
static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
struct mlx4_device_context *dev_ctx;
dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL);
if (!dev_ctx)
return;
dev_ctx->intf = intf;
dev_ctx->context = intf->add(&priv->dev);
if (dev_ctx->context) {
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
spin_unlock_irq(&priv->ctx_lock);
} else
kfree(dev_ctx);
}
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
struct mlx4_device_context *dev_ctx;
list_for_each_entry(dev_ctx, &priv->ctx_list, list)
if (dev_ctx->intf == intf) {
spin_lock_irq(&priv->ctx_lock);
list_del(&dev_ctx->list);
spin_unlock_irq(&priv->ctx_lock);
intf->remove(&priv->dev, dev_ctx->context);
kfree(dev_ctx);
return;
}
}
int mlx4_register_interface(struct mlx4_interface *intf)
{
struct mlx4_priv *priv;
if (!intf->add || !intf->remove)
return -EINVAL;
mutex_lock(&intf_mutex);
list_add_tail(&intf->list, &intf_list);
list_for_each_entry(priv, &dev_list, dev_list)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_register_interface);
void mlx4_unregister_interface(struct mlx4_interface *intf)
{
struct mlx4_priv *priv;
mutex_lock(&intf_mutex);
list_for_each_entry(priv, &dev_list, dev_list)
mlx4_remove_device(intf, priv);
list_del(&intf->list);
mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_device_context *dev_ctx;
unsigned long flags;
spin_lock_irqsave(&priv->ctx_lock, flags);
list_for_each_entry(dev_ctx, &priv->ctx_list, list)
if (dev_ctx->intf->event)
dev_ctx->intf->event(dev, dev_ctx->context, type, port);
spin_unlock_irqrestore(&priv->ctx_lock, flags);
}
int mlx4_register_device(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
mutex_lock(&intf_mutex);
list_add_tail(&priv->dev_list, &dev_list);
list_for_each_entry(intf, &intf_list, list)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
mlx4_start_catas_poll(dev);
return 0;
}
void mlx4_unregister_device(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
mlx4_remove_device(intf, priv);
list_del(&priv->dev_list);
mutex_unlock(&intf_mutex);
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,364 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#define MGM_QPN_MASK 0x00FFFFFF
#define MGM_BLCK_LB_BIT 30
struct mlx4_mgm {
__be32 next_gid_index;
__be32 members_count;
u32 reserved[2];
u8 gid[16];
__be32 qp[MLX4_QP_PER_MGM];
};
static const u8 zero_gid[16]; /* automatically initialized to 0 */
static int mlx4_READ_MCG(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
MLX4_CMD_TIME_CLASS_A);
}
static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
MLX4_CMD_TIME_CLASS_A);
}
static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
u16 *hash)
{
u64 imm;
int err;
err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH,
MLX4_CMD_TIME_CLASS_A);
if (!err)
*hash = imm;
return err;
}
/*
* Caller must hold MCG table semaphore. gid and mgm parameters must
* be properly aligned for command interface.
*
* Returns 0 unless a firmware command error occurs.
*
* If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
* and *mgm holds MGM entry.
*
* if GID is found in AMGM, *index = index in AMGM, *prev = index of
* previous entry in hash chain and *mgm holds AMGM entry.
*
* If no AMGM exists for given gid, *index = -1, *prev = index of last
* entry in hash chain and *mgm holds end of hash chain.
*/
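/*
 * next_gid_index stores the index of the next AMGM entry in its upper
 * bits, hence the >> 6 when reading it and the << 6 when writing it.
 */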
static int find_mgm(struct mlx4_dev *dev,
u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox,
u16 *hash, int *prev, int *index)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm = mgm_mailbox->buf;
u8 *mgid;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return -ENOMEM;
mgid = mailbox->buf;
memcpy(mgid, gid, 16);
err = mlx4_MGID_HASH(dev, mailbox, hash);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
return err;
if (0)
mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);
*index = *hash;
*prev = -1;
do {
err = mlx4_READ_MCG(dev, *index, mgm_mailbox);
if (err)
return err;
if (!memcmp(mgm->gid, zero_gid, 16)) {
if (*index != *hash) {
mlx4_err(dev, "Found zero MGID in AMGM.\n");
err = -EINVAL;
}
return err;
}
if (!memcmp(mgm->gid, gid, 16))
return err;
*prev = *index;
*index = be32_to_cpu(mgm->next_gid_index) >> 6;
} while (*index);
*index = -1;
return err;
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 members_count;
u16 hash;
int index, prev;
int link = 0;
int i;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
mgm = mailbox->buf;
mutex_lock(&priv->mcg_table.mutex);
err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
if (err)
goto out;
if (index != -1) {
if (!memcmp(mgm->gid, zero_gid, 16))
memcpy(mgm->gid, gid, 16);
} else {
link = 1;
index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
if (index == -1) {
mlx4_err(dev, "No AMGM entries left\n");
err = -ENOMEM;
goto out;
}
index += dev->caps.num_mgms;
memset(mgm, 0, sizeof *mgm);
memcpy(mgm->gid, gid, 16);
}
members_count = be32_to_cpu(mgm->members_count);
if (members_count == MLX4_QP_PER_MGM) {
mlx4_err(dev, "MGM at index %x is full.\n", index);
err = -ENOMEM;
goto out;
}
for (i = 0; i < members_count; ++i)
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
err = 0;
goto out;
}
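/* Each QP entry holds the 24-bit QPN, with bit MGM_BLCK_LB_BIT (30)
 * optionally set to block multicast loopback for that QP. */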
if (block_mcast_loopback)
mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
(1U << MGM_BLCK_LB_BIT));
else
mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count);
err = mlx4_WRITE_MCG(dev, index, mailbox);
if (err)
goto out;
if (!link)
goto out;
err = mlx4_READ_MCG(dev, prev, mailbox);
if (err)
goto out;
mgm->next_gid_index = cpu_to_be32(index << 6);
err = mlx4_WRITE_MCG(dev, prev, mailbox);
if (err)
goto out;
out:
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
mlx4_warn(dev, "Got AMGM index %d < %d",
index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
index - dev->caps.num_mgms);
}
mutex_unlock(&priv->mcg_table.mutex);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 members_count;
u16 hash;
int prev, index;
int i, loc;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
mgm = mailbox->buf;
mutex_lock(&priv->mcg_table.mutex);
err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
if (err)
goto out;
if (index == -1) {
mlx4_err(dev, "MGID %pI6 not found\n", gid);
err = -EINVAL;
goto out;
}
members_count = be32_to_cpu(mgm->members_count);
for (loc = -1, i = 0; i < members_count; ++i)
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
loc = i;
if (loc == -1) {
mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
err = -EINVAL;
goto out;
}
mgm->members_count = cpu_to_be32(--members_count);
mgm->qp[loc] = mgm->qp[i - 1];
mgm->qp[i - 1] = 0;
if (i != 1) {
err = mlx4_WRITE_MCG(dev, index, mailbox);
goto out;
}
if (prev == -1) {
/* Remove entry from MGM */
int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
if (amgm_index) {
err = mlx4_READ_MCG(dev, amgm_index, mailbox);
if (err)
goto out;
} else
memset(mgm->gid, 0, 16);
err = mlx4_WRITE_MCG(dev, index, mailbox);
if (err)
goto out;
if (amgm_index) {
if (amgm_index < dev->caps.num_mgms)
mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
index, amgm_index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
amgm_index - dev->caps.num_mgms);
}
} else {
/* Remove entry from AMGM */
int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
err = mlx4_READ_MCG(dev, prev, mailbox);
if (err)
goto out;
mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
err = mlx4_WRITE_MCG(dev, prev, mailbox);
if (err)
goto out;
if (index < dev->caps.num_mgms)
mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
prev, index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
index - dev->caps.num_mgms);
}
out:
mutex_unlock(&priv->mcg_table.mutex);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
dev->caps.num_amgms - 1, 0, 0);
if (err)
return err;
mutex_init(&priv->mcg_table.mutex);
return 0;
}
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}

View File

@@ -0,0 +1,406 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX4_H
#define MLX4_H
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/doorbell.h>
#define DRV_NAME "mlx4_core"
#define PFX DRV_NAME ": "
#define DRV_VERSION "0.01"
#define DRV_RELDATE "May 1, 2007"
enum {
MLX4_HCR_BASE = 0x80680,
MLX4_HCR_SIZE = 0x0001c,
MLX4_CLR_INT_SIZE = 0x00008
};
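/*
 * Each MGM entry is MLX4_MGM_ENTRY_SIZE (256) bytes: a 32-byte header
 * (next_gid_index, members_count, reserved, GID) followed by 4-byte QPNs,
 * giving 4 * (256 / 16 - 2) = 56 QPs per multicast group entry.
 */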
enum {
MLX4_MGM_ENTRY_SIZE = 0x100,
MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
MLX4_MTT_ENTRY_PER_SEG = 8
};
enum {
MLX4_NUM_PDS = 1 << 15
};
enum {
MLX4_CMPT_TYPE_QP = 0,
MLX4_CMPT_TYPE_SRQ = 1,
MLX4_CMPT_TYPE_CQ = 2,
MLX4_CMPT_TYPE_EQ = 3,
MLX4_CMPT_NUM_TYPE
};
enum {
MLX4_CMPT_SHIFT = 24,
MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};
#ifdef CONFIG_MLX4_DEBUG
extern int mlx4_debug_level;
#else /* CONFIG_MLX4_DEBUG */
#define mlx4_debug_level (0)
#endif /* CONFIG_MLX4_DEBUG */
#define mlx4_dbg(mdev, format, arg...) \
do { \
if (mlx4_debug_level) \
dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
} while (0)
#define mlx4_err(mdev, format, arg...) \
dev_err(&mdev->pdev->dev, format, ## arg)
#define mlx4_info(mdev, format, arg...) \
dev_info(&mdev->pdev->dev, format, ## arg)
#define mlx4_warn(mdev, format, arg...) \
dev_warn(&mdev->pdev->dev, format, ## arg)
struct mlx4_bitmap {
u32 last;
u32 top;
u32 max;
u32 reserved_top;
u32 mask;
spinlock_t lock;
unsigned long *table;
};
struct mlx4_buddy {
unsigned long **bits;
unsigned int *num_free;
int max_order;
spinlock_t lock;
};
struct mlx4_icm;
struct mlx4_icm_table {
u64 virt;
int num_icm;
int num_obj;
int obj_size;
int lowmem;
int coherent;
struct mutex mutex;
struct mlx4_icm **icm;
};
struct mlx4_eq {
struct mlx4_dev *dev;
void __iomem *doorbell;
int eqn;
u32 cons_index;
u16 irq;
u16 have_irq;
int nent;
struct mlx4_buf_list *page_list;
struct mlx4_mtt mtt;
};
struct mlx4_profile {
int num_qp;
int rdmarc_per_qp;
int num_srq;
int num_cq;
int num_mcg;
int num_mpt;
int num_mtt;
};
struct mlx4_fw {
u64 clr_int_base;
u64 catas_offset;
struct mlx4_icm *fw_icm;
struct mlx4_icm *aux_icm;
u32 catas_size;
u16 fw_pages;
u8 clr_int_bar;
u8 catas_bar;
};
struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
struct mutex hcr_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
struct mlx4_cmd_context *context;
u16 token_mask;
u8 use_events;
u8 toggle;
};
struct mlx4_uar_table {
struct mlx4_bitmap bitmap;
};
struct mlx4_mr_table {
struct mlx4_bitmap mpt_bitmap;
struct mlx4_buddy mtt_buddy;
u64 mtt_base;
u64 mpt_base;
struct mlx4_icm_table mtt_table;
struct mlx4_icm_table dmpt_table;
};
struct mlx4_cq_table {
struct mlx4_bitmap bitmap;
spinlock_t lock;
struct radix_tree_root tree;
struct mlx4_icm_table table;
struct mlx4_icm_table cmpt_table;
};
struct mlx4_eq_table {
struct mlx4_bitmap bitmap;
char *irq_names;
void __iomem *clr_int;
void __iomem **uar_map;
u32 clr_mask;
struct mlx4_eq *eq;
struct mlx4_icm_table table;
struct mlx4_icm_table cmpt_table;
int have_irq;
u8 inta_pin;
};
struct mlx4_srq_table {
struct mlx4_bitmap bitmap;
spinlock_t lock;
struct radix_tree_root tree;
struct mlx4_icm_table table;
struct mlx4_icm_table cmpt_table;
};
struct mlx4_qp_table {
struct mlx4_bitmap bitmap;
u32 rdmarc_base;
int rdmarc_shift;
spinlock_t lock;
struct mlx4_icm_table qp_table;
struct mlx4_icm_table auxc_table;
struct mlx4_icm_table altc_table;
struct mlx4_icm_table rdmarc_table;
struct mlx4_icm_table cmpt_table;
};
struct mlx4_mcg_table {
struct mutex mutex;
struct mlx4_bitmap bitmap;
struct mlx4_icm_table table;
};
struct mlx4_catas_err {
u32 __iomem *map;
struct timer_list timer;
struct list_head list;
};
#define MLX4_MAX_MAC_NUM 128
#define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3)
struct mlx4_mac_table {
__be64 entries[MLX4_MAX_MAC_NUM];
int refs[MLX4_MAX_MAC_NUM];
struct mutex mutex;
int total;
int max;
};
#define MLX4_MAX_VLAN_NUM 128
#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
struct mlx4_vlan_table {
__be32 entries[MLX4_MAX_VLAN_NUM];
int refs[MLX4_MAX_VLAN_NUM];
struct mutex mutex;
int total;
int max;
};
struct mlx4_port_info {
struct mlx4_dev *dev;
int port;
char dev_name[16];
struct device_attribute port_attr;
enum mlx4_port_type tmp_type;
struct mlx4_mac_table mac_table;
struct mlx4_vlan_table vlan_table;
};
struct mlx4_sense {
struct mlx4_dev *dev;
u8 do_sense_port[MLX4_MAX_PORTS + 1];
u8 sense_allowed[MLX4_MAX_PORTS + 1];
struct delayed_work sense_poll;
};
struct mlx4_priv {
struct mlx4_dev dev;
struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
struct list_head pgdir_list;
struct mutex pgdir_mutex;
struct mlx4_fw fw;
struct mlx4_cmd cmd;
struct mlx4_bitmap pd_bitmap;
struct mlx4_uar_table uar_table;
struct mlx4_mr_table mr_table;
struct mlx4_cq_table cq_table;
struct mlx4_eq_table eq_table;
struct mlx4_srq_table srq_table;
struct mlx4_qp_table qp_table;
struct mlx4_mcg_table mcg_table;
struct mlx4_catas_err catas_err;
void __iomem *clr_base;
struct mlx4_uar driver_uar;
void __iomem *kar;
struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
struct mlx4_sense sense;
struct mutex port_mutex;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
{
return container_of(dev, struct mlx4_priv, dev);
}
#define MLX4_SENSE_RANGE (HZ * 3)
extern struct workqueue_struct *mlx4_wq;
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top);
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
int mlx4_reset(struct mlx4_dev *dev);
int mlx4_alloc_eq_table(struct mlx4_dev *dev);
void mlx4_free_eq_table(struct mlx4_dev *dev);
int mlx4_init_pd_table(struct mlx4_dev *dev);
int mlx4_init_uar_table(struct mlx4_dev *dev);
int mlx4_init_mr_table(struct mlx4_dev *dev);
int mlx4_init_eq_table(struct mlx4_dev *dev);
int mlx4_init_cq_table(struct mlx4_dev *dev);
int mlx4_init_qp_table(struct mlx4_dev *dev);
int mlx4_init_srq_table(struct mlx4_dev *dev);
int mlx4_init_mcg_table(struct mlx4_dev *dev);
void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
void mlx4_catas_init(void);
int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port);
struct mlx4_dev_cap;
struct mlx4_init_hca_param;
u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_profile *request,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca);
int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
int mlx4_cmd_use_events(struct mlx4_dev *dev);
void mlx4_cmd_use_polling(struct mlx4_dev *dev);
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_handle_catas_err(struct mlx4_dev *dev);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
enum mlx4_port_type *stype,
enum mlx4_port_type *defaults);
void mlx4_start_sense(struct mlx4_dev *dev);
void mlx4_stop_sense(struct mlx4_dev *dev);
void mlx4_sense_init(struct mlx4_dev *dev);
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type);
int mlx4_change_port_types(struct mlx4_dev *dev,
enum mlx4_port_type *port_types);
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
#endif /* MLX4_H */

View File

@@ -0,0 +1,570 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _MLX4_EN_H_
#define _MLX4_EN_H_
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/inet_lro.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/srq.h>
#include <linux/mlx4/doorbell.h>
#include "en_port.h"
#define DRV_NAME "mlx4_en"
#define DRV_VERSION "1.4.1.1"
#define DRV_RELDATE "June 2009"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
#define en_print(level, priv, format, arg...) \
{ \
if ((priv)->registered) \
printk(level "%s: %s: " format, DRV_NAME, \
(priv->dev)->name, ## arg); \
else \
printk(level "%s: %s: Port %d: " format, \
DRV_NAME, dev_name(&priv->mdev->pdev->dev), \
(priv)->port, ## arg); \
}
#define en_dbg(mlevel, priv, format, arg...) \
{ \
if (NETIF_MSG_##mlevel & priv->msg_enable) \
en_print(KERN_DEBUG, priv, format, ## arg) \
}
#define en_warn(priv, format, arg...) \
en_print(KERN_WARNING, priv, format, ## arg)
#define en_err(priv, format, arg...) \
en_print(KERN_ERR, priv, format, ## arg)
#define mlx4_err(mdev, format, arg...) \
printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
dev_name(&mdev->pdev->dev) , ## arg)
#define mlx4_info(mdev, format, arg...) \
printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
dev_name(&mdev->pdev->dev) , ## arg)
#define mlx4_warn(mdev, format, arg...) \
printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
dev_name(&mdev->pdev->dev) , ## arg)
/*
* Device constants
*/
#define MLX4_EN_PAGE_SHIFT 12
#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
#define MAX_TX_RINGS 16
#define MAX_RX_RINGS 16
#define TXBB_SIZE 64
#define HEADROOM (2048 / TXBB_SIZE + 1)
#define STAMP_STRIDE 64
#define STAMP_DWORDS (STAMP_STRIDE / 4)
#define STAMP_SHIFT 31
#define STAMP_VAL 0x7fffffff
#define STATS_DELAY (HZ / 4)
/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
#define MAX_DESC_SIZE 512
#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
/*
* OS related constants and tunables
*/
#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
#define MLX4_EN_ALLOC_ORDER 2
#define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
* and 4K allocations) */
enum {
FRAG_SZ0 = 512 - NET_IP_ALIGN,
FRAG_SZ1 = 1024,
FRAG_SZ2 = 4096,
FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
};
#define MLX4_EN_MAX_RX_FRAGS 4
/* Maximum ring sizes */
#define MLX4_EN_MAX_TX_SIZE 8192
#define MLX4_EN_MAX_RX_SIZE 8192
/* Minimum ring size for our page-allocation scheme to work */
#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
#define MLX4_EN_SMALL_PKT_SIZE 64
#define MLX4_EN_NUM_TX_RINGS 8
#define MLX4_EN_NUM_PPP_RINGS 8
#define MLX4_EN_DEF_TX_RING_SIZE 512
#define MLX4_EN_DEF_RX_RING_SIZE 1024
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
#define MLX4_EN_RX_COAL_TIME 0x10
#define MLX4_EN_TX_COAL_PKTS 5
#define MLX4_EN_TX_COAL_TIME 0x80
#define MLX4_EN_RX_RATE_LOW 400000
#define MLX4_EN_RX_COAL_TIME_LOW 0
#define MLX4_EN_RX_RATE_HIGH 450000
#define MLX4_EN_RX_COAL_TIME_HIGH 128
#define MLX4_EN_RX_SIZE_THRESH 1024
#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
#define MLX4_EN_SAMPLE_INTERVAL 0
#define MLX4_EN_AUTO_CONF 0xffff
#define MLX4_EN_DEF_RX_PAUSE 1
#define MLX4_EN_DEF_TX_PAUSE 1
/* Interval between successive polls in the Tx routine when polling is used
instead of interrupts (in per-core Tx rings) - should be power of 2 */
#define MLX4_EN_TX_POLL_MODER 16
#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4)
#define ETH_LLC_SNAP_SIZE 8
#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
#define MLX4_EN_MIN_MTU 46
#define ETH_BCAST 0xffffffffffffULL
#ifdef MLX4_EN_PERF_STAT
/* Number of samples to 'average' */
#define AVG_SIZE 128
#define AVG_FACTOR 1024
#define NUM_PERF_STATS NUM_PERF_COUNTERS
#define INC_PERF_COUNTER(cnt) (++(cnt))
#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
#define AVG_PERF_COUNTER(cnt, sample) \
((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
#define GET_PERF_COUNTER(cnt) (cnt)
#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)
#else
#define NUM_PERF_STATS 0
#define INC_PERF_COUNTER(cnt) do {} while (0)
#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
#define GET_PERF_COUNTER(cnt) (0)
#define GET_AVG_PERF_COUNTER(cnt) (0)
#endif /* MLX4_EN_PERF_STAT */
/*
* Configurables
*/
enum cq_type {
RX = 0,
TX = 1,
};
/*
* Useful macros
*/
#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
#define XNOR(x, y) (!(x) == !(y))
#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
struct mlx4_en_tx_info {
struct sk_buff *skb;
u32 nr_txbb;
u8 linear;
u8 data_offset;
u8 inl;
};
#define MLX4_EN_BIT_DESC_OWN 0x80000000
#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
#define MLX4_EN_MEMTYPE_PAD 0x100
#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
struct mlx4_en_tx_desc {
struct mlx4_wqe_ctrl_seg ctrl;
union {
struct mlx4_wqe_data_seg data; /* at least one data segment */
struct mlx4_wqe_lso_seg lso;
struct mlx4_wqe_inline_seg inl;
};
};
#define MLX4_EN_USE_SRQ 0x01000000
struct mlx4_en_rx_alloc {
struct page *page;
u16 offset;
};
struct mlx4_en_tx_ring {
struct mlx4_hwq_resources wqres;
u32 size; /* number of TXBBs */
u32 size_mask;
u16 stride;
u16 cqn; /* index of port CQ associated with this ring */
u32 prod;
u32 cons;
u32 buf_size;
u32 doorbell_qpn;
void *buf;
u16 poll_cnt;
int blocked;
struct mlx4_en_tx_info *tx_info;
u8 *bounce_buf;
u32 last_nr_txbb;
struct mlx4_qp qp;
struct mlx4_qp_context context;
int qpn;
enum mlx4_qp_state qp_state;
struct mlx4_srq dummy;
unsigned long bytes;
unsigned long packets;
spinlock_t comp_lock;
};
struct mlx4_en_rx_desc {
/* actual number of entries depends on rx ring stride */
struct mlx4_wqe_data_seg data[0];
};
struct mlx4_en_rx_ring {
struct mlx4_hwq_resources wqres;
struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
struct net_lro_mgr lro;
u32 size; /* number of Rx descs */
u32 actual_size;
u32 size_mask;
u16 stride;
u16 log_stride;
u16 cqn; /* index of port CQ associated with this ring */
u32 prod;
u32 cons;
u32 buf_size;
void *buf;
void *rx_info;
unsigned long bytes;
unsigned long packets;
};
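/*
 * A receive completion is eligible for LRO only if, of the status bits
 * tested below, exactly IPV4 | IPOK | TCP are set: i.e. an unfragmented
 * IPv4 TCP frame without IP options whose IP checksum the HCA validated.
 */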
static inline int mlx4_en_can_lro(__be16 status)
{
return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
MLX4_CQE_STATUS_IPV4F |
MLX4_CQE_STATUS_IPV6 |
MLX4_CQE_STATUS_IPV4OPT |
MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP |
MLX4_CQE_STATUS_IPOK)) ==
cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
MLX4_CQE_STATUS_IPOK |
MLX4_CQE_STATUS_TCP);
}
struct mlx4_en_cq {
struct mlx4_cq mcq;
struct mlx4_hwq_resources wqres;
int ring;
spinlock_t lock;
struct net_device *dev;
struct napi_struct napi;
/* Per-core Tx cq processing support */
struct timer_list timer;
int size;
int buf_size;
unsigned vector;
enum cq_type is_tx;
u16 moder_time;
u16 moder_cnt;
struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR 0x1e
};
struct mlx4_en_port_profile {
u32 flags;
u32 tx_ring_num;
u32 rx_ring_num;
u32 tx_ring_size;
u32 rx_ring_size;
u8 rx_pause;
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
};
struct mlx4_en_profile {
int rss_xor;
int num_lro;
u8 rss_mask;
u32 active_ports;
u32 small_pkt_int;
u8 no_reset;
struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
};
struct mlx4_en_dev {
struct mlx4_dev *dev;
struct pci_dev *pdev;
struct mutex state_lock;
struct net_device *pndev[MLX4_MAX_PORTS + 1];
u32 port_cnt;
bool device_up;
struct mlx4_en_profile profile;
u32 LSO_support;
struct workqueue_struct *workqueue;
struct device *dma_device;
void __iomem *uar_map;
struct mlx4_uar priv_uar;
struct mlx4_mr mr;
u32 priv_pdn;
spinlock_t uar_lock;
};
struct mlx4_en_rss_map {
int base_qpn;
struct mlx4_qp qps[MAX_RX_RINGS];
enum mlx4_qp_state state[MAX_RX_RINGS];
struct mlx4_qp indir_qp;
enum mlx4_qp_state indir_state;
};
struct mlx4_en_rss_context {
__be32 base_qpn;
__be32 default_qpn;
u16 reserved;
u8 hash_fn;
u8 flags;
__be32 rss_key[10];
};
struct mlx4_en_pkt_stats {
unsigned long broadcast;
unsigned long rx_prio[8];
unsigned long tx_prio[8];
#define NUM_PKT_STATS 17
};
struct mlx4_en_port_stats {
unsigned long lro_aggregated;
unsigned long lro_flushed;
unsigned long lro_no_desc;
unsigned long tso_packets;
unsigned long queue_stopped;
unsigned long wake_queue;
unsigned long tx_timeout;
unsigned long rx_alloc_failed;
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
unsigned long tx_chksum_offload;
#define NUM_PORT_STATS 11
};
struct mlx4_en_perf_stats {
u32 tx_poll;
u64 tx_pktsz_avg;
u32 inflight_avg;
u16 tx_coal_avg;
u16 rx_coal_avg;
u32 napi_quota;
#define NUM_PERF_COUNTERS 6
};
struct mlx4_en_frag_info {
u16 frag_size;
u16 frag_prefix_size;
u16 frag_stride;
u16 frag_align;
u16 last_offset;
};
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
struct net_device *dev;
struct vlan_group *vlgrp;
struct net_device_stats stats;
struct net_device_stats ret_stats;
spinlock_t stats_lock;
unsigned long last_moder_packets;
unsigned long last_moder_tx_packets;
unsigned long last_moder_bytes;
unsigned long last_moder_jiffies;
int last_moder_time;
u16 rx_usecs;
u16 rx_frames;
u16 tx_usecs;
u16 tx_frames;
u32 pkt_rate_low;
u16 rx_usecs_low;
u32 pkt_rate_high;
u16 rx_usecs_high;
u16 sample_interval;
u16 adaptive_rx_coal;
u32 msg_enable;
struct mlx4_hwq_resources res;
int link_state;
int last_link_state;
bool port_up;
int port;
int registered;
int allocated;
int stride;
int rx_csum;
u64 mac;
int mac_index;
unsigned max_mtu;
int base_qpn;
struct mlx4_en_rss_map rss_map;
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
u32 tx_ring_num;
u32 rx_ring_num;
u32 rx_skb_size;
struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
u16 num_frags;
u16 log_rx_info;
struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct work_struct mcast_task;
struct work_struct mac_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
struct dev_mc_list *mc_list;
struct mlx4_en_stat_out_mbox hw_stats;
};
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof);
int mlx4_en_start_port(struct net_device *dev);
void mlx4_en_stop_port(struct net_device *dev);
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int entries, int ring, enum cq_type mode);
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_poll_tx_cq(unsigned long data);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
u32 size, u16 stride);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int cq);
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
u32 size, u16 stride);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring);
int mlx4_en_process_rx_cq(struct net_device *dev,
struct mlx4_en_cq *cq,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn,
struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
void mlx4_en_calc_rx_buf(struct net_device *dev);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
/*
* Globals
*/
extern const struct ethtool_ops mlx4_en_ethtool_ops;
#endif

View File

@@ -0,0 +1,666 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
struct mlx4_mpt_entry {
__be32 flags;
__be32 qpn;
__be32 key;
__be32 pd_flags;
__be64 start;
__be64 length;
__be32 lkey;
__be32 win_cnt;
u8 reserved1[3];
u8 mtt_rep;
__be64 mtt_seg;
__be32 mtt_sz;
__be32 entity_size;
__be32 first_byte_offset;
} __attribute__((packed));
#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
#define MLX4_MPT_FLAG_REGION (1 << 8)
#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27)
#define MLX4_MPT_PD_FLAG_RAE (1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
#define MLX4_MPT_STATUS_SW 0xF0
#define MLX4_MPT_STATUS_HW 0x00
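/*
 * Buddy allocator for MTT segments: mlx4_buddy_alloc() takes the
 * smallest free block of order >= 'order', splits it down to the
 * requested order and returns its segment number (or -1 if nothing is
 * free), while mlx4_buddy_free() merges the freed block with its buddy
 * for as long as the buddy is also free.
 */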
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
int o;
int m;
u32 seg;
spin_lock(&buddy->lock);
for (o = order; o <= buddy->max_order; ++o)
if (buddy->num_free[o]) {
m = 1 << (buddy->max_order - o);
seg = find_first_bit(buddy->bits[o], m);
if (seg < m)
goto found;
}
spin_unlock(&buddy->lock);
return -1;
found:
clear_bit(seg, buddy->bits[o]);
--buddy->num_free[o];
while (o > order) {
--o;
seg <<= 1;
set_bit(seg ^ 1, buddy->bits[o]);
++buddy->num_free[o];
}
spin_unlock(&buddy->lock);
seg <<= order;
return seg;
}
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
seg >>= order;
spin_lock(&buddy->lock);
while (test_bit(seg ^ 1, buddy->bits[order])) {
clear_bit(seg ^ 1, buddy->bits[order]);
--buddy->num_free[order];
seg >>= 1;
++order;
}
set_bit(seg, buddy->bits[order]);
++buddy->num_free[order];
spin_unlock(&buddy->lock);
}
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
int i, s;
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
GFP_KERNEL);
buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int),
GFP_KERNEL);
if (!buddy->bits || !buddy->num_free)
goto err_out;
for (i = 0; i <= buddy->max_order; ++i) {
s = BITS_TO_LONGS(1 << (buddy->max_order - i));
buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
if (!buddy->bits[i])
goto err_out_free;
bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
}
set_bit(0, buddy->bits[buddy->max_order]);
buddy->num_free[buddy->max_order] = 1;
return 0;
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
kfree(buddy->bits[i]);
err_out:
kfree(buddy->bits);
kfree(buddy->num_free);
return -ENOMEM;
}
static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
int i;
for (i = 0; i <= buddy->max_order; ++i)
kfree(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
}
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
u32 seg;
seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
if (seg == -1)
return -1;
if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
seg + (1 << order) - 1)) {
mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
return -1;
}
return seg;
}
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
struct mlx4_mtt *mtt)
{
int i;
if (!npages) {
mtt->order = -1;
mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
return 0;
} else
mtt->page_shift = page_shift;
for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
++mtt->order;
mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
if (mtt->first_seg == -1)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
if (mtt->order < 0)
return;
mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
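/*
 * The memory key handed to consumers is the MPT index rotated left by
 * one byte; key_to_hw_index() is the inverse rotation.  Because the
 * index is recovered as key_to_hw_index(key) masked by the number of
 * MPT entries, an FMR remap can advance the key (see mlx4_map_phys_fmr)
 * without moving to a different MPT entry.
 */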
static u32 hw_index_to_key(u32 ind)
{
return (ind >> 24) | (ind << 8);
}
static u32 key_to_hw_index(u32 key)
{
return (key << 24) | (key >> 8);
}
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
MLX4_CMD_TIME_CLASS_B);
}
static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
!mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
}
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
int npages, int page_shift, struct mlx4_mr *mr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u32 index;
int err;
index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
if (index == -1)
return -ENOMEM;
mr->iova = iova;
mr->size = size;
mr->pd = pd;
mr->access = access;
mr->enabled = 0;
mr->key = hw_index_to_key(index);
err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
if (err)
mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
if (mr->enabled) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err)
mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
}
mlx4_mtt_cleanup(dev, &mr->mtt);
mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mpt_entry *mpt_entry;
int err;
err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
if (err)
return err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_table;
}
mpt_entry = mailbox->buf;
memset(mpt_entry, 0, sizeof *mpt_entry);
mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
MLX4_MPT_FLAG_REGION |
mr->access);
mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
mpt_entry->start = cpu_to_be64(mr->iova);
mpt_entry->length = cpu_to_be64(mr->size);
mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
mpt_entry->mtt_seg = 0;
} else {
mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
}
if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
/* fast register MR in free state */
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
MLX4_MPT_PD_FLAG_RAE);
mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
dev->caps.mtts_per_seg);
} else {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
}
err = mlx4_SW2HW_MPT(dev, mailbox,
key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
if (err) {
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
mr->enabled = 1;
mlx4_free_cmd_mailbox(dev, mailbox);
return 0;
err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list)
{
struct mlx4_priv *priv = mlx4_priv(dev);
__be64 *mtts;
dma_addr_t dma_handle;
int i;
int s = start_index * sizeof (u64);
/* All MTTs must fit in the same page */
if (start_index / (PAGE_SIZE / sizeof (u64)) !=
(start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
return -EINVAL;
if (start_index & (dev->caps.mtts_per_seg - 1))
return -EINVAL;
mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
s / dev->caps.mtt_entry_sz, &dma_handle);
if (!mtts)
return -ENOMEM;
dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
npages * sizeof (u64), DMA_TO_DEVICE);
for (i = 0; i < npages; ++i)
mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
npages * sizeof (u64), DMA_TO_DEVICE);
return 0;
}
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list)
{
int chunk;
int err;
if (mtt->order < 0)
return -EINVAL;
while (npages > 0) {
chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
if (err)
return err;
npages -= chunk;
start_index += chunk;
page_list += chunk;
}
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_buf *buf)
{
u64 *page_list;
int err;
int i;
page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
if (!page_list)
return -ENOMEM;
for (i = 0; i < buf->npages; ++i)
if (buf->nbufs == 1)
page_list[i] = buf->direct.map + (i << buf->page_shift);
else
page_list[i] = buf->page_list[i].map;
err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
kfree(page_list);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
int err;
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws, 0);
if (err)
return err;
err = mlx4_buddy_init(&mr_table->mtt_buddy,
ilog2(dev->caps.num_mtt_segs));
if (err)
goto err_buddy;
if (dev->caps.reserved_mtts) {
if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
mlx4_warn(dev, "MTT table of order %d is too small.\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
goto err_reserve_mtts;
}
}
return 0;
err_reserve_mtts:
mlx4_buddy_cleanup(&mr_table->mtt_buddy);
err_buddy:
mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
return err;
}
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
mlx4_buddy_cleanup(&mr_table->mtt_buddy);
mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
int npages, u64 iova)
{
int i, page_mask;
if (npages > fmr->max_pages)
return -EINVAL;
page_mask = (1 << fmr->page_shift) - 1;
/* We are getting page lists, so va must be page aligned. */
if (iova & page_mask)
return -EINVAL;
/* Trust the user not to pass misaligned data in page_list */
if (0)
for (i = 0; i < npages; ++i) {
if (page_list[i] & ~page_mask)
return -EINVAL;
}
if (fmr->maps >= fmr->max_maps)
return -EINVAL;
return 0;
}
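/*
 * FMR remap: put the MPT into software ownership so the HCA stops using
 * it, write the new MTT entries directly through the ICM mapping, update
 * the MPT key, length and start address, then hand ownership back to
 * hardware.  The barriers below order these steps as seen by the device.
 */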
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
int npages, u64 iova, u32 *lkey, u32 *rkey)
{
u32 key;
int i, err;
err = mlx4_check_fmr(fmr, page_list, npages, iova);
if (err)
return err;
++fmr->maps;
key = key_to_hw_index(fmr->mr.key);
key += dev->caps.num_mpts;
*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
/* Make sure MPT status is visible before writing MTT entries */
wmb();
dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
npages * sizeof(u64), DMA_TO_DEVICE);
for (i = 0; i < npages; ++i)
fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
npages * sizeof(u64), DMA_TO_DEVICE);
fmr->mpt->key = cpu_to_be32(key);
fmr->mpt->lkey = cpu_to_be32(key);
fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
fmr->mpt->start = cpu_to_be64(iova);
/* Make sure MTT entries are visible before setting MPT status */
wmb();
*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;
/* Make sure MPT status is visible before consumer can use FMR */
wmb();
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u64 mtt_seg;
int err = -ENOMEM;
if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
return -EINVAL;
/* All MTTs must fit in the same page */
if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
return -EINVAL;
fmr->page_shift = page_shift;
fmr->max_pages = max_pages;
fmr->max_maps = max_maps;
fmr->maps = 0;
err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
page_shift, &fmr->mr);
if (err)
return err;
mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
fmr->mr.mtt.first_seg,
&fmr->dma_handle);
if (!fmr->mtts) {
err = -ENOMEM;
goto err_free;
}
return 0;
err_free:
mlx4_mr_free(dev, &fmr->mr);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
err = mlx4_mr_enable(dev, &fmr->mr);
if (err)
return err;
fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
key_to_hw_index(fmr->mr.key), NULL);
if (!fmr->mpt)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
if (!fmr->maps)
return;
fmr->maps = 0;
*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
if (fmr->maps)
return -EBUSY;
fmr->mr.enabled = 0;
mlx4_mr_free(dev, &fmr->mr);
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);

View File

@@ -0,0 +1,108 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <asm/page.h>
#include "mlx4.h"
#include "icm.h"
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
*pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
if (*pdn == -1)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
{
mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
}
EXPORT_SYMBOL_GPL(mlx4_pd_free);
int mlx4_init_pd_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
(1 << 24) - 1, dev->caps.reserved_pds, 0);
}
void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
{
mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
}
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
if (uar->index == -1)
return -ENOMEM;
uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);
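/*
 * At least the first 128 UAR pages (or dev->caps.reserved_uars, if
 * larger) are kept out of the allocatable range, so the device must
 * expose more than 128 UAR pages to be usable at all.
 */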
int mlx4_init_uar_table(struct mlx4_dev *dev)
{
if (dev->caps.num_uars <= 128) {
mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
dev->caps.num_uars);
mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
return -ENODEV;
}
return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
dev->caps.num_uars, dev->caps.num_uars - 1,
max(128, dev->caps.reserved_uars), 0);
}
void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
{
mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap);
}

View File

@@ -0,0 +1,317 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#define MLX4_MAC_VALID (1ull << 63)
#define MLX4_MAC_MASK 0xffffffffffffULL
#define MLX4_VLAN_VALID (1u << 31)
#define MLX4_VLAN_MASK 0xfff
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
int i;
mutex_init(&table->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
table->entries[i] = 0;
table->refs[i] = 0;
}
table->max = 1 << dev->caps.log_num_macs;
table->total = 0;
}
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
{
int i;
mutex_init(&table->mutex);
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
table->entries[i] = 0;
table->refs[i] = 0;
}
table->max = 1 << dev->caps.log_num_vlans;
table->total = 0;
}
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
__be64 *entries)
{
struct mlx4_cmd_mailbox *mailbox;
u32 in_mod;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
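/*
 * The per-port MAC table is reference counted: registering a MAC that is
 * already present only bumps its reference count; otherwise the first
 * free slot is filled and the whole table is pushed to the device with
 * SET_PORT.
 */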
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
{
struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
int i, err = 0;
int free = -1;
mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
mutex_lock(&table->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
if (free < 0 && !table->refs[i]) {
free = i;
continue;
}
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
/* MAC already registered, increase reference count */
*index = i;
++table->refs[i];
goto out;
}
}
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
/* No free mac entries */
err = -ENOSPC;
goto out;
}
/* Register new MAC */
table->refs[free] = 1;
table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
table->refs[free] = 0;
table->entries[free] = 0;
goto out;
}
*index = free;
++table->total;
out:
mutex_unlock(&table->mutex);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
{
struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
mutex_lock(&table->mutex);
if (!table->refs[index]) {
mlx4_warn(dev, "No MAC entry for index %d\n", index);
goto out;
}
if (--table->refs[index]) {
mlx4_warn(dev, "Have more references for index %d,"
"no need to modify MAC table\n", index);
goto out;
}
table->entries[index] = 0;
mlx4_set_port_mac_table(dev, port, table->entries);
--table->total;
out:
mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
{
struct mlx4_cmd_mailbox *mailbox;
u32 in_mod;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int i, err = 0;
int free = -1;
mutex_lock(&table->mutex);
for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
if (free < 0 && (table->refs[i] == 0)) {
free = i;
continue;
}
if (table->refs[i] &&
(vlan == (MLX4_VLAN_MASK &
be32_to_cpu(table->entries[i])))) {
/* Vlan already registered, increase reference count */
*index = i;
++table->refs[i];
goto out;
}
}
if (table->total == table->max) {
/* No free vlan entries */
err = -ENOSPC;
goto out;
}
/* Register new VLAN */
table->refs[free] = 1;
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
err = mlx4_set_port_vlan_table(dev, port, table->entries);
if (unlikely(err)) {
mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
table->refs[free] = 0;
table->entries[free] = 0;
goto out;
}
*index = free;
++table->total;
out:
mutex_unlock(&table->mutex);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
if (index < MLX4_VLAN_REGULAR) {
mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
return;
}
mutex_lock(&table->mutex);
if (!table->refs[index]) {
mlx4_warn(dev, "No vlan entry for index %d\n", index);
goto out;
}
if (--table->refs[index]) {
mlx4_dbg(dev, "Have more references for index %d,"
"no need to modify vlan table\n", index);
goto out;
}
table->entries[index] = 0;
mlx4_set_port_vlan_table(dev, port, table->entries);
--table->total;
out:
mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
u8 *inbuf, *outbuf;
int err;
inmailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inmailbox))
return PTR_ERR(inmailbox);
outmailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(outmailbox)) {
mlx4_free_cmd_mailbox(dev, inmailbox);
return PTR_ERR(outmailbox);
}
inbuf = inmailbox->buf;
outbuf = outmailbox->buf;
memset(inbuf, 0, 256);
memset(outbuf, 0, 256);
inbuf[0] = 1;
inbuf[1] = 1;
inbuf[2] = 1;
inbuf[3] = 1;
*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
if (!err)
*caps = *(__be32 *) (outbuf + 84);
mlx4_free_cmd_mailbox(dev, inmailbox);
mlx4_free_cmd_mailbox(dev, outmailbox);
return err;
}
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, 256);
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}

View File

@@ -0,0 +1,238 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "mlx4.h"
#include "fw.h"
enum {
MLX4_RES_QP,
MLX4_RES_RDMARC,
MLX4_RES_ALTC,
MLX4_RES_AUXC,
MLX4_RES_SRQ,
MLX4_RES_CQ,
MLX4_RES_EQ,
MLX4_RES_DMPT,
MLX4_RES_CMPT,
MLX4_RES_MTT,
MLX4_RES_MCG,
MLX4_RES_NUM
};
static const char *res_name[] = {
[MLX4_RES_QP] = "QP",
[MLX4_RES_RDMARC] = "RDMARC",
[MLX4_RES_ALTC] = "ALTC",
[MLX4_RES_AUXC] = "AUXC",
[MLX4_RES_SRQ] = "SRQ",
[MLX4_RES_CQ] = "CQ",
[MLX4_RES_EQ] = "EQ",
[MLX4_RES_DMPT] = "DMPT",
[MLX4_RES_CMPT] = "CMPT",
[MLX4_RES_MTT] = "MTT",
[MLX4_RES_MCG] = "MCG",
};
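/*
 * mlx4_make_profile() turns the requested resource counts into an ICM
 * layout: each count is rounded up to a power of two, the resources are
 * sorted by decreasing size so they pack without gaps while staying
 * naturally aligned, and the resulting base addresses are written into
 * both dev->caps and the INIT_HCA parameter block.  The return value is
 * the total ICM size in bytes, or -ENOMEM on failure.
 */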
u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_profile *request,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource {
u64 size;
u64 start;
int type;
int num;
int log_num;
};
u64 total_size = 0;
struct mlx4_resource *profile;
struct mlx4_resource tmp;
int i, j;
profile = kzalloc(MLX4_RES_NUM * sizeof *profile, GFP_KERNEL);
if (!profile)
return -ENOMEM;
profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz;
profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz;
profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz;
profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz;
profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
profile[MLX4_RES_QP].num = request->num_qp;
profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
profile[MLX4_RES_ALTC].num = request->num_qp;
profile[MLX4_RES_AUXC].num = request->num_qp;
profile[MLX4_RES_SRQ].num = request->num_srq;
profile[MLX4_RES_CQ].num = request->num_cq;
profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs,
dev_cap->reserved_eqs +
num_possible_cpus() + 1);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
profile[MLX4_RES_MTT].num = request->num_mtt;
profile[MLX4_RES_MCG].num = request->num_mcg;
for (i = 0; i < MLX4_RES_NUM; ++i) {
profile[i].type = i;
profile[i].num = roundup_pow_of_two(profile[i].num);
profile[i].log_num = ilog2(profile[i].num);
profile[i].size *= profile[i].num;
profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
}
/*
* Sort the resources in decreasing order of size. Since they
* all have sizes that are powers of 2, we'll be able to keep
* resources aligned to their size and pack them without gaps
* using the sorted order.
*/
for (i = MLX4_RES_NUM; i > 0; --i)
for (j = 1; j < i; ++j) {
if (profile[j].size > profile[j - 1].size) {
tmp = profile[j];
profile[j] = profile[j - 1];
profile[j - 1] = tmp;
}
}
for (i = 0; i < MLX4_RES_NUM; ++i) {
if (profile[i].size) {
profile[i].start = total_size;
total_size += profile[i].size;
}
if (total_size > dev_cap->max_icm_sz) {
mlx4_err(dev, "Profile requires 0x%llx bytes; "
"won't fit in 0x%llx bytes of context memory.\n",
(unsigned long long) total_size,
(unsigned long long) dev_cap->max_icm_sz);
kfree(profile);
return -ENOMEM;
}
if (profile[i].size)
mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
"size 0x%10llx\n",
i, res_name[profile[i].type], profile[i].log_num,
(unsigned long long) profile[i].start,
(unsigned long long) profile[i].size);
}
mlx4_dbg(dev, "HCA context memory: reserving %d KB\n",
(int) (total_size >> 10));
for (i = 0; i < MLX4_RES_NUM; ++i) {
switch (profile[i].type) {
case MLX4_RES_QP:
dev->caps.num_qps = profile[i].num;
init_hca->qpc_base = profile[i].start;
init_hca->log_num_qps = profile[i].log_num;
break;
case MLX4_RES_RDMARC:
for (priv->qp_table.rdmarc_shift = 0;
request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;
++priv->qp_table.rdmarc_shift)
; /* nothing */
dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;
priv->qp_table.rdmarc_base = (u32) profile[i].start;
init_hca->rdmarc_base = profile[i].start;
init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift;
break;
case MLX4_RES_ALTC:
init_hca->altc_base = profile[i].start;
break;
case MLX4_RES_AUXC:
init_hca->auxc_base = profile[i].start;
break;
case MLX4_RES_SRQ:
dev->caps.num_srqs = profile[i].num;
init_hca->srqc_base = profile[i].start;
init_hca->log_num_srqs = profile[i].log_num;
break;
case MLX4_RES_CQ:
dev->caps.num_cqs = profile[i].num;
init_hca->cqc_base = profile[i].start;
init_hca->log_num_cqs = profile[i].log_num;
break;
case MLX4_RES_EQ:
dev->caps.num_eqs = profile[i].num;
init_hca->eqc_base = profile[i].start;
init_hca->log_num_eqs = profile[i].log_num;
break;
case MLX4_RES_DMPT:
dev->caps.num_mpts = profile[i].num;
priv->mr_table.mpt_base = profile[i].start;
init_hca->dmpt_base = profile[i].start;
init_hca->log_mpt_sz = profile[i].log_num;
break;
case MLX4_RES_CMPT:
init_hca->cmpt_base = profile[i].start;
break;
case MLX4_RES_MTT:
dev->caps.num_mtt_segs = profile[i].num;
priv->mr_table.mtt_base = profile[i].start;
init_hca->mtt_base = profile[i].start;
break;
case MLX4_RES_MCG:
dev->caps.num_mgms = profile[i].num >> 1;
dev->caps.num_amgms = profile[i].num >> 1;
init_hca->mc_base = profile[i].start;
init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
init_hca->log_mc_table_sz = profile[i].log_num;
init_hca->log_mc_hash_sz = profile[i].log_num - 1;
break;
default:
break;
}
}
/*
* PDs don't take any HCA memory, but we assign them as part
* of the HCA profile anyway.
*/
dev->caps.num_pds = MLX4_NUM_PDS;
kfree(profile);
return total_size;
}

View File

@@ -0,0 +1,379 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include "mlx4.h"
#include "icm.h"
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
struct mlx4_qp *qp;
spin_lock(&qp_table->lock);
qp = __mlx4_qp_lookup(dev, qpn);
if (qp)
atomic_inc(&qp->refcount);
spin_unlock(&qp_table->lock);
if (!qp) {
mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
return;
}
qp->event(qp, event_type);
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
}
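/*
 * The table below maps a (current state, new state) pair to the firmware
 * command that performs the transition; pairs left zero are invalid and
 * are rejected with -EINVAL.  Any state may be taken directly to RST or
 * ERR.
 */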
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
int sqd_event, struct mlx4_qp *qp)
{
static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
[MLX4_QP_STATE_RST] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP,
},
[MLX4_QP_STATE_INIT] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP,
[MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP,
},
[MLX4_QP_STATE_RTR] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP,
},
[MLX4_QP_STATE_RTS] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP,
[MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP,
},
[MLX4_QP_STATE_SQD] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP,
[MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP,
},
[MLX4_QP_STATE_SQER] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP,
},
[MLX4_QP_STATE_ERR] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
}
};
struct mlx4_cmd_mailbox *mailbox;
int ret = 0;
if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
!op[cur_state][new_state])
return -EINVAL;
if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
return mlx4_cmd(dev, 0, qp->qpn, 2,
MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
context->mtt_base_addr_h = mtt_addr >> 32;
context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
}
*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
memcpy(mailbox->buf + 8, context, sizeof *context);
((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
cpu_to_be32(qp->qpn);
ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
new_state == MLX4_QP_STATE_RST ? 2 : 0,
op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int qpn;
qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
if (qpn == -1)
return -ENOMEM;
*base = qpn;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
if (base_qpn < dev->caps.sqp_start + 8)
return;
mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
if (!qpn)
return -EINVAL;
qp->qpn = qpn;
err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
if (err)
goto err_out;
err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
if (err)
goto err_put_qp;
err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
if (err)
goto err_put_auxc;
err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
if (err)
goto err_put_altc;
err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
if (err)
goto err_put_rdmarc;
spin_lock_irq(&qp_table->lock);
err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
spin_unlock_irq(&qp_table->lock);
if (err)
goto err_put_cmpt;
atomic_set(&qp->refcount, 1);
init_completion(&qp->free);
return 0;
err_put_cmpt:
mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
err_put_rdmarc:
mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
err_put_altc:
mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
err_put_auxc:
mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
err_put_qp:
mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
err_out:
return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
unsigned long flags;
spin_lock_irqsave(&qp_table->lock, flags);
radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
wait_for_completion(&qp->free);
mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
MLX4_CMD_TIME_CLASS_B);
}
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
int err;
int reserved_from_top = 0;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
/*
* We reserve 2 extra QPs per port for the special QPs. The
* block of special QPs must be aligned to a multiple of 8, so
* round up.
*/
dev->caps.sqp_start =
ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
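/*
 * Carve the remaining reserved QP regions out of the top of the QP
 * range: sort them by descending reserved count, assign each base
 * downwards from num_qps, and accumulate the total reserved from the
 * top for the bitmap initialization below.
 */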
{
int sort[MLX4_NUM_QP_REGION];
int i, j, tmp;
int last_base = dev->caps.num_qps;
for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
sort[i] = i;
for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
for (j = 2; j < i; ++j) {
if (dev->caps.reserved_qps_cnt[sort[j]] >
dev->caps.reserved_qps_cnt[sort[j - 1]]) {
tmp = sort[j];
sort[j] = sort[j - 1];
sort[j - 1] = tmp;
}
}
}
for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
last_base -= dev->caps.reserved_qps_cnt[sort[i]];
dev->caps.reserved_qps_base[sort[i]] = last_base;
reserved_from_top +=
dev->caps.reserved_qps_cnt[sort[i]];
}
}
err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
(1 << 23) - 1, dev->caps.sqp_start + 8,
reserved_from_top);
if (err)
return err;
return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
}
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
mlx4_CONF_SPECIAL_QP(dev, 0);
mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
struct mlx4_qp_context *context)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
if (!err)
memcpy(context, mailbox->buf + 8, sizeof *context);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);
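/*
 * mlx4_qp_to_ready() walks a QP through RST -> INIT -> RTR -> RTS using
 * mlx4_qp_modify(), updating *qp_state after each successful transition
 * so the caller knows how far the QP got if a step fails.
 */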
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_qp_context *context,
struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
int err;
int i;
enum mlx4_qp_state states[] = {
MLX4_QP_STATE_RST,
MLX4_QP_STATE_INIT,
MLX4_QP_STATE_RTR,
MLX4_QP_STATE_RTS
};
for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
context->flags &= cpu_to_be32(~(0xf << 28));
context->flags |= cpu_to_be32(states[i + 1] << 28);
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
mlx4_err(dev, "Failed to bring QP to state: "
"%d with error: %d\n",
states[i + 1], err);
return err;
}
*qp_state = states[i + 1];
}
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);

View File

@@ -0,0 +1,185 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include "mlx4.h"
int mlx4_reset(struct mlx4_dev *dev)
{
void __iomem *reset;
u32 *hca_header = NULL;
int pcie_cap;
u16 devctl;
u16 linkctl;
u16 vendor;
unsigned long end;
u32 sem;
int i;
int err = 0;
#define MLX4_RESET_BASE 0xf0000
#define MLX4_RESET_SIZE 0x400
#define MLX4_SEM_OFFSET 0x3fc
#define MLX4_RESET_OFFSET 0x10
#define MLX4_RESET_VALUE swab32(1)
#define MLX4_SEM_TIMEOUT_JIFFIES (10 * HZ)
#define MLX4_RESET_TIMEOUT_JIFFIES (2 * HZ)
/*
* Reset the chip. This is somewhat ugly because we have to
* save off the PCI header before reset and then restore it
* after the chip reboots. We skip config space offsets 22
* and 23 since those have a special meaning.
*/
/* Do we need to save off the full 4K PCI Express header?? */
hca_header = kmalloc(256, GFP_KERNEL);
if (!hca_header) {
err = -ENOMEM;
mlx4_err(dev, "Couldn't allocate memory to save HCA "
"PCI header, aborting.\n");
goto out;
}
pcie_cap = pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
for (i = 0; i < 64; ++i) {
if (i == 22 || i == 23)
continue;
if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't save HCA "
"PCI header, aborting.\n");
goto out;
}
}
reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
MLX4_RESET_SIZE);
if (!reset) {
err = -ENOMEM;
mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
goto out;
}
/* grab HW semaphore to lock out flash updates */
end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES;
do {
sem = readl(reset + MLX4_SEM_OFFSET);
if (!sem)
break;
msleep(1);
} while (time_before(jiffies, end));
if (sem) {
mlx4_err(dev, "Failed to obtain HW semaphore, aborting\n");
err = -EAGAIN;
iounmap(reset);
goto out;
}
/* actually hit reset */
writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
iounmap(reset);
/* Docs say to wait one second before accessing device */
msleep(1000);
end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
do {
if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
vendor != 0xffff)
break;
msleep(1);
} while (time_before(jiffies, end));
if (vendor == 0xffff) {
err = -ENODEV;
mlx4_err(dev, "PCI device did not come back after reset, "
"aborting.\n");
goto out;
}
/* Now restore the PCI headers */
if (pcie_cap) {
devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL,
devctl)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express "
"Device Control register, aborting.\n");
goto out;
}
linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL,
linkctl)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express "
"Link control register, aborting.\n");
goto out;
}
}
for (i = 0; i < 16; ++i) {
if (i * 4 == PCI_COMMAND)
continue;
if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA reg %x, "
"aborting.\n", i);
goto out;
}
}
if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
hca_header[PCI_COMMAND / 4])) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA COMMAND, "
"aborting.\n");
goto out;
}
out:
kfree(hca_header);
return err;
}

View File

@@ -0,0 +1,156 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type)
{
u64 out_param;
int err = 0;
err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
if (err) {
mlx4_err(dev, "Sense command failed for port: %d\n", port);
return err;
}
if (out_param > 2) {
mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
return -EINVAL;
}
*type = out_param;
return 0;
}
void mlx4_do_sense_ports(struct mlx4_dev *dev,
enum mlx4_port_type *stype,
enum mlx4_port_type *defaults)
{
struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
int err;
int i;
for (i = 1; i <= dev->caps.num_ports; i++) {
stype[i - 1] = 0;
if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
if (err)
stype[i - 1] = defaults[i - 1];
} else
stype[i - 1] = defaults[i - 1];
}
/*
* Adjust port configuration:
* If port 1 sensed nothing and port 2 is IB, set both as IB
* If port 2 sensed nothing and port 1 is Eth, set both as Eth
*/
if (stype[0] == MLX4_PORT_TYPE_ETH) {
for (i = 1; i < dev->caps.num_ports; i++)
stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
}
if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
for (i = 0; i < dev->caps.num_ports - 1; i++)
stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
}
/*
* If sensed nothing, remain in current configuration.
*/
for (i = 0; i < dev->caps.num_ports; i++)
stype[i] = stype[i] ? stype[i] : defaults[i];
}
static void mlx4_sense_port(struct work_struct *work)
{
struct delayed_work *delay = to_delayed_work(work);
struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
sense_poll);
struct mlx4_dev *dev = sense->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
enum mlx4_port_type stype[MLX4_MAX_PORTS];
mutex_lock(&priv->port_mutex);
mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);
if (mlx4_check_port_params(dev, stype))
goto sense_again;
if (mlx4_change_port_types(dev, stype))
mlx4_err(dev, "Failed to change port_types\n");
sense_again:
mutex_unlock(&priv->port_mutex);
queue_delayed_work(mlx4_wq, &sense->sense_poll,
round_jiffies_relative(MLX4_SENSE_RANGE));
}
void mlx4_start_sense(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_sense *sense = &priv->sense;
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
return;
queue_delayed_work(mlx4_wq, &sense->sense_poll,
round_jiffies_relative(MLX4_SENSE_RANGE));
}
void mlx4_stop_sense(struct mlx4_dev *dev)
{
cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
}
void mlx4_sense_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_sense *sense = &priv->sense;
int port;
sense->dev = dev;
for (port = 1; port <= dev->caps.num_ports; port++)
sense->do_sense_port[port] = 1;
INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
}

View File

@@ -0,0 +1,256 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
struct mlx4_srq_context {
__be32 state_logsize_srqn;
u8 logstride;
u8 reserved1[3];
u8 pg_offset;
u8 reserved2[3];
u32 reserved3;
u8 log_page_size;
u8 reserved4[2];
u8 mtt_base_addr_h;
__be32 mtt_base_addr_l;
__be32 pd;
__be16 limit_watermark;
__be16 wqe_cnt;
u16 reserved5;
__be16 wqe_counter;
u32 reserved6;
__be64 db_rec_addr;
};
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
spin_lock(&srq_table->lock);
srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
if (srq)
atomic_inc(&srq->refcount);
spin_unlock(&srq_table->lock);
if (!srq) {
mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
return;
}
srq->event(srq, event_type);
if (atomic_dec_and_test(&srq->refcount))
complete(&srq->free);
}
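/*
 * Editorial note on the lifetime rules used throughout this file:
 * mlx4_srq_alloc() below starts 'refcount' at 1 and initialises 'free';
 * mlx4_srq_event() above takes an extra reference under srq_table->lock
 * before calling the handler and completes 'free' when the count drops to
 * zero; mlx4_srq_free() removes the SRQ from the radix tree, drops the
 * initial reference and waits on 'free', so the ICM entries and the SRQN
 * are only released once no event handler can still be running.
 */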
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
MLX4_CMD_TIME_CLASS_A);
}
static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
MLX4_CMD_TIME_CLASS_A);
}
static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
MLX4_CMD_TIME_CLASS_B);
}
static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
MLX4_CMD_TIME_CLASS_A);
}
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
u64 db_rec, struct mlx4_srq *srq)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_srq_context *srq_context;
u64 mtt_addr;
int err;
srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
if (srq->srqn == -1)
return -ENOMEM;
err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
if (err)
goto err_out;
err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
if (err)
goto err_put;
spin_lock_irq(&srq_table->lock);
err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
spin_unlock_irq(&srq_table->lock);
if (err)
goto err_cmpt_put;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_radix;
}
srq_context = mailbox->buf;
memset(srq_context, 0, sizeof *srq_context);
srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
srq->srqn);
srq_context->logstride = srq->wqe_shift - 4;
srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
srq_context->mtt_base_addr_h = mtt_addr >> 32;
srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
srq_context->pd = cpu_to_be32(pdn);
srq_context->db_rec_addr = cpu_to_be64(db_rec);
err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
goto err_radix;
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
return 0;
err_radix:
spin_lock_irq(&srq_table->lock);
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
err_cmpt_put:
mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
err_put:
mlx4_table_put(dev, &srq_table->table, srq->srqn);
err_out:
mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
int err;
err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
if (err)
mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);
spin_lock_irq(&srq_table->lock);
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
if (atomic_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
mlx4_table_put(dev, &srq_table->table, srq->srqn);
mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
{
return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
}
EXPORT_SYMBOL_GPL(mlx4_srq_arm);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_srq_context *srq_context;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
srq_context = mailbox->buf;
err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
if (err)
goto err_out;
*limit_watermark = be16_to_cpu(srq_context->limit_watermark);
err_out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_query);
int mlx4_init_srq_table(struct mlx4_dev *dev)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
int err;
spin_lock_init(&srq_table->lock);
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
if (err)
return err;
return 0;
}
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}
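
For orientation, here is a minimal caller-side sketch of the exported SRQ API above. Everything in it (the function name, the chosen sizes, the watermark value, and the assumption that pdn, mtt and db_rec were prepared elsewhere) is illustrative and not part of this commit:

/* Hypothetical usage sketch -- not part of this commit. */
static int example_srq_roundtrip(struct mlx4_dev *dev, u32 pdn,
				 struct mlx4_mtt *mtt, u64 db_rec)
{
	struct mlx4_srq srq;
	int watermark, err;

	srq.max = 64;		/* WQE count; mlx4_srq_alloc() takes ilog2() of it */
	srq.wqe_shift = 6;	/* 64-byte WQE stride (logstride = wqe_shift - 4)  */
	/* A real caller also sets srq.event, which mlx4_srq_event() invokes. */

	err = mlx4_srq_alloc(dev, pdn, mtt, db_rec, &srq);
	if (err)
		return err;

	err = mlx4_srq_arm(dev, &srq, 16);	/* request a limit event at 16 entries */
	if (!err)
		err = mlx4_srq_query(dev, &srq, &watermark);

	mlx4_srq_free(dev, &srq);	/* waits until pending event handlers finish */
	return err;
}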