add idl4k kernel firmware version 1.13.0.105

Jaroslav Kysela
2015-03-26 17:22:37 +01:00
parent 5194d2792e
commit e9070cdc77
31064 changed files with 12769984 additions and 0 deletions

29
kernel/net/8021q/Kconfig Normal file

@@ -0,0 +1,29 @@
#
# Configuration for 802.1Q VLAN support
#

config VLAN_8021Q
	tristate "802.1Q VLAN Support"
	---help---
	  Select this and you will be able to create 802.1Q VLAN interfaces
	  on your ethernet interfaces. 802.1Q VLAN supports almost
	  everything a regular ethernet interface does, including
	  firewalling, bridging, and of course IP traffic. You will need
	  the 'vconfig' tool from the VLAN project in order to effectively
	  use VLANs. See the VLAN web page for more information:
	  <http://www.candelatech.com/~greear/vlan.html>

	  To compile this code as a module, choose M here: the module
	  will be called 8021q.

	  If unsure, say N.

config VLAN_8021Q_GVRP
	bool "GVRP (GARP VLAN Registration Protocol) support"
	depends on VLAN_8021Q
	select GARP
	help
	  Select this to enable GVRP end-system support. GVRP is used for
	  automatic propagation of registered VLANs to switches.

	  If unsure, say N.

10
kernel/net/8021q/Makefile Normal file

@@ -0,0 +1,10 @@
#
# Makefile for the Linux VLAN layer.
#
obj-$(subst m,y,$(CONFIG_VLAN_8021Q)) += vlan_core.o
obj-$(CONFIG_VLAN_8021Q) += 8021q.o
8021q-y := vlan.o vlan_dev.o vlan_netlink.o
8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o
8021q-$(CONFIG_PROC_FS) += vlanproc.o

784
kernel/net/8021q/vlan.c Normal file

@@ -0,0 +1,784 @@
/*
* INET 802.1Q VLAN
* Ethernet-type device handling.
*
* Authors: Ben Greear <greearb@candelatech.com>
* Please send support related email to: netdev@vger.kernel.org
* VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
*
* Fixes:
* Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
* Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
* Correct all the locking - David S. Miller <davem@redhat.com>;
* Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <asm/uaccess.h>
#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"
#define DRV_VERSION "1.8"
/* Global VLAN variables */
int vlan_net_id;
/* Our listing of VLAN group(s) */
static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE];
const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;
static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
static struct packet_type vlan_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_8021Q),
.func = vlan_skb_recv, /* VLAN receive method */
};
/* End of global variables definitions. */
static inline unsigned int vlan_grp_hashfn(unsigned int idx)
{
return ((idx >> VLAN_GRP_HASH_SHIFT) ^ idx) & VLAN_GRP_HASH_MASK;
}
/* Must be invoked with RCU read lock (no preempt) */
static struct vlan_group *__vlan_find_group(struct net_device *real_dev)
{
struct vlan_group *grp;
struct hlist_node *n;
int hash = vlan_grp_hashfn(real_dev->ifindex);
hlist_for_each_entry_rcu(grp, n, &vlan_group_hash[hash], hlist) {
if (grp->real_dev == real_dev)
return grp;
}
return NULL;
}
/* Find the protocol handler. Assumes VID < VLAN_VID_MASK.
*
* Must be invoked with RCU read lock (no preempt)
*/
struct net_device *__find_vlan_dev(struct net_device *real_dev, u16 vlan_id)
{
struct vlan_group *grp = __vlan_find_group(real_dev);
if (grp)
return vlan_group_get_device(grp, vlan_id);
return NULL;
}
static void vlan_group_free(struct vlan_group *grp)
{
int i;
for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
kfree(grp->vlan_devices_arrays[i]);
kfree(grp);
}
static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
{
struct vlan_group *grp;
grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
if (!grp)
return NULL;
grp->real_dev = real_dev;
hlist_add_head_rcu(&grp->hlist,
&vlan_group_hash[vlan_grp_hashfn(real_dev->ifindex)]);
return grp;
}
static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
{
struct net_device **array;
unsigned int size;
ASSERT_RTNL();
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
if (array != NULL)
return 0;
size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
array = kzalloc(size, GFP_KERNEL);
if (array == NULL)
return -ENOBUFS;
vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array;
return 0;
}
static void vlan_rcu_free(struct rcu_head *rcu)
{
vlan_group_free(container_of(rcu, struct vlan_group, rcu));
}
void unregister_vlan_dev(struct net_device *dev)
{
struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct net_device *real_dev = vlan->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
struct vlan_group *grp;
u16 vlan_id = vlan->vlan_id;
ASSERT_RTNL();
grp = __vlan_find_group(real_dev);
BUG_ON(!grp);
/* Take it out of our own structures, but be sure to interlock with
* HW accelerating devices or SW vlan input packet processing.
*/
if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
vlan_group_set_device(grp, vlan_id, NULL);
grp->nr_vlans--;
synchronize_net();
unregister_netdevice(dev);
/* If the group is now empty, kill off the group. */
if (grp->nr_vlans == 0) {
vlan_gvrp_uninit_applicant(real_dev);
if (real_dev->features & NETIF_F_HW_VLAN_RX)
ops->ndo_vlan_rx_register(real_dev, NULL);
hlist_del_rcu(&grp->hlist);
/* Free the group, after all cpu's are done. */
call_rcu(&grp->rcu, vlan_rcu_free);
}
/* Get rid of the vlan's reference to real_dev */
dev_put(real_dev);
}
static void vlan_transfer_operstate(const struct net_device *dev,
struct net_device *vlandev)
{
/* Have to respect userspace enforced dormant state
* of real device, also must allow supplicant running
* on VLAN device
*/
if (dev->operstate == IF_OPER_DORMANT)
netif_dormant_on(vlandev);
else
netif_dormant_off(vlandev);
if (netif_carrier_ok(dev)) {
if (!netif_carrier_ok(vlandev))
netif_carrier_on(vlandev);
} else {
if (netif_carrier_ok(vlandev))
netif_carrier_off(vlandev);
}
}
int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
{
const char *name = real_dev->name;
const struct net_device_ops *ops = real_dev->netdev_ops;
if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
pr_info("8021q: VLANs not supported on %s\n", name);
return -EOPNOTSUPP;
}
if ((real_dev->features & NETIF_F_HW_VLAN_RX) && !ops->ndo_vlan_rx_register) {
pr_info("8021q: device %s has buggy VLAN hw accel\n", name);
return -EOPNOTSUPP;
}
if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
(!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) {
pr_info("8021q: Device %s has buggy VLAN hw accel\n", name);
return -EOPNOTSUPP;
}
if (__find_vlan_dev(real_dev, vlan_id) != NULL)
return -EEXIST;
return 0;
}
int register_vlan_dev(struct net_device *dev)
{
struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct net_device *real_dev = vlan->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
u16 vlan_id = vlan->vlan_id;
struct vlan_group *grp, *ngrp = NULL;
int err;
grp = __vlan_find_group(real_dev);
if (!grp) {
ngrp = grp = vlan_group_alloc(real_dev);
if (!grp)
return -ENOBUFS;
err = vlan_gvrp_init_applicant(real_dev);
if (err < 0)
goto out_free_group;
}
err = vlan_group_prealloc_vid(grp, vlan_id);
if (err < 0)
goto out_uninit_applicant;
err = register_netdevice(dev);
if (err < 0)
goto out_uninit_applicant;
/* Account for reference in struct vlan_dev_info */
dev_hold(real_dev);
vlan_transfer_operstate(real_dev, dev);
linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
/* So, got the sucker initialized, now let's place
* it into our local structure.
*/
vlan_group_set_device(grp, vlan_id, dev);
grp->nr_vlans++;
if (ngrp && real_dev->features & NETIF_F_HW_VLAN_RX)
ops->ndo_vlan_rx_register(real_dev, ngrp);
if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
ops->ndo_vlan_rx_add_vid(real_dev, vlan_id);
return 0;
out_uninit_applicant:
if (ngrp)
vlan_gvrp_uninit_applicant(real_dev);
out_free_group:
if (ngrp) {
hlist_del_rcu(&ngrp->hlist);
/* Free the group, after all cpu's are done. */
call_rcu(&ngrp->rcu, vlan_rcu_free);
}
return err;
}
/* Attach a VLAN device to a MAC address (i.e. Ethernet card).
* Returns 0 if the device was created or a negative error code otherwise.
*/
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
struct net_device *new_dev;
struct net *net = dev_net(real_dev);
struct vlan_net *vn = net_generic(net, vlan_net_id);
char name[IFNAMSIZ];
int err;
if (vlan_id >= VLAN_VID_MASK)
return -ERANGE;
err = vlan_check_real_dev(real_dev, vlan_id);
if (err < 0)
return err;
/* Gotta set up the fields for the device. */
switch (vn->name_type) {
case VLAN_NAME_TYPE_RAW_PLUS_VID:
/* name will look like: eth1.0005 */
snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
break;
case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
/* Put our vlan.VID in the name.
* Name will look like: vlan5
*/
snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
break;
case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
/* Put our vlan.VID in the name.
* Name will look like: eth0.5
*/
snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
break;
case VLAN_NAME_TYPE_PLUS_VID:
/* Put our vlan.VID in the name.
* Name will look like: vlan0005
*/
default:
snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
}
new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name,
vlan_setup, real_dev->num_tx_queues);
if (new_dev == NULL)
return -ENOBUFS;
new_dev->real_num_tx_queues = real_dev->real_num_tx_queues;
dev_net_set(new_dev, net);
/* need 4 bytes for extra VLAN header info,
* hope the underlying device can handle it.
*/
new_dev->mtu = real_dev->mtu;
vlan_dev_info(new_dev)->vlan_id = vlan_id;
vlan_dev_info(new_dev)->real_dev = real_dev;
vlan_dev_info(new_dev)->dent = NULL;
vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
new_dev->rtnl_link_ops = &vlan_link_ops;
err = register_vlan_dev(new_dev);
if (err < 0)
goto out_free_newdev;
return 0;
out_free_newdev:
free_netdev(new_dev);
return err;
}
static void vlan_sync_address(struct net_device *dev,
struct net_device *vlandev)
{
struct vlan_dev_info *vlan = vlan_dev_info(vlandev);
/* May be called without an actual change */
if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
return;
/* vlan address was different from the old address and is equal to
* the new address */
if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
!compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
dev_unicast_delete(dev, vlandev->dev_addr);
/* vlan address was equal to the old address and is different from
* the new address */
if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
dev_unicast_add(dev, vlandev->dev_addr);
memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
}
static void vlan_transfer_features(struct net_device *dev,
struct net_device *vlandev)
{
unsigned long old_features = vlandev->features;
vlandev->features &= ~dev->vlan_features;
vlandev->features |= dev->features & dev->vlan_features;
vlandev->gso_max_size = dev->gso_max_size;
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif
if (old_features != vlandev->features)
netdev_features_change(vlandev);
}
static void __vlan_device_event(struct net_device *dev, unsigned long event)
{
switch (event) {
case NETDEV_CHANGENAME:
vlan_proc_rem_dev(dev);
if (vlan_proc_add_dev(dev) < 0)
pr_warning("8021q: failed to change proc name for %s\n",
dev->name);
break;
case NETDEV_REGISTER:
if (vlan_proc_add_dev(dev) < 0)
pr_warning("8021q: failed to add proc entry for %s\n",
dev->name);
break;
case NETDEV_UNREGISTER:
vlan_proc_rem_dev(dev);
break;
}
}
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
void *ptr)
{
struct net_device *dev = ptr;
struct vlan_group *grp;
int i, flgs;
struct net_device *vlandev;
if (is_vlan_dev(dev))
__vlan_device_event(dev, event);
grp = __vlan_find_group(dev);
if (!grp)
goto out;
/* It is OK that we do not hold the group lock right now,
* as we run under the RTNL lock.
*/
switch (event) {
case NETDEV_CHANGE:
/* Propagate real device state to vlan devices */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
vlan_transfer_operstate(dev, vlandev);
}
break;
case NETDEV_CHANGEADDR:
/* Adjust unicast filters on underlying device */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
flgs = vlandev->flags;
if (!(flgs & IFF_UP))
continue;
vlan_sync_address(dev, vlandev);
}
break;
case NETDEV_CHANGEMTU:
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
if (vlandev->mtu <= dev->mtu)
continue;
dev_set_mtu(vlandev, dev->mtu);
}
break;
case NETDEV_FEAT_CHANGE:
/* Propagate device features to underlying device */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
vlan_transfer_features(dev, vlandev);
}
break;
case NETDEV_DOWN:
/* Put all VLANs for this dev in the down state too. */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
flgs = vlandev->flags;
if (!(flgs & IFF_UP))
continue;
dev_change_flags(vlandev, flgs & ~IFF_UP);
vlan_transfer_operstate(dev, vlandev);
}
break;
case NETDEV_UP:
/* Put all VLANs for this dev in the up state too. */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
flgs = vlandev->flags;
if (flgs & IFF_UP)
continue;
dev_change_flags(vlandev, flgs | IFF_UP);
vlan_transfer_operstate(dev, vlandev);
}
break;
case NETDEV_UNREGISTER:
/* Delete all VLANs for this dev. */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
/* unregistration of last vlan destroys group, abort
* afterwards */
if (grp->nr_vlans == 1)
i = VLAN_GROUP_ARRAY_LEN;
unregister_vlan_dev(vlandev);
}
break;
}
out:
return NOTIFY_DONE;
}
static struct notifier_block vlan_notifier_block __read_mostly = {
.notifier_call = vlan_device_event,
};
/*
* VLAN IOCTL handler.
* o execute requested action or pass command to the device driver
* arg is really a struct vlan_ioctl_args __user *.
*/
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
int err;
struct vlan_ioctl_args args;
struct net_device *dev = NULL;
if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
return -EFAULT;
/* Null terminate this sucker, just in case. */
args.device1[23] = 0;
args.u.device2[23] = 0;
rtnl_lock();
switch (args.cmd) {
case SET_VLAN_INGRESS_PRIORITY_CMD:
case SET_VLAN_EGRESS_PRIORITY_CMD:
case SET_VLAN_FLAG_CMD:
case ADD_VLAN_CMD:
case DEL_VLAN_CMD:
case GET_VLAN_REALDEV_NAME_CMD:
case GET_VLAN_VID_CMD:
err = -ENODEV;
dev = __dev_get_by_name(net, args.device1);
if (!dev)
goto out;
err = -EINVAL;
if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
goto out;
}
switch (args.cmd) {
case SET_VLAN_INGRESS_PRIORITY_CMD:
err = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
vlan_dev_set_ingress_priority(dev,
args.u.skb_priority,
args.vlan_qos);
err = 0;
break;
case SET_VLAN_EGRESS_PRIORITY_CMD:
err = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
err = vlan_dev_set_egress_priority(dev,
args.u.skb_priority,
args.vlan_qos);
break;
case SET_VLAN_FLAG_CMD:
err = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
err = vlan_dev_change_flags(dev,
args.vlan_qos ? args.u.flag : 0,
args.u.flag);
break;
case SET_VLAN_NAME_TYPE_CMD:
err = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
if ((args.u.name_type >= 0) &&
(args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
struct vlan_net *vn;
vn = net_generic(net, vlan_net_id);
vn->name_type = args.u.name_type;
err = 0;
} else {
err = -EINVAL;
}
break;
case ADD_VLAN_CMD:
err = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
err = register_vlan_device(dev, args.u.VID);
break;
case DEL_VLAN_CMD:
err = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
unregister_vlan_dev(dev);
err = 0;
break;
case GET_VLAN_REALDEV_NAME_CMD:
err = 0;
vlan_dev_get_realdev_name(dev, args.u.device2);
if (copy_to_user(arg, &args,
sizeof(struct vlan_ioctl_args)))
err = -EFAULT;
break;
case GET_VLAN_VID_CMD:
err = 0;
args.u.VID = vlan_dev_vlan_id(dev);
if (copy_to_user(arg, &args,
sizeof(struct vlan_ioctl_args)))
err = -EFAULT;
break;
default:
err = -EOPNOTSUPP;
break;
}
out:
rtnl_unlock();
return err;
}
static int vlan_init_net(struct net *net)
{
int err;
struct vlan_net *vn;
err = -ENOMEM;
vn = kzalloc(sizeof(struct vlan_net), GFP_KERNEL);
if (vn == NULL)
goto err_alloc;
err = net_assign_generic(net, vlan_net_id, vn);
if (err < 0)
goto err_assign;
vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
err = vlan_proc_init(net);
if (err < 0)
goto err_proc;
return 0;
err_proc:
/* nothing */
err_assign:
kfree(vn);
err_alloc:
return err;
}
static void vlan_exit_net(struct net *net)
{
struct vlan_net *vn;
vn = net_generic(net, vlan_net_id);
rtnl_kill_links(net, &vlan_link_ops);
vlan_proc_cleanup(net);
kfree(vn);
}
static struct pernet_operations vlan_net_ops = {
.init = vlan_init_net,
.exit = vlan_exit_net,
};
static int __init vlan_proto_init(void)
{
int err;
pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright);
pr_info("All bugs added by %s\n", vlan_buggyright);
err = register_pernet_gen_device(&vlan_net_id, &vlan_net_ops);
if (err < 0)
goto err0;
err = register_netdevice_notifier(&vlan_notifier_block);
if (err < 0)
goto err2;
err = vlan_gvrp_init();
if (err < 0)
goto err3;
err = vlan_netlink_init();
if (err < 0)
goto err4;
dev_add_pack(&vlan_packet_type);
vlan_ioctl_set(vlan_ioctl_handler);
return 0;
err4:
vlan_gvrp_uninit();
err3:
unregister_netdevice_notifier(&vlan_notifier_block);
err2:
unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
err0:
return err;
}
static void __exit vlan_cleanup_module(void)
{
unsigned int i;
vlan_ioctl_set(NULL);
vlan_netlink_fini();
unregister_netdevice_notifier(&vlan_notifier_block);
dev_remove_pack(&vlan_packet_type);
/* This table must be empty if there are no module references left. */
for (i = 0; i < VLAN_GRP_HASH_SIZE; i++)
BUG_ON(!hlist_empty(&vlan_group_hash[i]));
unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
vlan_gvrp_uninit();
}
module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
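
For context, the SIOCSIFVLAN ioctl served by vlan_ioctl_handler() above is the interface that userspace tools such as 'vconfig' drive. Below is a minimal, hypothetical userspace sketch (not part of the committed file) that adds eth0.5; it assumes the uapi definitions of struct vlan_ioctl_args, ADD_VLAN_CMD and SIOCSIFVLAN exported via <linux/if_vlan.h> and <linux/sockios.h>.

/*
 * Hypothetical userspace sketch (NOT part of this commit): create the
 * VLAN interface eth0.5 through the SIOCSIFVLAN ioctl served by
 * vlan_ioctl_handler() in vlan.c above.  Requires CAP_NET_ADMIN.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if_vlan.h>	/* struct vlan_ioctl_args, ADD_VLAN_CMD */
#include <linux/sockios.h>	/* SIOCSIFVLAN */

int main(void)
{
	struct vlan_ioctl_args args;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&args, 0, sizeof(args));
	args.cmd = ADD_VLAN_CMD;	/* routed to register_vlan_device() */
	strncpy(args.device1, "eth0", sizeof(args.device1) - 1);
	args.u.VID = 5;			/* must be < VLAN_VID_MASK (0x0fff) */

	if (ioctl(fd, SIOCSIFVLAN, &args) < 0) {
		perror("SIOCSIFVLAN");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

With the per-namespace default name type VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD set in vlan_init_net(), the resulting interface is named eth0.5.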

136
kernel/net/8021q/vlan.h Normal file

@@ -0,0 +1,136 @@
#ifndef __BEN_VLAN_802_1Q_INC__
#define __BEN_VLAN_802_1Q_INC__
#include <linux/if_vlan.h>
/**
* struct vlan_priority_tci_mapping - vlan egress priority mappings
* @priority: skb priority
* @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
* @next: pointer to next struct
*/
struct vlan_priority_tci_mapping {
u32 priority;
u16 vlan_qos;
struct vlan_priority_tci_mapping *next;
};
/**
* struct vlan_dev_info - VLAN private device data
* @nr_ingress_mappings: number of ingress priority mappings
* @ingress_priority_map: ingress priority mappings
* @nr_egress_mappings: number of egress priority mappings
* @egress_priority_map: hash of egress priority mappings
* @vlan_id: VLAN identifier
* @flags: device flags
* @real_dev: underlying netdevice
* @real_dev_addr: address of underlying netdevice
* @dent: proc dir entry
* @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX
* @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX
*/
struct vlan_dev_info {
unsigned int nr_ingress_mappings;
u32 ingress_priority_map[8];
unsigned int nr_egress_mappings;
struct vlan_priority_tci_mapping *egress_priority_map[16];
u16 vlan_id;
u16 flags;
struct net_device *real_dev;
unsigned char real_dev_addr[ETH_ALEN];
struct proc_dir_entry *dent;
unsigned long cnt_inc_headroom_on_tx;
unsigned long cnt_encap_on_xmit;
};
static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
{
return netdev_priv(dev);
}
#define VLAN_GRP_HASH_SHIFT 5
#define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT)
#define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1)
/* Find a VLAN device by the MAC address of its Ethernet device, and
* its VLAN ID. The default configuration is for VLAN scope
* to be box-wide, so the MAC will be ignored. The MAC will only be
* looked at if we are configured to have a separate set of VLANs per
* each MAC addressable interface. Note that this latter option does
* NOT follow the spec for VLANs, but may be useful for doing very
* large quantities of VLAN MUX/DEMUX onto FrameRelay or ATM PVCs.
*
* Must be invoked with rcu_read_lock (ie preempt disabled)
* or with RTNL.
*/
struct net_device *__find_vlan_dev(struct net_device *real_dev, u16 vlan_id);
/* found in vlan_dev.c */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev);
void vlan_dev_set_ingress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio);
int vlan_dev_set_egress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio);
int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id);
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev);
void unregister_vlan_dev(struct net_device *dev);
static inline u32 vlan_get_ingress_priority(struct net_device *dev,
u16 vlan_tci)
{
struct vlan_dev_info *vip = vlan_dev_info(dev);
return vip->ingress_priority_map[(vlan_tci >> 13) & 0x7];
}
#ifdef CONFIG_VLAN_8021Q_GVRP
extern int vlan_gvrp_request_join(const struct net_device *dev);
extern void vlan_gvrp_request_leave(const struct net_device *dev);
extern int vlan_gvrp_init_applicant(struct net_device *dev);
extern void vlan_gvrp_uninit_applicant(struct net_device *dev);
extern int vlan_gvrp_init(void);
extern void vlan_gvrp_uninit(void);
#else
static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; }
static inline void vlan_gvrp_request_leave(const struct net_device *dev) {}
static inline int vlan_gvrp_init_applicant(struct net_device *dev) { return 0; }
static inline void vlan_gvrp_uninit_applicant(struct net_device *dev) {}
static inline int vlan_gvrp_init(void) { return 0; }
static inline void vlan_gvrp_uninit(void) {}
#endif
extern const char vlan_fullname[];
extern const char vlan_version[];
extern int vlan_netlink_init(void);
extern void vlan_netlink_fini(void);
extern struct rtnl_link_ops vlan_link_ops;
static inline int is_vlan_dev(struct net_device *dev)
{
return dev->priv_flags & IFF_802_1Q_VLAN;
}
extern int vlan_net_id;
struct proc_dir_entry;
struct vlan_net {
/* /proc/net/vlan */
struct proc_dir_entry *proc_vlan_dir;
/* /proc/net/vlan/config */
struct proc_dir_entry *proc_vlan_conf;
/* Determines interface naming scheme. */
unsigned short name_type;
};
#endif /* !(__BEN_VLAN_802_1Q_INC__) */

132
kernel/net/8021q/vlan_core.c Normal file

@@ -0,0 +1,132 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"
/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling)
{
if (netpoll_rx(skb))
return NET_RX_DROP;
if (skb_bond_should_drop(skb))
goto drop;
skb->vlan_tci = vlan_tci;
skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
if (!skb->dev)
goto drop;
return (polling ? netif_receive_skb(skb) : netif_rx(skb));
drop:
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);
int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct net_device_stats *stats;
skb->dev = vlan_dev_info(dev)->real_dev;
netif_nit_deliver(skb);
skb->dev = dev;
skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
skb->vlan_tci = 0;
stats = &dev->stats;
stats->rx_packets++;
stats->rx_bytes += skb->len;
switch (skb->pkt_type) {
case PACKET_BROADCAST:
break;
case PACKET_MULTICAST:
stats->multicast++;
break;
case PACKET_OTHERHOST:
/* Our lower layer thinks this is not local, let's make sure.
* This allows the VLAN to have a different MAC than the
* underlying device, and still route correctly. */
if (!compare_ether_addr(eth_hdr(skb)->h_dest,
dev->dev_addr))
skb->pkt_type = PACKET_HOST;
break;
}
return 0;
}
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb)
{
struct sk_buff *p;
if (skb_bond_should_drop(skb))
goto drop;
skb->vlan_tci = vlan_tci;
skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
if (!skb->dev)
goto drop;
for (p = napi->gro_list; p; p = p->next) {
NAPI_GRO_CB(p)->same_flow =
p->dev == skb->dev && !compare_ether_header(
skb_mac_header(p), skb_gro_mac_header(skb));
NAPI_GRO_CB(p)->flush = 0;
}
return dev_gro_receive(napi, skb);
drop:
return GRO_DROP;
}
int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb)
{
if (netpoll_rx_on(skb))
return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
skb_gro_reset_offset(skb);
return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
}
EXPORT_SYMBOL(vlan_gro_receive);
int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci)
{
struct sk_buff *skb = napi_frags_skb(napi);
if (!skb)
return NET_RX_DROP;
if (netpoll_rx_on(skb)) {
skb->protocol = eth_type_trans(skb, skb->dev);
return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
}
return napi_frags_finish(napi, skb,
vlan_gro_common(napi, grp, vlan_tci, skb));
}
EXPORT_SYMBOL(vlan_gro_frags);

832
kernel/net/8021q/vlan_dev.c Normal file

@@ -0,0 +1,832 @@
/* -*- linux-c -*-
* INET 802.1Q VLAN
* Ethernet-type device handling.
*
* Authors: Ben Greear <greearb@candelatech.com>
* Please send support related email to: netdev@vger.kernel.org
* VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
*
* Fixes: Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
* - reset skb->pkt_type on incoming packets when MAC was changed
* - see that changed MAC is saddr for outgoing packets
* Oct 20, 2001: Ard van Breeman:
* - Fix MC-list, finally.
* - Flush MC-list on VLAN destroy.
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include "vlan.h"
#include "vlanproc.h"
#include <linux/if_vlan.h>
/*
* Rebuild the Ethernet MAC header. This is called after an ARP
* (or in future other address resolution) has completed on this
* sk_buff. We now let ARP fill in the other fields.
*
* This routine CANNOT use cached dst->neigh!
* Really, it is used only when dst->neigh is wrong.
*
* TODO: This needs a checkup, I'm ignorant here. --BLG
*/
static int vlan_dev_rebuild_header(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
switch (veth->h_vlan_encapsulated_proto) {
#ifdef CONFIG_INET
case htons(ETH_P_IP):
/* TODO: Confirm this will work with VLAN headers... */
return arp_find(veth->h_dest, skb);
#endif
default:
pr_debug("%s: unable to resolve type %X addresses.\n",
dev->name, ntohs(veth->h_vlan_encapsulated_proto));
memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
break;
}
return 0;
}
static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
if (skb_cow(skb, skb_headroom(skb)) < 0)
skb = NULL;
if (skb) {
/* Lifted from Gleb's VLAN code... */
memmove(skb->data - ETH_HLEN,
skb->data - VLAN_ETH_HLEN, 12);
skb->mac_header += VLAN_HLEN;
}
}
return skb;
}
static inline void vlan_set_encap_proto(struct sk_buff *skb,
struct vlan_hdr *vhdr)
{
__be16 proto;
unsigned char *rawp;
/*
* Was a VLAN packet, grab the encapsulated protocol, which the layer
* three protocols care about.
*/
proto = vhdr->h_vlan_encapsulated_proto;
if (ntohs(proto) >= 1536) {
skb->protocol = proto;
return;
}
rawp = skb->data;
if (*(unsigned short *)rawp == 0xFFFF)
/*
* This is a magic hack to spot IPX packets. Older Novell
* breaks the protocol design and runs IPX over 802.3 without
* an 802.2 LLC layer. We look for FFFF which isn't a used
* 802.2 SSAP/DSAP. This won't work for fault tolerant netware
* but does for the rest.
*/
skb->protocol = htons(ETH_P_802_3);
else
/*
* Real 802.2 LLC
*/
skb->protocol = htons(ETH_P_802_2);
}
/*
* Determine the packet's protocol ID. The rule here is that we
* assume 802.3 if the type field is short enough to be a length.
* This is normal practice and works for any 'now in use' protocol.
*
* Also, at this point we assume that we ARE dealing exclusively with
* VLAN packets, or packets that should be made into VLAN packets based
* on a default VLAN ID.
*
* NOTE: Should be similar to ethernet/eth.c.
*
* SANITY NOTE: This method is called when a packet is moving up the stack
* towards userland. To get here, it would have already passed
* through the ethernet/eth.c eth_type_trans() method.
* SANITY NOTE 2: We are referencing the VLAN_HDR fields, which MAY be
* stored UNALIGNED in memory. RISC systems don't like
* such cases very much...
* SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
* aligned, so there doesn't need to be any of the unaligned
* stuff. It has been commented out now... --Ben
*
*/
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
struct vlan_hdr *vhdr;
struct net_device_stats *stats;
u16 vlan_id;
u16 vlan_tci;
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb == NULL)
goto err_free;
if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
goto err_free;
vhdr = (struct vlan_hdr *)skb->data;
vlan_tci = ntohs(vhdr->h_vlan_TCI);
vlan_id = vlan_tci & VLAN_VID_MASK;
rcu_read_lock();
skb->dev = __find_vlan_dev(dev, vlan_id);
if (!skb->dev) {
pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
__func__, vlan_id, dev->name);
goto err_unlock;
}
stats = &skb->dev->stats;
stats->rx_packets++;
stats->rx_bytes += skb->len;
skb_pull_rcsum(skb, VLAN_HLEN);
skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
pr_debug("%s: priority: %u for TCI: %hu\n",
__func__, skb->priority, vlan_tci);
switch (skb->pkt_type) {
case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
/* stats->broadcast ++; // no such counter :-( */
break;
case PACKET_MULTICAST:
stats->multicast++;
break;
case PACKET_OTHERHOST:
/* Our lower layer thinks this is not local, let's make sure.
* This allows the VLAN to have a different MAC than the
* underlying device, and still route correctly.
*/
if (!compare_ether_addr(eth_hdr(skb)->h_dest,
skb->dev->dev_addr))
skb->pkt_type = PACKET_HOST;
break;
default:
break;
}
vlan_set_encap_proto(skb, vhdr);
skb = vlan_check_reorder_header(skb);
if (!skb) {
stats->rx_errors++;
goto err_unlock;
}
netif_rx(skb);
rcu_read_unlock();
return NET_RX_SUCCESS;
err_unlock:
rcu_read_unlock();
err_free:
kfree_skb(skb);
return NET_RX_DROP;
}
static inline u16
vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
{
struct vlan_priority_tci_mapping *mp;
mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
while (mp) {
if (mp->priority == skb->priority) {
return mp->vlan_qos; /* This should already be shifted
* to mask correctly with the
* VLAN's TCI */
}
mp = mp->next;
}
return 0;
}
/*
* Create the VLAN header for an arbitrary protocol layer
*
* saddr=NULL means use device source address
* daddr=NULL means leave destination address (eg unresolved arp)
*
* This is called when the SKB is moving down the stack towards the
* physical devices.
*/
static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
unsigned int len)
{
struct vlan_hdr *vhdr;
unsigned int vhdrlen = 0;
u16 vlan_tci = 0;
int rc;
if (WARN_ON(skb_headroom(skb) < dev->hard_header_len))
return -ENOSPC;
if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
vlan_tci = vlan_dev_info(dev)->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
vhdr->h_vlan_TCI = htons(vlan_tci);
/*
* Set the protocol type. For a packet of type ETH_P_802_3 we
* put the length in here instead. It is up to the 802.2
* layer to carry protocol information.
*/
if (type != ETH_P_802_3)
vhdr->h_vlan_encapsulated_proto = htons(type);
else
vhdr->h_vlan_encapsulated_proto = htons(len);
skb->protocol = htons(ETH_P_8021Q);
type = ETH_P_8021Q;
vhdrlen = VLAN_HLEN;
}
/* Before delegating work to the lower layer, enter our MAC-address */
if (saddr == NULL)
saddr = dev->dev_addr;
/* Now make the underlying real hard header */
dev = vlan_dev_info(dev)->real_dev;
rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
if (rc > 0)
rc += vhdrlen;
return rc;
}
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
int i = skb_get_queue_mapping(skb);
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
unsigned int len;
int ret;
/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
*
* NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
* OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
*/
if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
unsigned int orig_headroom = skb_headroom(skb);
u16 vlan_tci;
vlan_dev_info(dev)->cnt_encap_on_xmit++;
vlan_tci = vlan_dev_info(dev)->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
skb = __vlan_put_tag(skb, vlan_tci);
if (!skb) {
txq->tx_dropped++;
return NETDEV_TX_OK;
}
if (orig_headroom < VLAN_HLEN)
vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
}
skb->dev = vlan_dev_info(dev)->real_dev;
len = skb->len;
ret = dev_queue_xmit(skb);
if (likely(ret == NET_XMIT_SUCCESS)) {
txq->tx_packets++;
txq->tx_bytes += len;
} else
txq->tx_dropped++;
return NETDEV_TX_OK;
}
static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
int i = skb_get_queue_mapping(skb);
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
u16 vlan_tci;
unsigned int len;
int ret;
vlan_tci = vlan_dev_info(dev)->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
skb->dev = vlan_dev_info(dev)->real_dev;
len = skb->len;
ret = dev_queue_xmit(skb);
if (likely(ret == NET_XMIT_SUCCESS)) {
txq->tx_packets++;
txq->tx_bytes += len;
} else
txq->tx_dropped++;
return NETDEV_TX_OK;
}
static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
/* TODO: gotta make sure the underlying layer can handle it,
* maybe an IFF_VLAN_CAPABLE flag for devices?
*/
if (vlan_dev_info(dev)->real_dev->mtu < new_mtu)
return -ERANGE;
dev->mtu = new_mtu;
return 0;
}
void vlan_dev_set_ingress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio)
{
struct vlan_dev_info *vlan = vlan_dev_info(dev);
if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
vlan->nr_ingress_mappings--;
else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
vlan->nr_ingress_mappings++;
vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
}
int vlan_dev_set_egress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio)
{
struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct vlan_priority_tci_mapping *mp = NULL;
struct vlan_priority_tci_mapping *np;
u32 vlan_qos = (vlan_prio << 13) & 0xE000;
/* See if a priority mapping exists.. */
mp = vlan->egress_priority_map[skb_prio & 0xF];
while (mp) {
if (mp->priority == skb_prio) {
if (mp->vlan_qos && !vlan_qos)
vlan->nr_egress_mappings--;
else if (!mp->vlan_qos && vlan_qos)
vlan->nr_egress_mappings++;
mp->vlan_qos = vlan_qos;
return 0;
}
mp = mp->next;
}
/* Create a new mapping then. */
mp = vlan->egress_priority_map[skb_prio & 0xF];
np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
if (!np)
return -ENOBUFS;
np->next = mp;
np->priority = skb_prio;
np->vlan_qos = vlan_qos;
vlan->egress_priority_map[skb_prio & 0xF] = np;
if (vlan_qos)
vlan->nr_egress_mappings++;
return 0;
}
/* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
{
struct vlan_dev_info *vlan = vlan_dev_info(dev);
u32 old_flags = vlan->flags;
if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
return -EINVAL;
vlan->flags = (old_flags & ~mask) | (flags & mask);
if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_join(dev);
else
vlan_gvrp_request_leave(dev);
}
return 0;
}
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
{
strncpy(result, vlan_dev_info(dev)->real_dev->name, 23);
}
static int vlan_dev_open(struct net_device *dev)
{
struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct net_device *real_dev = vlan->real_dev;
int err;
if (!(real_dev->flags & IFF_UP))
return -ENETDOWN;
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
err = dev_unicast_add(real_dev, dev->dev_addr);
if (err < 0)
goto out;
}
if (dev->flags & IFF_ALLMULTI) {
err = dev_set_allmulti(real_dev, 1);
if (err < 0)
goto del_unicast;
}
if (dev->flags & IFF_PROMISC) {
err = dev_set_promiscuity(real_dev, 1);
if (err < 0)
goto clear_allmulti;
}
memcpy(vlan->real_dev_addr, real_dev->dev_addr, ETH_ALEN);
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_join(dev);
netif_carrier_on(dev);
return 0;
clear_allmulti:
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(real_dev, -1);
del_unicast:
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
dev_unicast_delete(real_dev, dev->dev_addr);
out:
netif_carrier_off(dev);
return err;
}
static int vlan_dev_stop(struct net_device *dev)
{
struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct net_device *real_dev = vlan->real_dev;
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_leave(dev);
dev_mc_unsync(real_dev, dev);
dev_unicast_unsync(real_dev, dev);
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(real_dev, -1);
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(real_dev, -1);
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
dev_unicast_delete(real_dev, dev->dev_addr);
netif_carrier_off(dev);
return 0;
}
static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
struct sockaddr *addr = p;
int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
if (!(dev->flags & IFF_UP))
goto out;
if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
err = dev_unicast_add(real_dev, addr->sa_data);
if (err < 0)
return err;
}
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
dev_unicast_delete(real_dev, dev->dev_addr);
out:
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
return 0;
}
static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
struct ifreq ifrr;
int err = -EOPNOTSUPP;
strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
ifrr.ifr_ifru = ifr->ifr_ifru;
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
break;
}
if (!err)
ifr->ifr_ifru = ifrr.ifr_ifru;
return err;
}
static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int err = 0;
if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
err = ops->ndo_neigh_setup(real_dev, pa);
return err;
}
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = 0;
if (ops->ndo_fcoe_ddp_setup)
rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);
return rc;
}
static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int len = 0;
if (ops->ndo_fcoe_ddp_done)
len = ops->ndo_fcoe_ddp_done(real_dev, xid);
return len;
}
static int vlan_dev_fcoe_enable(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = -EINVAL;
if (ops->ndo_fcoe_enable)
rc = ops->ndo_fcoe_enable(real_dev);
return rc;
}
static int vlan_dev_fcoe_disable(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = -EINVAL;
if (ops->ndo_fcoe_disable)
rc = ops->ndo_fcoe_disable(real_dev);
return rc;
}
#endif
static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
if (change & IFF_ALLMULTI)
dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
if (change & IFF_PROMISC)
dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
}
static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
{
dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
}
/*
* vlan network devices have devices nesting below them, and are a special
* "super class" of normal network devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key vlan_netdev_xmit_lock_key;
static struct lock_class_key vlan_netdev_addr_lock_key;
static void vlan_dev_set_lockdep_one(struct net_device *dev,
struct netdev_queue *txq,
void *_subclass)
{
lockdep_set_class_and_subclass(&txq->_xmit_lock,
&vlan_netdev_xmit_lock_key,
*(int *)_subclass);
}
static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
{
lockdep_set_class_and_subclass(&dev->addr_list_lock,
&vlan_netdev_addr_lock_key,
subclass);
netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
}
static const struct header_ops vlan_header_ops = {
.create = vlan_dev_hard_header,
.rebuild = vlan_dev_rebuild_header,
.parse = eth_header_parse,
};
static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops;
static int vlan_dev_init(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
int subclass = 0;
netif_carrier_off(dev);
/* IFF_BROADCAST|IFF_MULTICAST; ??? */
dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
dev->iflink = real_dev->ifindex;
dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
(1<<__LINK_STATE_DORMANT))) |
(1<<__LINK_STATE_PRESENT);
dev->features |= real_dev->features & real_dev->vlan_features;
dev->gso_max_size = real_dev->gso_max_size;
/* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id;
if (is_zero_ether_addr(dev->dev_addr))
memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len);
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
#endif
if (real_dev->features & NETIF_F_HW_VLAN_TX) {
dev->header_ops = real_dev->header_ops;
dev->hard_header_len = real_dev->hard_header_len;
dev->netdev_ops = &vlan_netdev_accel_ops;
} else {
dev->header_ops = &vlan_header_ops;
dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
dev->netdev_ops = &vlan_netdev_ops;
}
if (is_vlan_dev(real_dev))
subclass = 1;
vlan_dev_set_lockdep_class(dev, subclass);
return 0;
}
static void vlan_dev_uninit(struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
struct vlan_dev_info *vlan = vlan_dev_info(dev);
int i;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
while ((pm = vlan->egress_priority_map[i]) != NULL) {
vlan->egress_priority_map[i] = pm->next;
kfree(pm);
}
}
}
static int vlan_ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
const struct vlan_dev_info *vlan = vlan_dev_info(dev);
return dev_ethtool_get_settings(vlan->real_dev, cmd);
}
static void vlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strcpy(info->driver, vlan_fullname);
strcpy(info->version, vlan_version);
strcpy(info->fw_version, "N/A");
}
static u32 vlan_ethtool_get_rx_csum(struct net_device *dev)
{
const struct vlan_dev_info *vlan = vlan_dev_info(dev);
return dev_ethtool_get_rx_csum(vlan->real_dev);
}
static u32 vlan_ethtool_get_flags(struct net_device *dev)
{
const struct vlan_dev_info *vlan = vlan_dev_info(dev);
return dev_ethtool_get_flags(vlan->real_dev);
}
static const struct ethtool_ops vlan_ethtool_ops = {
.get_settings = vlan_ethtool_get_settings,
.get_drvinfo = vlan_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_rx_csum = vlan_ethtool_get_rx_csum,
.get_flags = vlan_ethtool_get_flags,
};
static const struct net_device_ops vlan_netdev_ops = {
.ndo_change_mtu = vlan_dev_change_mtu,
.ndo_init = vlan_dev_init,
.ndo_uninit = vlan_dev_uninit,
.ndo_open = vlan_dev_open,
.ndo_stop = vlan_dev_stop,
.ndo_start_xmit = vlan_dev_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = vlan_dev_set_mac_address,
.ndo_set_rx_mode = vlan_dev_set_rx_mode,
.ndo_set_multicast_list = vlan_dev_set_rx_mode,
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
.ndo_fcoe_enable = vlan_dev_fcoe_enable,
.ndo_fcoe_disable = vlan_dev_fcoe_disable,
#endif
};
static const struct net_device_ops vlan_netdev_accel_ops = {
.ndo_change_mtu = vlan_dev_change_mtu,
.ndo_init = vlan_dev_init,
.ndo_uninit = vlan_dev_uninit,
.ndo_open = vlan_dev_open,
.ndo_stop = vlan_dev_stop,
.ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = vlan_dev_set_mac_address,
.ndo_set_rx_mode = vlan_dev_set_rx_mode,
.ndo_set_multicast_list = vlan_dev_set_rx_mode,
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
.ndo_fcoe_enable = vlan_dev_fcoe_enable,
.ndo_fcoe_disable = vlan_dev_fcoe_disable,
#endif
};
void vlan_setup(struct net_device *dev)
{
ether_setup(dev);
dev->priv_flags |= IFF_802_1Q_VLAN;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->tx_queue_len = 0;
dev->netdev_ops = &vlan_netdev_ops;
dev->destructor = free_netdev;
dev->ethtool_ops = &vlan_ethtool_ops;
memset(dev->broadcast, 0, ETH_ALEN);
}

66
kernel/net/8021q/vlan_gvrp.c Normal file

@@ -0,0 +1,66 @@
/*
* IEEE 802.1Q GARP VLAN Registration Protocol (GVRP)
*
* Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <net/garp.h>
#include "vlan.h"
#define GARP_GVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
enum gvrp_attributes {
GVRP_ATTR_INVALID,
GVRP_ATTR_VID,
__GVRP_ATTR_MAX
};
#define GVRP_ATTR_MAX (__GVRP_ATTR_MAX - 1)
static struct garp_application vlan_gvrp_app __read_mostly = {
.proto.group_address = GARP_GVRP_ADDRESS,
.maxattr = GVRP_ATTR_MAX,
.type = GARP_APPLICATION_GVRP,
};
int vlan_gvrp_request_join(const struct net_device *dev)
{
const struct vlan_dev_info *vlan = vlan_dev_info(dev);
__be16 vlan_id = htons(vlan->vlan_id);
return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
&vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
}
void vlan_gvrp_request_leave(const struct net_device *dev)
{
const struct vlan_dev_info *vlan = vlan_dev_info(dev);
__be16 vlan_id = htons(vlan->vlan_id);
garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
&vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
}
int vlan_gvrp_init_applicant(struct net_device *dev)
{
return garp_init_applicant(dev, &vlan_gvrp_app);
}
void vlan_gvrp_uninit_applicant(struct net_device *dev)
{
garp_uninit_applicant(dev, &vlan_gvrp_app);
}
int __init vlan_gvrp_init(void)
{
return garp_register_application(&vlan_gvrp_app);
}
void vlan_gvrp_uninit(void)
{
garp_unregister_application(&vlan_gvrp_app);
}

259
kernel/net/8021q/vlan_netlink.c Normal file

@@ -0,0 +1,259 @@
/*
* VLAN netlink control interface
*
* Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include "vlan.h"
static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
[IFLA_VLAN_ID] = { .type = NLA_U16 },
[IFLA_VLAN_FLAGS] = { .len = sizeof(struct ifla_vlan_flags) },
[IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED },
[IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
};
static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {
[IFLA_VLAN_QOS_MAPPING] = { .len = sizeof(struct ifla_vlan_qos_mapping) },
};
static inline int vlan_validate_qos_map(struct nlattr *attr)
{
if (!attr)
return 0;
return nla_validate_nested(attr, IFLA_VLAN_QOS_MAX, vlan_map_policy);
}
static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
struct ifla_vlan_flags *flags;
u16 id;
int err;
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
return -EADDRNOTAVAIL;
}
if (!data)
return -EINVAL;
if (data[IFLA_VLAN_ID]) {
id = nla_get_u16(data[IFLA_VLAN_ID]);
if (id >= VLAN_VID_MASK)
return -ERANGE;
}
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
if ((flags->flags & flags->mask) &
~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
return -EINVAL;
}
err = vlan_validate_qos_map(data[IFLA_VLAN_INGRESS_QOS]);
if (err < 0)
return err;
err = vlan_validate_qos_map(data[IFLA_VLAN_EGRESS_QOS]);
if (err < 0)
return err;
return 0;
}
static int vlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct ifla_vlan_flags *flags;
struct ifla_vlan_qos_mapping *m;
struct nlattr *attr;
int rem;
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
vlan_dev_change_flags(dev, flags->flags, flags->mask);
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
m = nla_data(attr);
vlan_dev_set_ingress_priority(dev, m->to, m->from);
}
}
if (data[IFLA_VLAN_EGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
m = nla_data(attr);
vlan_dev_set_egress_priority(dev, m->from, m->to);
}
}
return 0;
}
static int vlan_get_tx_queues(struct net *net,
struct nlattr *tb[],
unsigned int *num_tx_queues,
unsigned int *real_num_tx_queues)
{
struct net_device *real_dev;
if (!tb[IFLA_LINK])
return -EINVAL;
real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
*num_tx_queues = real_dev->num_tx_queues;
*real_num_tx_queues = real_dev->real_num_tx_queues;
return 0;
}
static int vlan_newlink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct net_device *real_dev;
int err;
if (!data[IFLA_VLAN_ID])
return -EINVAL;
if (!tb[IFLA_LINK])
return -EINVAL;
real_dev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
vlan->real_dev = real_dev;
vlan->flags = VLAN_FLAG_REORDER_HDR;
err = vlan_check_real_dev(real_dev, vlan->vlan_id);
if (err < 0)
return err;
if (!tb[IFLA_MTU])
dev->mtu = real_dev->mtu;
else if (dev->mtu > real_dev->mtu)
return -EINVAL;
err = vlan_changelink(dev, tb, data);
if (err < 0)
return err;
return register_vlan_dev(dev);
}
static inline size_t vlan_qos_map_size(unsigned int n)
{
if (n == 0)
return 0;
/* IFLA_VLAN_{EGRESS,INGRESS}_QOS + n * IFLA_VLAN_QOS_MAPPING */
return nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) * n;
}
static size_t vlan_get_size(const struct net_device *dev)
{
struct vlan_dev_info *vlan = vlan_dev_info(dev);
return nla_total_size(2) + /* IFLA_VLAN_ID */
sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
vlan_qos_map_size(vlan->nr_ingress_mappings) +
vlan_qos_map_size(vlan->nr_egress_mappings);
}
static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct vlan_priority_tci_mapping *pm;
struct ifla_vlan_flags f;
struct ifla_vlan_qos_mapping m;
struct nlattr *nest;
unsigned int i;
NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_info(dev)->vlan_id);
if (vlan->flags) {
f.flags = vlan->flags;
f.mask = ~0;
NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f);
}
if (vlan->nr_ingress_mappings) {
nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS);
if (nest == NULL)
goto nla_put_failure;
for (i = 0; i < ARRAY_SIZE(vlan->ingress_priority_map); i++) {
if (!vlan->ingress_priority_map[i])
continue;
m.from = i;
m.to = vlan->ingress_priority_map[i];
NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
sizeof(m), &m);
}
nla_nest_end(skb, nest);
}
if (vlan->nr_egress_mappings) {
nest = nla_nest_start(skb, IFLA_VLAN_EGRESS_QOS);
if (nest == NULL)
goto nla_put_failure;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
for (pm = vlan->egress_priority_map[i]; pm;
pm = pm->next) {
if (!pm->vlan_qos)
continue;
m.from = pm->priority;
m.to = (pm->vlan_qos >> 13) & 0x7;
NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
sizeof(m), &m);
}
}
nla_nest_end(skb, nest);
}
return 0;
nla_put_failure:
return -EMSGSIZE;
}
struct rtnl_link_ops vlan_link_ops __read_mostly = {
.kind = "vlan",
.maxtype = IFLA_VLAN_MAX,
.policy = vlan_policy,
.priv_size = sizeof(struct vlan_dev_info),
.get_tx_queues = vlan_get_tx_queues,
.setup = vlan_setup,
.validate = vlan_validate,
.newlink = vlan_newlink,
.changelink = vlan_changelink,
.dellink = unregister_vlan_dev,
.get_size = vlan_get_size,
.fill_info = vlan_fill_info,
};
int __init vlan_netlink_init(void)
{
return rtnl_link_register(&vlan_link_ops);
}
void __exit vlan_netlink_fini(void)
{
rtnl_link_unregister(&vlan_link_ops);
}
MODULE_ALIAS_RTNL_LINK("vlan");

331
kernel/net/8021q/vlanproc.c Normal file

@@ -0,0 +1,331 @@
/******************************************************************************
* vlanproc.c VLAN Module. /proc filesystem interface.
*
* This module is completely hardware-independent and provides
* access to the router using Linux /proc filesystem.
*
* Author: Ben Greear, <greearb@candelatech.com> copied from wanproc.c
* by: Gene Kozin <genek@compuserve.com>
*
* Copyright: (c) 1998 Ben Greear
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
* Jan 20, 1998 Ben Greear Initial Version
*****************************************************************************/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "vlanproc.h"
#include "vlan.h"
/****** Function Prototypes *************************************************/
/* Methods for preparing data for reading proc entries */
static int vlan_seq_show(struct seq_file *seq, void *v);
static void *vlan_seq_start(struct seq_file *seq, loff_t *pos);
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos);
static void vlan_seq_stop(struct seq_file *seq, void *);
static int vlandev_seq_show(struct seq_file *seq, void *v);
/*
* Global Data
*/
/*
* Names of the proc directory entries
*/
static const char name_root[] = "vlan";
static const char name_conf[] = "config";
/*
* Structures for interfacing with the /proc filesystem.
 * VLAN creates its own directory /proc/net/vlan with the following
* entries:
* config device status/configuration
* <device> entry for each device
*/
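/*
 * Illustrative sample of /proc/net/vlan/config (names and VIDs are made up;
 * the format is produced by vlan_seq_show() below):
 *
 *   VLAN Dev name    | VLAN ID
 *   Name-Type: VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD
 *   eth0.100       | 100 | eth0
 */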
/*
* Generic /proc/net/vlan/<file> file and inode operations
*/
static const struct seq_operations vlan_seq_ops = {
.start = vlan_seq_start,
.next = vlan_seq_next,
.stop = vlan_seq_stop,
.show = vlan_seq_show,
};
static int vlan_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &vlan_seq_ops,
sizeof(struct seq_net_private));
}
static const struct file_operations vlan_fops = {
.owner = THIS_MODULE,
.open = vlan_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
/*
* /proc/net/vlan/<device> file and inode operations
*/
static int vlandev_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, vlandev_seq_show, PDE(inode)->data);
}
static const struct file_operations vlandev_fops = {
.owner = THIS_MODULE,
.open = vlandev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
 * Proc filesystem directory entries.
*/
/* Strings */
static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = {
[VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID",
[VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD",
[VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD",
[VLAN_NAME_TYPE_PLUS_VID] = "VLAN_NAME_TYPE_PLUS_VID",
};
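/* For illustration, assuming a real device "eth0" and VLAN id 5, the four
 * naming schemes above would yield interface names roughly like:
 *   VLAN_NAME_TYPE_PLUS_VID            -> vlan0005
 *   VLAN_NAME_TYPE_PLUS_VID_NO_PAD     -> vlan5
 *   VLAN_NAME_TYPE_RAW_PLUS_VID        -> eth0.0005
 *   VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD -> eth0.5
 */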
/*
* Interface functions
*/
/*
* Clean up /proc/net/vlan entries
*/
void vlan_proc_cleanup(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
if (vn->proc_vlan_conf)
remove_proc_entry(name_conf, vn->proc_vlan_dir);
if (vn->proc_vlan_dir)
proc_net_remove(net, name_root);
/* Dynamically added entries should be cleaned up as their vlan_device
* is removed, so we should not have to take care of it here...
*/
}
/*
* Create /proc/net/vlan entries
*/
int vlan_proc_init(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net);
if (!vn->proc_vlan_dir)
goto err;
vn->proc_vlan_conf = proc_create(name_conf, S_IFREG|S_IRUSR|S_IWUSR,
vn->proc_vlan_dir, &vlan_fops);
if (!vn->proc_vlan_conf)
goto err;
return 0;
err:
pr_err("%s: can't create entry in proc filesystem!\n", __func__);
vlan_proc_cleanup(net);
return -ENOBUFS;
}
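/* The entries above live in per-namespace storage (net_generic), so each
 * network namespace gets its own /proc/net/vlan directory and config file.
 */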
/*
* Add directory entry for VLAN device.
*/
int vlan_proc_add_dev(struct net_device *vlandev)
{
struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
dev_info->dent =
proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
vn->proc_vlan_dir, &vlandev_fops, vlandev);
if (!dev_info->dent)
return -ENOBUFS;
return 0;
}
/*
* Delete directory entry for VLAN device.
*/
int vlan_proc_rem_dev(struct net_device *vlandev)
{
struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
/** NOTE: This will consume the memory pointed to by dent, it seems. */
if (vlan_dev_info(vlandev)->dent) {
remove_proc_entry(vlan_dev_info(vlandev)->dent->name,
vn->proc_vlan_dir);
vlan_dev_info(vlandev)->dent = NULL;
}
return 0;
}
/****** Proc filesystem entry points ****************************************/
/*
* The following few functions build the content of /proc/net/vlan/config
*/
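/*
 * These implement the usual seq_file iterator protocol: start() positions
 * the walk (returning SEQ_START_TOKEN for the header line), next() advances
 * to the following VLAN device, show() formats one entry, and stop() drops
 * the dev_base_lock taken in start().
 */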
/* start read of /proc/net/vlan/config */
static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(dev_base_lock)
{
struct net_device *dev;
struct net *net = seq_file_net(seq);
loff_t i = 1;
read_lock(&dev_base_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for_each_netdev(net, dev) {
if (!is_vlan_dev(dev))
continue;
if (i++ == *pos)
return dev;
}
return NULL;
}
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net_device *dev;
struct net *net = seq_file_net(seq);
++*pos;
dev = (struct net_device *)v;
if (v == SEQ_START_TOKEN)
dev = net_device_entry(&net->dev_base_head);
for_each_netdev_continue(net, dev) {
if (!is_vlan_dev(dev))
continue;
return dev;
}
return NULL;
}
static void vlan_seq_stop(struct seq_file *seq, void *v)
__releases(dev_base_lock)
{
read_unlock(&dev_base_lock);
}
static int vlan_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_net(seq);
struct vlan_net *vn = net_generic(net, vlan_net_id);
if (v == SEQ_START_TOKEN) {
const char *nmtype = NULL;
seq_puts(seq, "VLAN Dev name | VLAN ID\n");
if (vn->name_type < ARRAY_SIZE(vlan_name_type_str))
nmtype = vlan_name_type_str[vn->name_type];
seq_printf(seq, "Name-Type: %s\n",
nmtype ? nmtype : "UNKNOWN");
} else {
const struct net_device *vlandev = v;
const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
seq_printf(seq, "%-15s| %d | %s\n", vlandev->name,
dev_info->vlan_id, dev_info->real_dev->name);
}
return 0;
}
static int vlandev_seq_show(struct seq_file *seq, void *offset)
{
struct net_device *vlandev = (struct net_device *) seq->private;
const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
const struct net_device_stats *stats;
static const char fmt[] = "%30s %12lu\n";
int i;
if (!is_vlan_dev(vlandev))
return 0;
stats = dev_get_stats(vlandev);
seq_printf(seq,
"%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n",
vlandev->name, dev_info->vlan_id,
(int)(dev_info->flags & 1), vlandev->priv_flags);
seq_printf(seq, fmt, "total frames received", stats->rx_packets);
seq_printf(seq, fmt, "total bytes received", stats->rx_bytes);
seq_printf(seq, fmt, "Broadcast/Multicast Rcvd", stats->multicast);
seq_puts(seq, "\n");
seq_printf(seq, fmt, "total frames transmitted", stats->tx_packets);
seq_printf(seq, fmt, "total bytes transmitted", stats->tx_bytes);
seq_printf(seq, fmt, "total headroom inc",
dev_info->cnt_inc_headroom_on_tx);
seq_printf(seq, fmt, "total encap on xmit",
dev_info->cnt_encap_on_xmit);
seq_printf(seq, "Device: %s", dev_info->real_dev->name);
/* now show all PRIORITY mappings relating to this VLAN */
seq_printf(seq, "\nINGRESS priority mappings: "
"0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n",
dev_info->ingress_priority_map[0],
dev_info->ingress_priority_map[1],
dev_info->ingress_priority_map[2],
dev_info->ingress_priority_map[3],
dev_info->ingress_priority_map[4],
dev_info->ingress_priority_map[5],
dev_info->ingress_priority_map[6],
dev_info->ingress_priority_map[7]);
seq_printf(seq, " EGRESS priority mappings: ");
for (i = 0; i < 16; i++) {
const struct vlan_priority_tci_mapping *mp
= dev_info->egress_priority_map[i];
while (mp) {
seq_printf(seq, "%u:%hu ",
mp->priority, ((mp->vlan_qos >> 13) & 0x7));
mp = mp->next;
}
}
seq_puts(seq, "\n");
return 0;
}
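/* Illustrative only: each VLAN device also gets its own file, so e.g.
 * "cat /proc/net/vlan/eth0.100" (arbitrary name) would print the per-device
 * statistics and priority mappings formatted above.
 */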

20
kernel/net/8021q/vlanproc.h Normal file
View File

@@ -0,0 +1,20 @@
#ifndef __BEN_VLAN_PROC_INC__
#define __BEN_VLAN_PROC_INC__
#ifdef CONFIG_PROC_FS
struct net;
int vlan_proc_init(struct net *net);
int vlan_proc_rem_dev(struct net_device *vlandev);
int vlan_proc_add_dev(struct net_device *vlandev);
void vlan_proc_cleanup(struct net *net);
#else /* No CONFIG_PROC_FS */
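/* With procfs disabled, the stubs below turn the proc hooks into no-ops
 * (returning 0 where callers check a result), so callers need no #ifdefs.
 */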
#define vlan_proc_init(net) (0)
#define vlan_proc_cleanup(net) do {} while (0)
#define vlan_proc_add_dev(dev) ({(void)(dev), 0; })
#define vlan_proc_rem_dev(dev) ({(void)(dev), 0; })
#endif
#endif /* !(__BEN_VLAN_PROC_INC__) */