/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. */
#include <config.h>
#include "ofproto/ofproto-dpif-xlate.h"
#include <errno.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>
#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "mcast-snooping.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif-trace.h"
#include "ofproto/ofproto-dpif-xlate-cache.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/meta-flow.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/ofp-ed-props.h"
#include "openvswitch/vlog.h"
#include "ovs-lldp.h"
#include "ovs-router.h"
#include "packets.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
#include "tunnel.h"
#include "util.h"
#include "uuid.h"
COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
COVERAGE_DEFINE(xlate_actions_too_many_output);
VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
/* Maximum depth of flow table recursion (due to resubmit actions) in a
* flow translation.
*
* The goal of limiting the depth of resubmits is to ensure that flow
* translation eventually terminates. Only resubmits to the same table or an
* earlier table count against the maximum depth. This is because resubmits to
* strictly monotonically increasing table IDs will eventually terminate, since
* any OpenFlow switch has a finite number of tables. OpenFlow tables are most
* commonly traversed in numerically increasing order, so this limit has little
* effect on conventionally designed OpenFlow pipelines.
*
* Outputs to patch ports and to groups also count against the depth limit. */
#define MAX_DEPTH 64
/* Maximum number of resubmit actions in a flow translation, whether they are
* recursive or not. */
#define MAX_RESUBMITS (MAX_DEPTH * MAX_DEPTH)
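/* For example (illustrative flows, not taken from any particular setup), a
 * pipeline such as
 *
 *     table=0 actions=resubmit(,1)
 *     table=1 actions=resubmit(,0)
 *
 * bounces between the two tables indefinitely.  Each resubmit from table 1
 * back to table 0 targets an earlier table, so it counts against the depth
 * limit and translation aborts once the nesting exceeds MAX_DEPTH.  A
 * pipeline that only ever resubmits to strictly higher-numbered tables is
 * bounded by the number of tables and is limited only by MAX_RESUBMITS. */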
/* The structure holds an array of IP addresses assigned to a bridge and the
* number of elements in the array. These data are mutable and are evaluated
* when ARP or Neighbor Advertisement packets received on a native tunnel
* port are xlated. So 'ref_cnt' and RCU are used for synchronization. */
struct xbridge_addr {
struct in6_addr *addr; /* Array of IP addresses of xbridge. */
int n_addr; /* Number of IP addresses. */
struct ovs_refcount ref_cnt;
};
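/* Illustrative reader-side sketch (hypothetical code; the actual use is in
 * the native tunnel ARP/ND handling later in this file): an RCU reader may
 * walk the array directly, because a writer that replaces 'xbridge->addr'
 * defers freeing the old array until readers have quiesced and its last
 * reference has been released:
 *
 *     const struct xbridge_addr *xaddr = xbridge->addr;
 *     for (int i = 0; xaddr && i < xaddr->n_addr; i++) {
 *         if (ipv6_addr_equals(&xaddr->addr[i], &dst)) {
 *             ...the request targets one of the bridge's own addresses...
 *         }
 *     }
 */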
struct xbridge {
struct hmap_node hmap_node; /* Node in global 'xbridges' map. */
struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */
struct ovs_list xbundles; /* Owned xbundles. */
struct hmap xports; /* Indexed by ofp_port. */
char *name; /* Name used in log messages. */
struct dpif *dpif; /* Datapath interface. */
struct mac_learning *ml; /* Mac learning handle. */
struct mcast_snooping *ms; /* Multicast Snooping handle. */
struct mbridge *mbridge; /* Mirroring. */
struct dpif_sflow *sflow; /* SFlow handle, or null. */
struct dpif_ipfix *ipfix; /* Ipfix handle, or null. */
struct netflow *netflow; /* Netflow handle, or null. */
struct stp *stp; /* STP or null if disabled. */
struct rstp *rstp; /* RSTP or null if disabled. */
bool has_in_band; /* Bridge has in band control? */
bool forward_bpdu; /* Bridge forwards STP BPDUs? */
/* Datapath feature support. */
struct dpif_backer_support support;
struct xbridge_addr *addr;
};
struct xbundle {
struct hmap_node hmap_node; /* In global 'xbundles' map. */
struct ofbundle *ofbundle; /* Key in global 'xbundles' map. */
struct ovs_list list_node; /* In parent 'xbridges' list. */
struct xbridge *xbridge; /* Parent xbridge. */
struct ovs_list xports; /* Contains "struct xport"s. */
char *name; /* Name used in log messages. */
struct bond *bond; /* Nonnull iff more than one port. */
struct lacp *lacp; /* LACP handle or null. */
enum port_vlan_mode vlan_mode; /* VLAN mode. */
uint16_t qinq_ethtype; /* Ethertype of dot1q-tunnel interface
* either 0x8100 or 0x88a8. */
int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
* NULL if all VLANs are trunked. */
unsigned long *cvlans; /* Bitmap of allowed customer vlans,
* NULL if all VLANs are allowed */
bool use_priority_tags; /* Use 802.1p tag for frames in VLAN 0? */
bool floodable; /* No port has OFPUTIL_PC_NO_FLOOD set? */
bool protected; /* Protected port mode */
};
struct xport {
struct hmap_node hmap_node; /* Node in global 'xports' map. */
struct ofport_dpif *ofport; /* Key in global 'xports map. */
struct hmap_node ofp_node; /* Node in parent xbridge 'xports' map. */
ofp_port_t ofp_port; /* Key in parent xbridge 'xports' map. */
struct hmap_node uuid_node; /* Node in global 'xports_uuid' map. */
struct uuid uuid; /* Key in global 'xports_uuid' map. */
odp_port_t odp_port; /* Datapath port number or ODPP_NONE. */
struct ovs_list bundle_node; /* In parent xbundle (if it exists). */
struct xbundle *xbundle; /* Parent xbundle or null. */
struct netdev *netdev; /* 'ofport''s netdev. */
struct xbridge *xbridge; /* Parent bridge. */
struct xport *peer; /* Patch port peer or null. */
enum ofputil_port_config config; /* OpenFlow port configuration. */
enum ofputil_port_state state; /* OpenFlow port state. */
int stp_port_no; /* STP port number or -1 if not in use. */
struct rstp_port *rstp_port; /* RSTP port or null. */
struct hmap skb_priorities; /* Map of 'skb_priority_to_dscp's. */
bool may_enable; /* May be enabled in bonds. */
bool is_tunnel; /* Is a tunnel port. */
enum netdev_pt_mode pt_mode; /* packet_type handling. */
struct cfm *cfm; /* CFM handle or null. */
struct bfd *bfd; /* BFD handle or null. */
struct lldp *lldp; /* LLDP handle or null. */
};
struct xlate_ctx {
struct xlate_in *xin;
struct xlate_out *xout;
struct xlate_cfg *xcfg;
const struct xbridge *xbridge;
/* Flow at the last commit. */
struct flow base_flow;
/* Tunnel IP destination address as received. This is stored separately
* as the base_flow.tunnel is cleared on init to reflect the datapath
* behavior. Used to make sure not to send tunneled output to ourselves,
* which might lead to an infinite loop. This could happen easily
* if a tunnel is marked as 'remote_ip=flow', and the flow does not
* actually set the tun_dst field. */
struct in6_addr orig_tunnel_ipv6_dst;
/* Stack for the push and pop actions. See comment above nx_stack_push()
* in nx-match.c for info on how the stack is stored. */
struct ofpbuf stack;
/* The rule that we are currently translating, or NULL. */
struct rule_dpif *rule;
/* Flow translation populates this with wildcards relevant in translation.
* When 'xin->wc' is nonnull, this is the same pointer. When 'xin->wc' is
* null, this is a pointer to a temporary buffer. */
struct flow_wildcards *wc;
/* Output buffer for datapath actions. When 'xin->odp_actions' is nonnull,
* this is the same pointer. When 'xin->odp_actions' is null, this points
* to a scratch ofpbuf. This allows code to add actions to
* 'ctx->odp_actions' without worrying about whether the caller really
* wants actions. */
struct ofpbuf *odp_actions;
/* Statistics maintained by xlate_table_action().
*
* These statistics limit the amount of work that a single flow
* translation can perform. The goal of the first of these, 'depth', is
* primarily to prevent translation from performing an infinite amount of
* work. It counts the current depth of nested "resubmit"s (and a few
* other activities); when a resubmit returns, it decreases. Resubmits to
* tables in strictly monotonically increasing order don't contribute to
* 'depth' because they cannot cause a flow translation to take an infinite
* amount of time (because the number of tables is finite). Translation
* aborts when 'depth' exceeds MAX_DEPTH.
*
* 'resubmits', on the other hand, prevents flow translation from
* performing an extraordinarily large, though still finite, amount of work.
* It counts the total number of resubmits (and a few other activities)
* that have been executed. Returning from a resubmit does not affect this
* counter. Thus, this limits the amount of work that a particular
* translation can perform. Translation aborts when 'resubmits' exceeds
* MAX_RESUBMITS (which is much larger than MAX_DEPTH).
*/
int depth; /* Current resubmit nesting depth. */
int resubmits; /* Total number of resubmits. */
bool in_action_set; /* Currently translating action_set, if true. */
bool in_packet_out; /* Currently translating a packet_out msg, if
* true. */
bool pending_encap; /* True when waiting to commit a pending
* encap action. */
bool pending_decap; /* True when waiting to commit a pending
* decap action. */
struct ofpbuf *encap_data; /* May contain a pointer to an ofpbuf with
* context for the datapath encap action.*/
uint8_t table_id; /* OpenFlow table ID where flow was found. */
ovs_be64 rule_cookie; /* Cookie of the rule being translated. */
uint32_t orig_skb_priority; /* Priority when packet arrived. */
uint32_t sflow_n_outputs; /* Number of output ports. */
odp_port_t sflow_odp_port; /* Output port for composing sFlow action. */
ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
bool exit; /* No further actions should be processed. */
mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
int mirror_snaplen; /* Max size of a mirror packet in bytes. */
/* Freezing Translation
* ====================
*
* At some point during translation, the code may recognize the need to halt
* and checkpoint the translation in a way that it can be restarted again
* later. We call the checkpointing process "freezing" and the restarting
* process "thawing".
*
* The use cases for freezing are:
*
* - "Recirculation", where the translation process discovers that it
* doesn't have enough information to complete translation without
* actually executing the actions that have already been translated,
* which provides the additionally needed information. In these
* situations, the translation process freezes and assigns the frozen
* data a unique "recirculation ID", which it associates with the data
* in a table in userspace (see ofproto-dpif-rid.h). It also adds a
* OVS_ACTION_ATTR_RECIRC action specifying that ID to the datapath
* actions. When a packet hits that action, the datapath looks its
* flow up again using the ID. If there's a miss, it comes back to
* userspace, which finds the recirculation table entry for the ID,
* thaws the associated frozen data, and continues translation from
* that point given the additional information that is now known.
*
* The archetypal example is MPLS. As MPLS is implemented in
* OpenFlow, the protocol that follows the last MPLS label becomes
* known only when that label is popped by an OpenFlow action. That
* means that Open vSwitch can't extract the headers beyond the MPLS
* labels until the pop action is executed. Thus, at that point
* translation uses the recirculation process to extract the headers
* beyond the MPLS labels.
*
* (OVS also uses OVS_ACTION_ATTR_RECIRC to implement hashing for
* output to bonds. OVS pre-populates all the datapath flows for bond
* output in the datapath, though, which means that the elaborate
* process of coming back to userspace for a second round of
* translation isn't needed, and so bonds don't follow the above
* process.)
*
* - "Continuation". A continuation is a way for an OpenFlow controller
* to interpose on a packet's traversal of the OpenFlow tables. When
* the translation process encounters a "controller" action with the
* "pause" flag, it freezes translation, serializes the frozen data,
* and sends it to an OpenFlow controller. The controller then
* examines and possibly modifies the frozen data and eventually sends
* it back to the switch, which thaws it and continues translation.
*
* The main problem of freezing translation is preserving state, so that
* when the translation is thawed later it resumes from where it left off,
* without disruption. In particular, actions must be preserved as follows:
*
* - If we're freezing because an action needed more information, the
* action that prompted it.
*
* - Any actions remaining to be translated within the current flow.
*
* - If translation was frozen within a NXAST_RESUBMIT, then any actions
* following the resubmit action. Resubmit actions can be nested, so
* this has to go all the way up the control stack.
*
* - The OpenFlow 1.1+ action set.
*
* State that actions and flow table lookups can depend on, such as the
* following, must also be preserved:
*
* - Metadata fields (input port, registers, OF1.1+ metadata, ...).
*
* - The stack used by NXAST_STACK_PUSH and NXAST_STACK_POP actions.
*
* - The table ID and cookie of the flow being translated at each level
* of the control stack, because these can become visible through
* OFPAT_CONTROLLER actions (and other ways).
*
* Translation allows for the control of this state preservation via these
* members. When a need to freeze translation is identified, the
* translation process:
*
* 1. Sets 'freezing' to true.
*
* 2. Sets 'exit' to true to tell later steps that we're exiting from the
* translation process.
*
* 3. Adds an OFPACT_UNROLL_XLATE action to 'frozen_actions', and points
* frozen_actions.header to the action to make it easy to find it later.
* This action holds the current table ID and cookie so that they can be
* restored during a post-recirculation upcall translation.
*
* 4. Adds the action that prompted recirculation and any actions following
* it within the same flow to 'frozen_actions', so that they can be
* executed during a post-recirculation upcall translation.
*
* 5. Returns.
*
* 6. The action that prompted recirculation might be nested in a stack of
* nested "resubmit"s that have actions remaining. Each of these notices
* that we're exiting and freezing and responds by adding more
* OFPACT_UNROLL_XLATE actions to 'frozen_actions', as necessary,
* followed by any actions that were yet unprocessed.
*
* If we're freezing because of recirculation, the caller generates a
* recirculation ID and associates all the state produced by this process
* with it. For post-recirculation upcall translation, the caller passes it
* back in for the new translation to execute. The process yielded a set of
* ofpacts that can be translated directly, so it is not much of a special
* case at that point.
*/
bool freezing;
bool recirc_update_dp_hash; /* Generated recirculation will be preceded
* by datapath HASH action to get an updated
* dp_hash after recirculation. */
uint32_t dp_hash_alg;
uint32_t dp_hash_basis;
struct ofpbuf frozen_actions;
const struct ofpact_controller *pause;
/* True if a packet was but is no longer MPLS (due to an MPLS pop action).
* This is a trigger for recirculation in cases where translating an action
* or looking up a flow requires access to the fields of the packet after
* the MPLS label stack that was originally present. */
bool was_mpls;
/* True if conntrack has been performed on this packet during processing
* on the current bridge. This is used to determine whether conntrack
* state from the datapath should be honored after thawing. */
bool conntracked;
/* Pointer to an embedded NAT action in a conntrack action, or NULL. */
struct ofpact_nat *ct_nat_action;
/* OpenFlow 1.1+ action set.
*
* 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
* When translation is otherwise complete, ofpacts_execute_action_set()
* converts it to a set of "struct ofpact"s that can be translated into
* datapath actions. */
bool action_set_has_group; /* Action set contains OFPACT_GROUP? */
struct ofpbuf action_set; /* Action set. */
enum xlate_error error; /* Translation failed. */
};
/* Structure to track VLAN manipulation */
struct xvlan_single {
uint16_t tpid;
uint16_t vid;
uint16_t pcp;
};
struct xvlan {
struct xvlan_single v[FLOW_MAX_VLAN_HEADERS];
};
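/* For example (illustrative): for an 802.1ad (QinQ) frame carrying an outer
 * service tag and an inner customer tag, xvlan_extract() below fills in
 *
 *     v[0] = { .tpid = 0x88a8, .vid = <service VLAN>, .pcp = <outer PCP> };
 *     v[1] = { .tpid = 0x8100, .vid = <customer VLAN>, .pcp = <inner PCP> };
 *
 * and leaves the remaining entries zeroed.  A plain 802.1Q frame uses only
 * v[0] with tpid 0x8100. */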
const char *xlate_strerror(enum xlate_error error)
{
switch (error) {
case XLATE_OK:
return "OK";
case XLATE_BRIDGE_NOT_FOUND:
return "Bridge not found";
case XLATE_RECURSION_TOO_DEEP:
return "Recursion too deep";
case XLATE_TOO_MANY_RESUBMITS:
return "Too many resubmits";
case XLATE_STACK_TOO_DEEP:
return "Stack too deep";
case XLATE_NO_RECIRCULATION_CONTEXT:
return "No recirculation context";
case XLATE_RECIRCULATION_CONFLICT:
return "Recirculation conflict";
case XLATE_TOO_MANY_MPLS_LABELS:
return "Too many MPLS labels";
case XLATE_INVALID_TUNNEL_METADATA:
return "Invalid tunnel metadata";
case XLATE_UNSUPPORTED_PACKET_TYPE:
return "Unsupported packet type";
}
return "Unknown error";
}
static void xlate_action_set(struct xlate_ctx *ctx);
static void xlate_commit_actions(struct xlate_ctx *ctx);
static void
patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
struct xport *out_dev);
static void
ctx_trigger_freeze(struct xlate_ctx *ctx)
{
ctx->exit = true;
ctx->freezing = true;
}
static void
ctx_trigger_recirculate_with_hash(struct xlate_ctx *ctx, uint32_t type,
uint32_t basis)
{
ctx->exit = true;
ctx->freezing = true;
ctx->recirc_update_dp_hash = true;
ctx->dp_hash_alg = type;
ctx->dp_hash_basis = basis;
}
static bool
ctx_first_frozen_action(const struct xlate_ctx *ctx)
{
return !ctx->frozen_actions.size;
}
static void
ctx_cancel_freeze(struct xlate_ctx *ctx)
{
if (ctx->freezing) {
ctx->freezing = false;
ctx->recirc_update_dp_hash = false;
ofpbuf_clear(&ctx->frozen_actions);
ctx->frozen_actions.header = NULL;
}
}
static void finish_freezing(struct xlate_ctx *ctx);
/* A controller may use OFPP_NONE as the ingress port to indicate that
* a packet did not arrive on a "real" port. 'ofpp_none_bundle' exists for
* when an input bundle is needed for validation (e.g., mirroring or
* OFPP_NORMAL processing). It is not connected to an 'ofproto', nor does it
* have any 'port' structs, so care must be taken when dealing with it. */
static struct xbundle ofpp_none_bundle = {
.name = "OFPP_NONE",
.vlan_mode = PORT_VLAN_TRUNK
};
/* Node in 'xport''s 'skb_priorities' map. Used to maintain a map from
* 'priority' (the datapath's term for QoS queue) to the DSCP bits with which
* all traffic egressing the 'ofport' at that priority should be marked. */
struct skb_priority_to_dscp {
struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
uint32_t skb_priority; /* Priority of this queue (see struct flow). */
uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
};
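/* For example (illustrative): a queue configured with DSCP 46 (Expedited
 * Forwarding) is stored here with 'dscp' = 46 << 2 = 0xb8, i.e. already
 * shifted into the upper six bits of the IP ToS byte; see the qdscp_list
 * handling in xlate_ofport_set() below. */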
/* Xlate config contains hash maps of all bridges, bundles and ports.
* Xcfgp contains the pointer to the current xlate configuration.
* When the main thread needs to change the configuration, it copies xcfgp to
* new_xcfg and edits new_xcfg. This enables the use of RCU locking which
* does not block handler and revalidator threads. */
struct xlate_cfg {
struct hmap xbridges;
struct hmap xbundles;
struct hmap xports;
struct hmap xports_uuid;
};
static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
static struct xlate_cfg *new_xcfg = NULL;
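/* Illustrative sketch of the reader side (the same pattern appears in, e.g.,
 * xlate_txn_commit() and xlate_lookup_ofproto_() below): handler and
 * revalidator threads dereference the RCU pointer and use the resulting
 * snapshot without taking any lock:
 *
 *     struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
 *     struct xport *xport = xport_lookup(xcfg, ofport);
 */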
typedef void xlate_actions_handler(const struct ofpact *, size_t ofpacts_len,
struct xlate_ctx *, bool, bool);
static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
struct xlate_ctx *, bool, bool);
static void clone_xlate_actions(const struct ofpact *, size_t ofpacts_len,
struct xlate_ctx *, bool, bool);
static void xlate_normal(struct xlate_ctx *);
static void xlate_normal_flood(struct xlate_ctx *ct,
struct xbundle *in_xbundle, struct xvlan *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
uint8_t table_id, bool may_packet_in,
bool honor_table_miss, bool with_ct_orig,
bool is_last_action, xlate_actions_handler *);
static bool input_vid_is_valid(const struct xlate_ctx *,
uint16_t vid, struct xbundle *);
static void xvlan_copy(struct xvlan *dst, const struct xvlan *src);
static void xvlan_pop(struct xvlan *src);
static void xvlan_push_uninit(struct xvlan *src);
static void xvlan_extract(const struct flow *, struct xvlan *);
static void xvlan_put(struct flow *, const struct xvlan *);
static void xvlan_input_translate(const struct xbundle *,
const struct xvlan *in,
struct xvlan *xvlan);
static void xvlan_output_translate(const struct xbundle *,
const struct xvlan *xvlan,
struct xvlan *out);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
const struct xvlan *);
/* Optional bond recirculation parameter to compose_output_action(). */
struct xlate_bond_recirc {
uint32_t recirc_id; /* !0 Use recirculation instead of output. */
uint8_t hash_alg; /* !0 Compute hash for recirc before. */
uint32_t hash_basis; /* Compute hash for recirc before. */
};
static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
const struct xlate_bond_recirc *xr,
bool is_last_action, bool truncate);
static struct xbridge *xbridge_lookup(struct xlate_cfg *,
const struct ofproto_dpif *);
static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
const struct uuid *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
const struct ofport_dpif *);
static struct xport *xport_lookup_by_uuid(struct xlate_cfg *,
const struct uuid *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static size_t count_skb_priorities(const struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
uint8_t *dscp);
static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_set(struct xbridge *, struct dpif *,
const struct mac_learning *, struct stp *,
struct rstp *, const struct mcast_snooping *,
const struct mbridge *,
const struct dpif_sflow *,
const struct dpif_ipfix *,
const struct netflow *,
bool forward_bpdu, bool has_in_band,
const struct dpif_backer_support *,
const struct xbridge_addr *);
static void xlate_xbundle_set(struct xbundle *xbundle,
enum port_vlan_mode vlan_mode,
uint16_t qinq_ethtype, int vlan,
unsigned long *trunks, unsigned long *cvlans,
bool use_priority_tags,
const struct bond *bond, const struct lacp *lacp,
bool floodable, bool protected);
static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
const struct netdev *netdev, const struct cfm *cfm,
const struct bfd *bfd, const struct lldp *lldp,
int stp_port_no, const struct rstp_port *rstp_port,
enum ofputil_port_config config,
enum ofputil_port_state state, bool is_tunnel,
bool may_enable);
static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_copy(struct xbridge *);
static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
static void xlate_xport_copy(struct xbridge *, struct xbundle *,
struct xport *);
static void xlate_xcfg_free(struct xlate_cfg *);
/* Tracing helpers. */
/* If tracing is enabled in 'ctx', creates a new trace node and appends it to
* the list of nodes maintained in ctx->xin. The new node has type 'type' and
* its text is created from 'format' by treating it as a printf format string.
* Returns the list of nodes embedded within the new trace node; ordinarily,
* the caller can ignore this, but it is useful if the caller needs to nest
* more trace nodes within the new node.
*
* If tracing is not enabled, does nothing and returns NULL. */
static struct ovs_list * OVS_PRINTF_FORMAT(3, 4)
xlate_report(const struct xlate_ctx *ctx, enum oftrace_node_type type,
const char *format, ...)
{
struct ovs_list *subtrace = NULL;
if (OVS_UNLIKELY(ctx->xin->trace)) {
va_list args;
va_start(args, format);
char *text = xvasprintf(format, args);
subtrace = &oftrace_report(ctx->xin->trace, type, text)->subs;
va_end(args);
free(text);
}
return subtrace;
}
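/* Illustrative sketch (hypothetical caller) of nesting further trace nodes
 * under the node created by xlate_report(), using its return value:
 *
 *     struct ovs_list *old_trace = ctx->xin->trace;
 *     ctx->xin->trace = xlate_report(ctx, OFT_DETAIL, "processing bundle");
 *     ...subsequent xlate_report() calls attach under the new node...
 *     ctx->xin->trace = old_trace;
 *
 * When tracing is disabled this is harmless: xlate_report() returns NULL and
 * 'ctx->xin->trace' was already NULL. */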
/* This is like xlate_report() for errors that are serious enough that we
* should log them even if we are not tracing. */
static void OVS_PRINTF_FORMAT(2, 3)
xlate_report_error(const struct xlate_ctx *ctx, const char *format, ...)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
if (!OVS_UNLIKELY(ctx->xin->trace)
&& (!ctx->xin->packet || VLOG_DROP_WARN(&rl))) {
return;
}
struct ds s = DS_EMPTY_INITIALIZER;
va_list args;
va_start(args, format);
ds_put_format_valist(&s, format, args);
va_end(args);
if (ctx->xin->trace) {
oftrace_report(ctx->xin->trace, OFT_ERROR, ds_cstr(&s));
} else {
ds_put_format(&s, " on bridge %s while processing ",
ctx->xbridge->name);
flow_format(&s, &ctx->base_flow, NULL);
VLOG_WARN("%s", ds_cstr(&s));
}
ds_destroy(&s);
}
/* This is like xlate_report() for messages that should be logged
* at the info level (even when not tracing). */
static void OVS_PRINTF_FORMAT(2, 3)
xlate_report_info(const struct xlate_ctx *ctx, const char *format, ...)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
if (!OVS_UNLIKELY(ctx->xin->trace)
&& (!ctx->xin->packet || VLOG_DROP_INFO(&rl))) {
return;
}
struct ds s = DS_EMPTY_INITIALIZER;
va_list args;
va_start(args, format);
ds_put_format_valist(&s, format, args);
va_end(args);
if (ctx->xin->trace) {
oftrace_report(ctx->xin->trace, OFT_WARN, ds_cstr(&s));
} else {
ds_put_format(&s, " on bridge %s while processing ",
ctx->xbridge->name);
flow_format(&s, &ctx->base_flow, NULL);
VLOG_INFO("%s", ds_cstr(&s));
}
ds_destroy(&s);
}
/* This is like xlate_report() for messages that should be logged at debug
* level (even if we are not tracing) because they can be valuable for
* debugging. */
static void OVS_PRINTF_FORMAT(3, 4)
xlate_report_debug(const struct xlate_ctx *ctx, enum oftrace_node_type type,
const char *format, ...)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
if (!OVS_UNLIKELY(ctx->xin->trace)
&& (!ctx->xin->packet || VLOG_DROP_DBG(&rl))) {
return;
}
struct ds s = DS_EMPTY_INITIALIZER;
va_list args;
va_start(args, format);
ds_put_format_valist(&s, format, args);
va_end(args);
if (ctx->xin->trace) {
oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
} else {
VLOG_DBG("bridge %s: %s", ctx->xbridge->name, ds_cstr(&s));
}
ds_destroy(&s);
}
/* If tracing is enabled in 'ctx', appends a node of the given 'type' to the
* trace, whose text is 'title' followed by a formatted version of the
* 'ofpacts_len' OpenFlow actions in 'ofpacts'.
*
* If tracing is not enabled, does nothing. */
static void
xlate_report_actions(const struct xlate_ctx *ctx, enum oftrace_node_type type,
const char *title,
const struct ofpact *ofpacts, size_t ofpacts_len)
{
if (OVS_UNLIKELY(ctx->xin->trace)) {
struct ds s = DS_EMPTY_INITIALIZER;
ds_put_format(&s, "%s: ", title);
struct ofpact_format_params fp = { .s = &s };
ofpacts_format(ofpacts, ofpacts_len, &fp);
oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
ds_destroy(&s);
}
}
/* If tracing is enabled in 'ctx', appends a node of type OFT_DETAIL to the
* trace, whose text is a formatted version of the OpenFlow action set.
* 'verb' should be "was" or "is", depending on whether the action set reported
* is the new action set or the old one.
*
* If tracing is not enabled, does nothing. */
static void
xlate_report_action_set(const struct xlate_ctx *ctx, const char *verb)
{
if (OVS_UNLIKELY(ctx->xin->trace)) {
struct ofpbuf action_list;
ofpbuf_init(&action_list, 0);
ofpacts_execute_action_set(&action_list, &ctx->action_set);
if (action_list.size) {
struct ds s = DS_EMPTY_INITIALIZER;
struct ofpact_format_params fp = { .s = &s };
ofpacts_format(action_list.data, action_list.size, &fp);
xlate_report(ctx, OFT_DETAIL, "action set %s: %s",
verb, ds_cstr(&s));
ds_destroy(&s);
} else {
xlate_report(ctx, OFT_DETAIL, "action set %s empty", verb);
}
ofpbuf_uninit(&action_list);
}
}
/* If tracing is enabled in 'ctx', appends a node representing 'rule' (in
* OpenFlow table 'table_id') to the trace and makes this node the parent for
* future trace nodes. The caller should save ctx->xin->trace before calling
* this function, then after tracing all of the activities under the table,
* restore its previous value.
*
* If tracing is not enabled, does nothing. */
static void
xlate_report_table(const struct xlate_ctx *ctx, struct rule_dpif *rule,
uint8_t table_id)
{
if (OVS_LIKELY(!ctx->xin->trace)) {
return;
}
struct ds s = DS_EMPTY_INITIALIZER;
ds_put_format(&s, "%2d. ", table_id);
if (rule == ctx->xin->ofproto->miss_rule) {
ds_put_cstr(&s, "No match, and a \"packet-in\" is called for.");
} else if (rule == ctx->xin->ofproto->no_packet_in_rule) {
ds_put_cstr(&s, "No match.");
} else if (rule == ctx->xin->ofproto->drop_frags_rule) {
ds_put_cstr(&s, "Packets are IP fragments and "
"the fragment handling mode is \"drop\".");
} else {
minimatch_format(&rule->up.cr.match,
ofproto_get_tun_tab(&ctx->xin->ofproto->up),
NULL, &s, OFP_DEFAULT_PRIORITY);
if (ds_last(&s) != ' ') {
ds_put_cstr(&s, ", ");
}
ds_put_format(&s, "priority %d", rule->up.cr.priority);
if (rule->up.flow_cookie) {
ds_put_format(&s, ", cookie %#"PRIx64,
ntohll(rule->up.flow_cookie));
}
}
ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_TABLE,
ds_cstr(&s))->subs;
ds_destroy(&s);
}
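/* Illustrative sketch (hypothetical caller) of the save/restore pattern
 * described in the comment on xlate_report_table() above:
 *
 *     struct ovs_list *old_trace = ctx->xin->trace;
 *     xlate_report_table(ctx, rule, table_id);
 *     ...trace the activities under this table...
 *     ctx->xin->trace = old_trace;
 */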
/* If tracing is enabled in 'ctx', adds an OFT_DETAIL trace node to 'ctx'
* reporting the value of subfield 'sf'.
*
* If tracing is not enabled, does nothing. */
static void
xlate_report_subfield(const struct xlate_ctx *ctx,
const struct mf_subfield *sf)
{
if (OVS_UNLIKELY(ctx->xin->trace)) {
struct ds s = DS_EMPTY_INITIALIZER;
mf_format_subfield(sf, &s);
ds_put_cstr(&s, " is now ");
if (sf->ofs == 0 && sf->n_bits >= sf->field->n_bits) {
union mf_value value;
mf_get_value(sf->field, &ctx->xin->flow, &value);
mf_format(sf->field, &value, NULL, NULL, &s);
} else {
union mf_subvalue cst;
mf_read_subfield(sf, &ctx->xin->flow, &cst);
ds_put_hex(&s, &cst, sizeof cst);
}
xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
ds_destroy(&s);
}
}
static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
ovs_list_init(&xbridge->xbundles);
hmap_init(&xbridge->xports);
hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
hash_pointer(xbridge->ofproto, 0));
}
static void
xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
ovs_list_init(&xbundle->xports);
ovs_list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
hash_pointer(xbundle->ofbundle, 0));
}
static void
xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
{
hmap_init(&xport->skb_priorities);
hmap_insert(&xcfg->xports, &xport->hmap_node,
hash_pointer(xport->ofport, 0));
hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
hash_ofp_port(xport->ofp_port));
hmap_insert(&xcfg->xports_uuid, &xport->uuid_node,
uuid_hash(&xport->uuid));
}
static struct xbridge_addr *
xbridge_addr_create(struct xbridge *xbridge)
{
struct xbridge_addr *xbridge_addr = xbridge->addr;
struct in6_addr *addr = NULL, *mask = NULL;
struct netdev *dev;
int err, n_addr = 0;
err = netdev_open(xbridge->name, NULL, &dev);
if (!err) {
err = netdev_get_addr_list(dev, &addr, &mask, &n_addr);
if (!err) {
if (!xbridge->addr ||
n_addr != xbridge->addr->n_addr ||
(xbridge->addr->addr && memcmp(addr, xbridge->addr->addr,
sizeof(*addr) * n_addr))) {
xbridge_addr = xzalloc(sizeof *xbridge_addr);
xbridge_addr->addr = addr;
xbridge_addr->n_addr = n_addr;
ovs_refcount_init(&xbridge_addr->ref_cnt);
} else {
free(addr);
}
free(mask);
}
netdev_close(dev);
}
return xbridge_addr;
}
static struct xbridge_addr *
xbridge_addr_ref(const struct xbridge_addr *addr_)
{
struct xbridge_addr *addr = CONST_CAST(struct xbridge_addr *, addr_);
if (addr) {
ovs_refcount_ref(&addr->ref_cnt);
}
return addr;
}
static void
xbridge_addr_unref(struct xbridge_addr *addr)
{
if (addr && ovs_refcount_unref_relaxed(&addr->ref_cnt) == 1) {
free(addr->addr);
free(addr);
}
}
static void
xlate_xbridge_set(struct xbridge *xbridge,
struct dpif *dpif,
const struct mac_learning *ml, struct stp *stp,
struct rstp *rstp, const struct mcast_snooping *ms,
const struct mbridge *mbridge,
const struct dpif_sflow *sflow,
const struct dpif_ipfix *ipfix,
const struct netflow *netflow,
bool forward_bpdu, bool has_in_band,
const struct dpif_backer_support *support,
const struct xbridge_addr *addr)
{
if (xbridge->ml != ml) {
mac_learning_unref(xbridge->ml);
xbridge->ml = mac_learning_ref(ml);
}
if (xbridge->ms != ms) {
mcast_snooping_unref(xbridge->ms);
xbridge->ms = mcast_snooping_ref(ms);
}
if (xbridge->mbridge != mbridge) {
mbridge_unref(xbridge->mbridge);
xbridge->mbridge = mbridge_ref(mbridge);
}
if (xbridge->sflow != sflow) {
dpif_sflow_unref(xbridge->sflow);
xbridge->sflow = dpif_sflow_ref(sflow);
}
if (xbridge->ipfix != ipfix) {
dpif_ipfix_unref(xbridge->ipfix);
xbridge->ipfix = dpif_ipfix_ref(ipfix);
}
if (xbridge->stp != stp) {
stp_unref(xbridge->stp);
xbridge->stp = stp_ref(stp);
}
if (xbridge->rstp != rstp) {
rstp_unref(xbridge->rstp);
xbridge->rstp = rstp_ref(rstp);
}
if (xbridge->netflow != netflow) {
netflow_unref(xbridge->netflow);
xbridge->netflow = netflow_ref(netflow);
}
if (xbridge->addr != addr) {
xbridge_addr_unref(xbridge->addr);
xbridge->addr = xbridge_addr_ref(addr);
}
xbridge->dpif = dpif;
xbridge->forward_bpdu = forward_bpdu;
xbridge->has_in_band = has_in_band;
xbridge->support = *support;
}
static void
xlate_xbundle_set(struct xbundle *xbundle,
enum port_vlan_mode vlan_mode, uint16_t qinq_ethtype,
int vlan, unsigned long *trunks, unsigned long *cvlans,
bool use_priority_tags,
const struct bond *bond, const struct lacp *lacp,
bool floodable, bool protected)
{
ovs_assert(xbundle->xbridge);
xbundle->vlan_mode = vlan_mode;
xbundle->qinq_ethtype = qinq_ethtype;
xbundle->vlan = vlan;
xbundle->trunks = trunks;
xbundle->cvlans = cvlans;
xbundle->use_priority_tags = use_priority_tags;
xbundle->floodable = floodable;
xbundle->protected = protected;
if (xbundle->bond != bond) {
bond_unref(xbundle->bond);
xbundle->bond = bond_ref(bond);
}
if (xbundle->lacp != lacp) {
lacp_unref(xbundle->lacp);
xbundle->lacp = lacp_ref(lacp);
}
}
static void
xlate_xport_set(struct xport *xport, odp_port_t odp_port,
const struct netdev *netdev, const struct cfm *cfm,
const struct bfd *bfd, const struct lldp *lldp, int stp_port_no,
const struct rstp_port* rstp_port,
enum ofputil_port_config config, enum ofputil_port_state state,
bool is_tunnel, bool may_enable)
{
xport->config = config;
xport->state = state;
xport->stp_port_no = stp_port_no;
xport->is_tunnel = is_tunnel;
xport->pt_mode = netdev_get_pt_mode(netdev);
xport->may_enable = may_enable;
xport->odp_port = odp_port;
if (xport->rstp_port != rstp_port) {
rstp_port_unref(xport->rstp_port);
xport->rstp_port = rstp_port_ref(rstp_port);
}
if (xport->cfm != cfm) {
cfm_unref(xport->cfm);
xport->cfm = cfm_ref(cfm);
}
if (xport->bfd != bfd) {
bfd_unref(xport->bfd);
xport->bfd = bfd_ref(bfd);
}
if (xport->lldp != lldp) {
lldp_unref(xport->lldp);
xport->lldp = lldp_ref(lldp);
}
if (xport->netdev != netdev) {
netdev_close(xport->netdev);
xport->netdev = netdev_ref(netdev);
}
}
static void
xlate_xbridge_copy(struct xbridge *xbridge)
{
struct xbundle *xbundle;
struct xport *xport;
struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
new_xbridge->ofproto = xbridge->ofproto;
new_xbridge->name = xstrdup(xbridge->name);
xlate_xbridge_init(new_xcfg, new_xbridge);
xlate_xbridge_set(new_xbridge,
xbridge->dpif, xbridge->ml, xbridge->stp,
xbridge->rstp, xbridge->ms, xbridge->mbridge,
xbridge->sflow, xbridge->ipfix, xbridge->netflow,
xbridge->forward_bpdu, xbridge->has_in_band,
&xbridge->support, xbridge->addr);
LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
xlate_xbundle_copy(new_xbridge, xbundle);
}
/* Copy xports which are not part of an xbundle. */
HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
if (!xport->xbundle) {
xlate_xport_copy(new_xbridge, NULL, xport);
}
}
}
static void
xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
{
struct xport *xport;
struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
new_xbundle->ofbundle = xbundle->ofbundle;
new_xbundle->xbridge = xbridge;
new_xbundle->name = xstrdup(xbundle->name);
xlate_xbundle_init(new_xcfg, new_xbundle);
xlate_xbundle_set(new_xbundle, xbundle->vlan_mode, xbundle->qinq_ethtype,
xbundle->vlan, xbundle->trunks, xbundle->cvlans,
xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
xbundle->floodable, xbundle->protected);
LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
xlate_xport_copy(xbridge, new_xbundle, xport);
}
}
static void
xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
struct xport *xport)
{
struct skb_priority_to_dscp *pdscp, *new_pdscp;
struct xport *new_xport = xzalloc(sizeof *xport);
new_xport->ofport = xport->ofport;
new_xport->ofp_port = xport->ofp_port;
new_xport->xbridge = xbridge;
new_xport->uuid = xport->uuid;
xlate_xport_init(new_xcfg, new_xport);
xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
xport->bfd, xport->lldp, xport->stp_port_no,
xport->rstp_port, xport->config, xport->state,
xport->is_tunnel, xport->may_enable);
if (xport->peer) {
struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
if (peer) {
new_xport->peer = peer;
new_xport->peer->peer = new_xport;
}
}
if (xbundle) {
new_xport->xbundle = xbundle;
ovs_list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
}
HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
new_pdscp = xmalloc(sizeof *pdscp);
new_pdscp->skb_priority = pdscp->skb_priority;
new_pdscp->dscp = pdscp->dscp;
hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
hash_int(new_pdscp->skb_priority, 0));
}
}
/* Sets the current xlate configuration to new_xcfg and frees the old xlate
* configuration in xcfgp.
*
* This needs to be called after editing the xlate configuration.
*
* Functions that edit the new xlate configuration are
* xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
*
* A sample workflow:
*
* xlate_txn_start();
* ...
* edit_xlate_configuration();
* ...
* xlate_txn_commit(); */
void
xlate_txn_commit(void)
{
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
ovsrcu_set(&xcfgp, new_xcfg);
ovsrcu_synchronize();
xlate_xcfg_free(xcfg);
new_xcfg = NULL;
}
/* Copies the current xlate configuration in xcfgp to new_xcfg.
*
* This needs to be called prior to editing the xlate configuration. */
void
xlate_txn_start(void)
{
struct xbridge *xbridge;
struct xlate_cfg *xcfg;
ovs_assert(!new_xcfg);
new_xcfg = xmalloc(sizeof *new_xcfg);
hmap_init(&new_xcfg->xbridges);
hmap_init(&new_xcfg->xbundles);
hmap_init(&new_xcfg->xports);
hmap_init(&new_xcfg->xports_uuid);
xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
if (!xcfg) {
return;
}
HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
xlate_xbridge_copy(xbridge);
}
}
static void
xlate_xcfg_free(struct xlate_cfg *xcfg)
{
struct xbridge *xbridge, *next_xbridge;
if (!xcfg) {
return;
}
HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
xlate_xbridge_remove(xcfg, xbridge);
}
hmap_destroy(&xcfg->xbridges);
hmap_destroy(&xcfg->xbundles);
hmap_destroy(&xcfg->xports);
hmap_destroy(&xcfg->xports_uuid);
free(xcfg);
}
void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
struct dpif *dpif,
const struct mac_learning *ml, struct stp *stp,
struct rstp *rstp, const struct mcast_snooping *ms,
const struct mbridge *mbridge,
const struct dpif_sflow *sflow,
const struct dpif_ipfix *ipfix,
const struct netflow *netflow,
bool forward_bpdu, bool has_in_band,
const struct dpif_backer_support *support)
{
struct xbridge *xbridge;
struct xbridge_addr *xbridge_addr, *old_addr;
ovs_assert(new_xcfg);
xbridge = xbridge_lookup(new_xcfg, ofproto);
if (!xbridge) {
xbridge = xzalloc(sizeof *xbridge);
xbridge->ofproto = ofproto;
xlate_xbridge_init(new_xcfg, xbridge);
}
free(xbridge->name);
xbridge->name = xstrdup(name);
xbridge_addr = xbridge_addr_create(xbridge);
old_addr = xbridge->addr;
xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
netflow, forward_bpdu, has_in_band, support,
xbridge_addr);
if (xbridge_addr != old_addr) {
xbridge_addr_unref(xbridge_addr);
}
}
static void
xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
struct xbundle *xbundle, *next_xbundle;
struct xport *xport, *next_xport;
if (!xbridge) {
return;
}
HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
xlate_xport_remove(xcfg, xport);
}
LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
xlate_xbundle_remove(xcfg, xbundle);
}
hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
mac_learning_unref(xbridge->ml);
mcast_snooping_unref(xbridge->ms);
mbridge_unref(xbridge->mbridge);
dpif_sflow_unref(xbridge->sflow);
dpif_ipfix_unref(xbridge->ipfix);
netflow_unref(xbridge->netflow);
stp_unref(xbridge->stp);
rstp_unref(xbridge->rstp);
xbridge_addr_unref(xbridge->addr);
hmap_destroy(&xbridge->xports);
free(xbridge->name);
free(xbridge);
}
void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
struct xbridge *xbridge;
ovs_assert(new_xcfg);
xbridge = xbridge_lookup(new_xcfg, ofproto);
xlate_xbridge_remove(new_xcfg, xbridge);
}
void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
const char *name, enum port_vlan_mode vlan_mode,
uint16_t qinq_ethtype, int vlan,
unsigned long *trunks, unsigned long *cvlans,
bool use_priority_tags,
const struct bond *bond, const struct lacp *lacp,
bool floodable, bool protected)
{
struct xbundle *xbundle;
ovs_assert(new_xcfg);
xbundle = xbundle_lookup(new_xcfg, ofbundle);
if (!xbundle) {
xbundle = xzalloc(sizeof *xbundle);
xbundle->ofbundle = ofbundle;
xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);
xlate_xbundle_init(new_xcfg, xbundle);
}
free(xbundle->name);
xbundle->name = xstrdup(name);
xlate_xbundle_set(xbundle, vlan_mode, qinq_ethtype, vlan, trunks, cvlans,
use_priority_tags, bond, lacp, floodable, protected);
}
static void
xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
struct xport *xport;
if (!xbundle) {
return;
}
LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
xport->xbundle = NULL;
}
hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
ovs_list_remove(&xbundle->list_node);
bond_unref(xbundle->bond);
lacp_unref(xbundle->lacp);
free(xbundle->name);
free(xbundle);
}
void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
struct xbundle *xbundle;
ovs_assert(new_xcfg);
xbundle = xbundle_lookup(new_xcfg, ofbundle);
xlate_xbundle_remove(new_xcfg, xbundle);
}
void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
struct ofport_dpif *ofport, ofp_port_t ofp_port,
odp_port_t odp_port, const struct netdev *netdev,
const struct cfm *cfm, const struct bfd *bfd,
const struct lldp *lldp, struct ofport_dpif *peer,
int stp_port_no, const struct rstp_port *rstp_port,
const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
enum ofputil_port_config config,
enum ofputil_port_state state, bool is_tunnel,
bool may_enable)
{
size_t i;
struct xport *xport;
ovs_assert(new_xcfg);
xport = xport_lookup(new_xcfg, ofport);
if (!xport) {
xport = xzalloc(sizeof *xport);
xport->ofport = ofport;
xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
xport->ofp_port = ofp_port;
uuid_generate(&xport->uuid);
xlate_xport_init(new_xcfg, xport);
}
ovs_assert(xport->ofp_port == ofp_port);
xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
stp_port_no, rstp_port, config, state, is_tunnel,
may_enable);
if (xport->peer) {
xport->peer->peer = NULL;
}
xport->peer = xport_lookup(new_xcfg, peer);
if (xport->peer) {
xport->peer->peer = xport;
}
if (xport->xbundle) {
ovs_list_remove(&xport->bundle_node);
}
xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
if (xport->xbundle) {
ovs_list_insert(&xport->xbundle->xports, &xport->bundle_node);
}
clear_skb_priorities(xport);
for (i = 0; i < n_qdscp; i++) {
struct skb_priority_to_dscp *pdscp;
uint32_t skb_priority;
if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
&skb_priority)) {
continue;
}
pdscp = xmalloc(sizeof *pdscp);
pdscp->skb_priority = skb_priority;
pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
hash_int(pdscp->skb_priority, 0));
}
}
static void
xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
{
if (!xport) {
return;
}
if (xport->peer) {
xport->peer->peer = NULL;
xport->peer = NULL;
}
if (xport->xbundle) {
ovs_list_remove(&xport->bundle_node);
}
clear_skb_priorities(xport);
hmap_destroy(&xport->skb_priorities);
hmap_remove(&xcfg->xports, &xport->hmap_node);
hmap_remove(&xcfg->xports_uuid, &xport->uuid_node);
hmap_remove(&xport->xbridge->xports, &xport->ofp_node);
netdev_close(xport->netdev);
rstp_port_unref(xport->rstp_port);
cfm_unref(xport->cfm);
bfd_unref(xport->bfd);
lldp_unref(xport->lldp);
free(xport);
}
void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
struct xport *xport;
ovs_assert(new_xcfg);
xport = xport_lookup(new_xcfg, ofport);
xlate_xport_remove(new_xcfg, xport);
}
static struct ofproto_dpif *
xlate_lookup_ofproto_(const struct dpif_backer *backer,
const struct flow *flow,
ofp_port_t *ofp_in_port, const struct xport **xportp,
char **errorp)
{
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
const struct xport *xport;
/* If packet is recirculated, xport can be retrieved from frozen state. */
if (flow->recirc_id) {
const struct recirc_id_node *recirc_id_node;
recirc_id_node = recirc_id_node_find(flow->recirc_id);
if (OVS_UNLIKELY(!recirc_id_node)) {
if (errorp) {
*errorp = xasprintf("no recirculation data for recirc_id "
"%"PRIu32, flow->recirc_id);
}
return NULL;
}
/* If recirculation was initiated due to bond (in_port = OFPP_NONE)
* then frozen state is static and xport_uuid is not defined, so xport
* cannot be restored from frozen state. */
if (recirc_id_node->state.metadata.in_port != OFPP_NONE) {
struct uuid xport_uuid = recirc_id_node->state.xport_uuid;
xport = xport_lookup_by_uuid(xcfg, &xport_uuid);
if (xport && xport->xbridge && xport->xbridge->ofproto) {
goto out;
}
}
}
xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
? tnl_port_receive(flow)
: odp_port_to_ofport(backer, flow->in_port.odp_port));
if (OVS_UNLIKELY(!xport)) {
if (errorp) {
*errorp = (tnl_port_should_receive(flow)
? xstrdup("no OpenFlow tunnel port for this packet")
: xasprintf("no OpenFlow tunnel port for datapath "
"port %"PRIu32, flow->in_port.odp_port));
}
return NULL;
}
out:
if (errorp) {
*errorp = NULL;
}
*xportp = xport;
if (ofp_in_port) {
*ofp_in_port = xport->ofp_port;
}
return xport->xbridge->ofproto;
}
/* Given a datapath and flow metadata ('backer', and 'flow' respectively)
* returns the corresponding struct ofproto_dpif and OpenFlow port number. */
struct ofproto_dpif *
xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
ofp_port_t *ofp_in_port, char **errorp)
{
const struct xport *xport;
return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport, errorp);
}
/* Given a datapath and flow metadata ('backer', and 'flow' respectively),
* optionally populates 'ofprotop' with the ofproto_dpif, 'ofp_in_port' with the
* openflow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
* handles for those protocols if they're enabled. The caller may use the
* returned pointers until quiescing; for longer-term use, additional
* references must be taken.
*
* Returns 0 if successful, ENODEV if the parsed flow has no associated ofproto.
*/
int
xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
struct dpif_sflow **sflow, struct netflow **netflow,
ofp_port_t *ofp_in_port)
{
struct ofproto_dpif *ofproto;
const struct xport *xport;
ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport, NULL);
if (!ofproto) {
return ENODEV;
}
if (ofprotop) {
*ofprotop = ofproto;
}
if (ipfix) {
*ipfix = xport ? xport->xbridge->ipfix : NULL;
}
if (sflow) {
*sflow = xport ? xport->xbridge->sflow : NULL;
}
if (netflow) {
*netflow = xport ? xport->xbridge->netflow : NULL;
}
return 0;
}
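/* Illustrative sketch (hypothetical caller; upcall handling code elsewhere
 * does something similar) of using xlate_lookup() to map datapath flow
 * metadata back to an ofproto and OpenFlow in_port:
 *
 *     struct ofproto_dpif *ofproto;
 *     ofp_port_t ofp_in_port;
 *     if (!xlate_lookup(backer, &flow, &ofproto, NULL, NULL, NULL,
 *                       &ofp_in_port)) {
 *         ...translate 'flow' in the context of 'ofproto'...
 *     }
 */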
static struct xbridge *
xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
{
struct hmap *xbridges;
struct xbridge *xbridge;
if (!ofproto || !xcfg) {
return NULL;
}
xbridges = &xcfg->xbridges;
HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
xbridges) {
if (xbridge->ofproto == ofproto) {
return xbridge;
}
}
return NULL;
}
static struct xbridge *
xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
{
struct xbridge *xbridge;
HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
if (uuid_equals(&xbridge->ofproto->uuid, uuid)) {
return xbridge;
}
}
return NULL;
}
static struct xbundle *
xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
{
struct hmap *xbundles;
struct xbundle *xbundle;
if (!ofbundle || !xcfg) {
return NULL;
}
xbundles = &xcfg->xbundles;
HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
xbundles) {
if (xbundle->ofbundle == ofbundle) {
return xbundle;
}
}
return NULL;
}
static struct xport *
xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
{
struct hmap *xports;
struct xport *xport;
if (!ofport || !xcfg) {
return NULL;
}
xports = &xcfg->xports;
HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
xports) {
if (xport->ofport == ofport) {
return xport;
}
}
return NULL;
}
static struct xport *
xport_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
{
struct hmap *xports;
struct xport *xport;
if (uuid_is_zero(uuid) || !xcfg) {
return NULL;
}
xports = &xcfg->xports_uuid;
HMAP_FOR_EACH_IN_BUCKET (xport, uuid_node, uuid_hash(uuid), xports) {
if (uuid_equals(&xport->uuid, uuid)) {
return xport;
}
}
return NULL;
}
static struct stp_port *
xport_get_stp_port(const struct xport *xport)
{
return xport->xbridge->stp && xport->stp_port_no != -1
? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
: NULL;
}
static bool
xport_stp_learn_state(const struct xport *xport)
{
struct stp_port *sp = xport_get_stp_port(xport);
return sp
? stp_learn_in_state(stp_port_get_state(sp))
: true;
}
static bool
xport_stp_forward_state(const struct xport *xport)
{
struct stp_port *sp = xport_get_stp_port(xport);
return sp
? stp_forward_in_state(stp_port_get_state(sp))
: true;
}
static bool
xport_stp_should_forward_bpdu(const struct xport *xport)
{
struct stp_port *sp = xport_get_stp_port(xport);
return stp_should_forward_bpdu(sp ? stp_port_get_state(sp) : STP_DISABLED);
}
/* Returns true if STP should process 'flow'. Sets fields in 'wc' that
* were used to make the determination.*/
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
/* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
return is_stp(flow);
}
static void
stp_process_packet(const struct xport *xport, const struct dp_packet *packet)
{
struct stp_port *sp = xport_get_stp_port(xport);
struct dp_packet payload = *packet;
struct eth_header *eth = dp_packet_data(&payload);
/* Sink packets on ports that have STP disabled when the bridge has
* STP enabled. */
if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
return;
}
/* Trim off padding on payload. */
if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
}
if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
stp_received_bpdu(sp, dp_packet_data(&payload), dp_packet_size(&payload));
}
}
static enum rstp_state
xport_get_rstp_port_state(const struct xport *xport)
{
return xport->rstp_port
? rstp_port_get_state(xport->rstp_port)
: RSTP_DISABLED;
}
static bool
xport_rstp_learn_state(const struct xport *xport)
{
return xport->xbridge->rstp && xport->rstp_port
? rstp_learn_in_state(xport_get_rstp_port_state(xport))
: true;
}
static bool
xport_rstp_forward_state(const struct xport *xport)
{
return xport->xbridge->rstp && xport->rstp_port
? rstp_forward_in_state(xport_get_rstp_port_state(xport))
: true;
}
static bool
xport_rstp_should_manage_bpdu(const struct xport *xport)
{
return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
}
static void
rstp_process_packet(const struct xport *xport, const struct dp_packet *packet)
{
struct dp_packet payload = *packet;
struct eth_header *eth = dp_packet_data(&payload);
/* Sink packets on ports that have no RSTP. */
if (!xport->rstp_port) {
return;
}
/* Trim off padding on payload. */
if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
}
int len = ETH_HEADER_LEN + LLC_HEADER_LEN;
if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
len += VLAN_HEADER_LEN;
}
if (dp_packet_try_pull(&payload, len)) {
rstp_port_received_bpdu(xport->rstp_port, dp_packet_data(&payload),
dp_packet_size(&payload));
}
}
static struct xport *
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
struct xport *xport;
HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
&xbridge->xports) {
if (xport->ofp_port == ofp_port) {
return xport;
}
}
return NULL;
}
static odp_port_t
ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
const struct xport *xport = get_ofp_port(xbridge, ofp_port);
return xport ? xport->odp_port : ODPP_NONE;
}
static bool
odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
{
struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
return xport && xport->may_enable;
}
static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
int depth);
static bool
group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
{
struct group_dpif *group;
group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
ctx->xin->tables_version, false);
if (group) {
return group_first_live_bucket(ctx, group, depth) != NULL;
}
return false;
}
#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
static bool
bucket_is_alive(const struct xlate_ctx *ctx,
struct ofputil_bucket *bucket, int depth)
{
if (depth >= MAX_LIVENESS_RECURSION) {
xlate_report_error(ctx, "bucket chaining exceeded %d links",
MAX_LIVENESS_RECURSION);
return false;
}
return (!ofputil_bucket_has_liveness(bucket)
|| (bucket->watch_port != OFPP_ANY
&& odp_port_is_alive(ctx, bucket->watch_port))
|| (bucket->watch_group != OFPG_ANY
&& group_is_alive(ctx, bucket->watch_group, depth + 1)));
}
static void
xlate_report_bucket_not_live(const struct xlate_ctx *ctx,
const struct ofputil_bucket *bucket)
{
if (OVS_UNLIKELY(ctx->xin->trace)) {
struct ds s = DS_EMPTY_INITIALIZER;
if (bucket->watch_port != OFPP_ANY) {
ds_put_cstr(&s, "port ");
ofputil_format_port(bucket->watch_port, NULL, &s);
}
if (bucket->watch_group != OFPG_ANY) {
if (s.length) {
ds_put_cstr(&s, " and ");
}
ds_put_format(&s, "port %"PRIu32, bucket->watch_group);
}
xlate_report(ctx, OFT_DETAIL, "bucket %"PRIu32": not live due to %s",
bucket->bucket_id, ds_cstr(&s));
ds_destroy(&s);
}
}
static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *ctx,
const struct group_dpif *group, int depth)
{
struct ofputil_bucket *bucket;
LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
if (bucket_is_alive(ctx, bucket, depth)) {
return bucket;
}
xlate_report_bucket_not_live(ctx, bucket);
}
return NULL;
}
static struct ofputil_bucket *
group_best_live_bucket(const struct xlate_ctx *ctx,
const struct group_dpif *group,
uint32_t basis)
{
struct ofputil_bucket *best_bucket = NULL;
uint32_t best_score = 0;
struct ofputil_bucket *bucket;
LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
if (bucket_is_alive(ctx, bucket, 0)) {
uint32_t score =
(hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
if (score >= best_score) {
best_bucket = bucket;
best_score = score;
}
xlate_report(ctx, OFT_DETAIL, "bucket %"PRIu32": score %"PRIu32,
bucket->bucket_id, score);
} else {
xlate_report_bucket_not_live(ctx, bucket);
}
}
return best_bucket;
}
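/* Returns true if 'bundle' is a trunk-like bundle (not an access port) that
* carries 'vlan'. */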
static bool
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
{
return (bundle->vlan_mode != PORT_VLAN_ACCESS
&& (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}
static bool
xbundle_allows_cvlan(const struct xbundle *bundle, uint16_t vlan)
{
return (!bundle->cvlans || bitmap_is_set(bundle->cvlans, vlan));
}
static bool
xbundle_includes_vlan(const struct xbundle *xbundle, const struct xvlan *xvlan)
{
switch (xbundle->vlan_mode) {
case PORT_VLAN_ACCESS:
return xvlan->v[0].vid == xbundle->vlan && xvlan->v[1].vid == 0;
case PORT_VLAN_TRUNK:
case PORT_VLAN_NATIVE_UNTAGGED:
case PORT_VLAN_NATIVE_TAGGED:
return xbundle_trunks_vlan(xbundle, xvlan->v[0].vid);
case PORT_VLAN_DOT1Q_TUNNEL:
return xvlan->v[0].vid == xbundle->vlan &&
xbundle_allows_cvlan(xbundle, xvlan->v[1].vid);
default:
OVS_NOT_REACHED();
}
}
static mirror_mask_t
xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
{
return xbundle != &ofpp_none_bundle
? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
: 0;
}
static mirror_mask_t
xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
{
return xbundle != &ofpp_none_bundle
? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
: 0;
}
static mirror_mask_t
xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
{
return xbundle != &ofpp_none_bundle
? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
: 0;
}
static struct xbundle *
lookup_input_bundle__(const struct xbridge *xbridge,
ofp_port_t in_port, struct xport **in_xportp)
{
struct xport *xport;
/* Find the port and bundle for the received packet. */
xport = get_ofp_port(xbridge, in_port);
if (in_xportp) {
*in_xportp = xport;
}
if (xport && xport->xbundle) {
return xport->xbundle;
}
/* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
* which a controller may use as the ingress port for traffic that
* it is sourcing. */
if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
return &ofpp_none_bundle;
}
return NULL;
}
static struct xbundle *
lookup_input_bundle(const struct xlate_ctx *ctx,
ofp_port_t in_port, struct xport **in_xportp)
{
struct xbundle *xbundle = lookup_input_bundle__(ctx->xbridge,
in_port, in_xportp);
if (!xbundle) {
/* Odd. A few possible reasons here:
*
* - We deleted a port but there are still a few packets queued up
* from it.
*
* - Someone externally added a port (e.g. "ovs-dpctl add-if") that
* we don't know about.
*
* - The ofproto client didn't configure the port as part of a bundle.
* This is particularly likely to happen if a packet was received on
* the port after it was created, but before the client had a chance
* to configure its bundle.
*/
xlate_report_error(ctx, "received packet on unknown port %"PRIu32,
in_port);
}
return xbundle;
}
/* Mirrors the packet represented by 'ctx' to appropriate mirror destinations,
* given the packet is ingressing or egressing on 'xbundle', which has ingress
* or egress (as appropriate) mirrors 'mirrors'. */
static void
mirror_packet(struct xlate_ctx *ctx, struct xbundle *xbundle,
mirror_mask_t mirrors)
{
struct xvlan in_xvlan;
struct xvlan xvlan;
/* Figure out what VLAN the packet is in (because mirrors can select
* packets on basis of VLAN). */
xvlan_extract(&ctx->xin->flow, &in_xvlan);
if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, xbundle)) {
return;
}
xvlan_input_translate(xbundle, &in_xvlan, &xvlan);
const struct xbridge *xbridge = ctx->xbridge;
/* Don't mirror to destinations that we've already mirrored to. */
mirrors &= ~ctx->mirrors;
if (!mirrors) {
return;
}
/* 'mirrors' is a bit-mask of candidates for mirroring. Iterate through
* the candidates, adding the ones that really should be mirrored to
* 'used_mirrors', as long as some candidates remain. */
mirror_mask_t used_mirrors = 0;
while (mirrors) {
const unsigned long *vlans;
mirror_mask_t dup_mirrors;
struct ofbundle *out;
int out_vlan;
int snaplen;
/* Get the details of the mirror represented by the rightmost 1-bit. */
ovs_assert(mirror_get(xbridge->mbridge, raw_ctz(mirrors),
&vlans, &dup_mirrors,
&out, &snaplen, &out_vlan));
/* If this mirror selects on the basis of VLAN, and it does not select
* 'vlan', then discard this mirror and go on to the next one. */
if (vlans) {
ctx->wc->masks.vlans[0].tci |= htons(VLAN_CFI | VLAN_VID_MASK);
}
if (vlans && !bitmap_is_set(vlans, xvlan.v[0].vid)) {
mirrors = zero_rightmost_1bit(mirrors);
continue;
}
/* We sent a packet to this mirror. */
used_mirrors |= rightmost_1bit(mirrors);
/* Record the mirror, and the mirrors that output to the same
* destination, so that we don't mirror to them again. This must be
* done now to ensure that output_normal(), below, doesn't recursively
* output to the same mirrors. */
ctx->mirrors |= dup_mirrors;
ctx->mirror_snaplen = snaplen;
/* Send the packet to the mirror. */
if (out) {
struct xbundle *out_xbundle = xbundle_lookup(ctx->xcfg, out);
if (out_xbundle) {
output_normal(ctx, out_xbundle, &xvlan);
}
} else if (xvlan.v[0].vid != out_vlan
&& !eth_addr_is_reserved(ctx->xin->flow.dl_dst)) {
struct xbundle *xb;
uint16_t old_vid = xvlan.v[0].vid;
xvlan.v[0].vid = out_vlan;
LIST_FOR_EACH (xb, list_node, &xbridge->xbundles) {
if (xbundle_includes_vlan(xb, &xvlan)
&& !xbundle_mirror_out(xbridge, xb)) {
output_normal(ctx, xb, &xvlan);
}
}
xvlan.v[0].vid = old_vid;
}
/* output_normal() could have recursively output (to different
* mirrors), so make sure that we don't send duplicates. */
mirrors &= ~ctx->mirrors;
ctx->mirror_snaplen = 0;
}
if (used_mirrors) {
if (ctx->xin->resubmit_stats) {
mirror_update_stats(xbridge->mbridge, used_mirrors,
ctx->xin->resubmit_stats->n_packets,
ctx->xin->resubmit_stats->n_bytes);
}
if (ctx->xin->xcache) {
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
entry->mirror.mbridge = mbridge_ref(xbridge->mbridge);
entry->mirror.mirrors = used_mirrors;
}
}
}
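/* Applies the ingress mirrors, if any, configured for the bundle on which the
* packet in 'ctx' was received. */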
static void
mirror_ingress_packet(struct xlate_ctx *ctx)
{
if (mbridge_has_mirrors(ctx->xbridge->mbridge)) {
struct xbundle *xbundle = lookup_input_bundle(
ctx, ctx->xin->flow.in_port.ofp_port, NULL);
if (xbundle) {
mirror_packet(ctx, xbundle,
xbundle_mirror_src(ctx->xbridge, xbundle));
}
}
}
/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
* If so, returns true. Otherwise, returns false.
*
* 'vid' should be the VID obtained from the 802.1Q header that was received as
* part of a packet (specify 0 if there was no 802.1Q header), in the range
* 0...4095. */
static bool
input_vid_is_valid(const struct xlate_ctx *ctx,
uint16_t vid, struct xbundle *in_xbundle)
{
/* Allow any VID on the OFPP_NONE port. */
if (in_xbundle == &ofpp_none_bundle) {
return true;
}
switch (in_xbundle->vlan_mode) {
case PORT_VLAN_ACCESS:
if (vid) {
xlate_report_error(ctx, "dropping VLAN %"PRIu16" tagged "
"packet received on port %s configured as VLAN "
"%d access port", vid, in_xbundle->name,
in_xbundle->vlan);
return false;
}
return true;
case PORT_VLAN_NATIVE_UNTAGGED:
case PORT_VLAN_NATIVE_TAGGED:
if (!vid) {
/* Port must always carry its native VLAN. */
return true;
}
/* Fall through. */
case PORT_VLAN_TRUNK:
if (!xbundle_trunks_vlan(in_xbundle, vid)) {
xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet "
"received on port %s not configured for "
"trunking VLAN %"PRIu16,
vid, in_xbundle->name, vid);
return false;
}
return true;
case PORT_VLAN_DOT1Q_TUNNEL:
if (!xbundle_allows_cvlan(in_xbundle, vid)) {
xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet received "
"on dot1q-tunnel port %s that excludes this "
"VLAN", vid, in_xbundle->name);
return false;
}
return true;
default:
OVS_NOT_REACHED();
}
}
static void
xvlan_copy(struct xvlan *dst, const struct xvlan *src)
{
*dst = *src;
}
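/* Removes the outermost VLAN header from 'src', shifting the remaining
* headers outward and clearing the innermost slot. */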
static void
xvlan_pop(struct xvlan *src)
{
memmove(&src->v[0], &src->v[1], sizeof(src->v) - sizeof(src->v[0]));
memset(&src->v[FLOW_MAX_VLAN_HEADERS - 1], 0,
sizeof(src->v[FLOW_MAX_VLAN_HEADERS - 1]));
}
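/* Makes room for a new outermost VLAN header in 'src' by shifting the
* existing headers inward; v[0] is zeroed and must be filled in by the
* caller. */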
static void
xvlan_push_uninit(struct xvlan *src)
{
memmove(&src->v[1], &src->v[0], sizeof(src->v) - sizeof(src->v[0]));
memset(&src->v[0], 0, sizeof(src->v[0]));
}
/* Extract VLAN information (headers) from flow */
static void
xvlan_extract(const struct flow *flow, struct xvlan *xvlan)
{
int i;
memset(xvlan, 0, sizeof(*xvlan));
for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
if (!eth_type_vlan(flow->vlans[i].tpid) ||
!(flow->vlans[i].tci & htons(VLAN_CFI))) {
break;
}
xvlan->v[i].tpid = ntohs(flow->vlans[i].tpid);
xvlan->v[i].vid = vlan_tci_to_vid(flow->vlans[i].tci);
xvlan->v[i].pcp = ntohs(flow->vlans[i].tci) & VLAN_PCP_MASK;
}
}
/* Put VLAN information (headers) to flow */
static void
xvlan_put(struct flow *flow, const struct xvlan *xvlan)
{
ovs_be16 tci;
int i;
for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
tci = htons(xvlan->v[i].vid | (xvlan->v[i].pcp & VLAN_PCP_MASK));
if (tci) {
tci |= htons(VLAN_CFI);
flow->vlans[i].tpid = xvlan->v[i].tpid ?
htons(xvlan->v[i].tpid) :
htons(ETH_TYPE_VLAN_8021Q);
}
flow->vlans[i].tci = tci;
}
}
/* Given 'in_xvlan', extracted from the input 802.1Q headers received as part
* of a packet, and 'in_xbundle', the bundle on which the packet was received,
* returns the VLANs of the packet during bridge internal processing. */
static void
xvlan_input_translate(const struct xbundle *in_xbundle,
const struct xvlan *in_xvlan, struct xvlan *xvlan)
{
switch (in_xbundle->vlan_mode) {
case PORT_VLAN_ACCESS:
memset(xvlan, 0, sizeof(*xvlan));
xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
ETH_TYPE_VLAN_8021Q;
xvlan->v[0].vid = in_xbundle->vlan;
xvlan->v[0].pcp = in_xvlan->v[0].pcp;
break;
case PORT_VLAN_TRUNK:
xvlan_copy(xvlan, in_xvlan);
break;
case PORT_VLAN_NATIVE_UNTAGGED:
case PORT_VLAN_NATIVE_TAGGED:
xvlan_copy(xvlan, in_xvlan);
if (!in_xvlan->v[0].vid) {
xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
ETH_TYPE_VLAN_8021Q;
xvlan->v[0].vid = in_xbundle->vlan;
xvlan->v[0].pcp = in_xvlan->v[0].pcp;
}
break;
case PORT_VLAN_DOT1Q_TUNNEL:
xvlan_copy(xvlan, in_xvlan);
xvlan_push_uninit(xvlan);
xvlan->v[0].tpid = in_xbundle->qinq_ethtype;
xvlan->v[0].vid = in_xbundle->vlan;
xvlan->v[0].pcp = 0;
break;
default:
OVS_NOT_REACHED();
}
}
/* Given 'xvlan', the VLANs of a packet during internal processing, and
* 'out_xbundle', a bundle on which the packet is to be output, returns the
* VLANs that should be included in output packet. */
static void
xvlan_output_translate(const struct xbundle *out_xbundle,
const struct xvlan *xvlan, struct xvlan *out_xvlan)
{
switch (out_xbundle->vlan_mode) {
case PORT_VLAN_ACCESS:
memset(out_xvlan, 0, sizeof(*out_xvlan));
break;
case PORT_VLAN_TRUNK:
case PORT_VLAN_NATIVE_TAGGED:
xvlan_copy(out_xvlan, xvlan);
break;
case PORT_VLAN_NATIVE_UNTAGGED:
xvlan_copy(out_xvlan, xvlan);
if (xvlan->v[0].vid == out_xbundle->vlan) {
xvlan_pop(out_xvlan);
}
break;
case PORT_VLAN_DOT1Q_TUNNEL:
xvlan_copy(out_xvlan, xvlan);
xvlan_pop(out_xvlan);
break;
default:
OVS_NOT_REACHED();
}
}
/* If output xbundle is dot1q-tunnel, set mask bits of cvlan */
static void
check_and_set_cvlan_mask(struct flow_wildcards *wc,
const struct xbundle *xbundle)
{
if (xbundle->vlan_mode == PORT_VLAN_DOT1Q_TUNNEL && xbundle->cvlans) {
wc->masks.vlans[1].tci = htons(0xffff);
}
}
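/* Outputs the packet being translated to 'out_xbundle', rewriting its VLAN
* headers as appropriate for that bundle. For bonded bundles this chooses an
* enabled slave, using recirculation and hash-based selection when the
* datapath supports it. */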
static void
output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
const struct xvlan *xvlan)
{
uint16_t vid;
union flow_vlan_hdr old_vlans[FLOW_MAX_VLAN_HEADERS];
struct xport *xport;
struct xlate_bond_recirc xr;
bool use_recirc = false;
struct xvlan out_xvlan;
check_and_set_cvlan_mask(ctx->wc, out_xbundle);
xvlan_output_translate(out_xbundle, xvlan, &out_xvlan);
if (out_xbundle->use_priority_tags) {
out_xvlan.v[0].pcp = ntohs(ctx->xin->flow.vlans[0].tci) &
VLAN_PCP_MASK;
}
vid = out_xvlan.v[0].vid;
if (ovs_list_is_empty(&out_xbundle->xports)) {
/* Partially configured bundle with no slaves. Drop the packet. */
return;
} else if (!out_xbundle->bond) {
xport = CONTAINER_OF(ovs_list_front(&out_xbundle->xports), struct xport,
bundle_node);
} else {
struct flow_wildcards *wc = ctx->wc;
struct ofport_dpif *ofport;
if (ctx->xbridge->support.odp.recirc) {
/* In case recirculation is not actually in use, 'xr.recirc_id'
* will be set to '0', since a valid 'recirc_id' cannot be zero. */
bond_update_post_recirc_rules(out_xbundle->bond,
&xr.recirc_id,
&xr.hash_basis);
if (xr.recirc_id) {
/* Use recirculation instead of output. */
use_recirc = true;
xr.hash_alg = OVS_HASH_ALG_L4;
/* Recirculation does not require unmasking hash fields. */
wc = NULL;
}
}
ofport = bond_choose_output_slave(out_xbundle->bond,
&ctx->xin->flow, wc, vid);
xport = xport_lookup(ctx->xcfg, ofport);
if (!xport) {
/* No slaves enabled, so drop packet. */
return;
}
/* If use_recirc is set, the main thread will handle stats
* accounting for this bond. */
if (!use_recirc) {
if (ctx->xin->resubmit_stats) {
bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
ctx->xin->resubmit_stats->n_bytes);
}
if (ctx->xin->xcache) {
struct xc_entry *entry;
struct flow *flow;
flow = &ctx->xin->flow;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
entry->bond.bond = bond_ref(out_xbundle->bond);
entry->bond.flow = xmemdup(flow, sizeof *flow);
entry->bond.vid = vid;
}
}
}
memcpy(&old_vlans, &ctx->xin->flow.vlans, sizeof(old_vlans));
xvlan_put(&ctx->xin->flow, &out_xvlan);
compose_output_action(ctx, xport->ofp_port, use_recirc ? &xr : NULL,
false, false);
memcpy(&ctx->xin->flow.vlans, &old_vlans, sizeof(old_vlans));
}
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
* migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
* indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
{
if (flow->dl_type != htons(ETH_TYPE_ARP)) {
return false;
}
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
if (!eth_addr_is_broadcast(flow->dl_dst)) {
return false;
}
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
if (flow->nw_proto == ARP_OP_REPLY) {
return true;
} else if (flow->nw_proto == ARP_OP_REQUEST) {
memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
return flow->nw_src == flow->nw_dst;
} else {
return false;
}
}
/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
* dropped. Returns true if they may be forwarded, false if they should be
* dropped.
*
* 'in_port' must be the xport that corresponds to flow->in_port.
* 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
*
* 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
* returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
* checked by input_vid_is_valid().
*
* May also add tags to '*tags', although the current implementation only does
* so in one special case.
*/
static bool
is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
uint16_t vlan)
{
struct xbundle *in_xbundle = in_port->xbundle;
const struct xbridge *xbridge = ctx->xbridge;
struct flow *flow = &ctx->xin->flow;
/* Drop frames for reserved multicast addresses
* only if forward_bpdu option is absent. */
if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
xlate_report(ctx, OFT_DETAIL,
"packet has reserved destination MAC, dropping");
return false;
}
if (in_xbundle->bond) {
struct mac_entry *mac;
switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
flow->dl_dst)) {
case BV_ACCEPT:
break;
case BV_DROP:
xlate_report(ctx, OFT_DETAIL,
"bonding refused admissibility, dropping");
return false;
case BV_DROP_IF_MOVED:
ovs_rwlock_rdlock(&xbridge->ml->rwlock);
mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
if (mac
&& mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
&& (!is_gratuitous_arp(flow, ctx->wc)
|| mac_entry_is_grat_arp_locked(mac))) {
ovs_rwlock_unlock(&xbridge->ml->rwlock);
xlate_report(ctx, OFT_DETAIL,
"SLB bond thinks this packet looped back, "
"dropping");
return false;
}
ovs_rwlock_unlock(&xbridge->ml->rwlock);
break;
}
}
return true;
}
static bool
update_learning_table__(const struct xbridge *xbridge,
struct xbundle *in_xbundle, struct eth_addr dl_src,
int vlan, bool is_grat_arp)
{
return (in_xbundle == &ofpp_none_bundle
|| !mac_learning_update(xbridge->ml, dl_src, vlan,
is_grat_arp,
in_xbundle->bond != NULL,
in_xbundle->ofbundle));
}
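/* Updates the MAC learning table with 'dl_src' seen on 'in_xbundle' in
* 'vlan', logging a debug message when the table is actually modified. */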
static void
update_learning_table(const struct xlate_ctx *ctx,
struct xbundle *in_xbundle, struct eth_addr dl_src,
int vlan, bool is_grat_arp)
{
if (!update_learning_table__(ctx->xbridge, in_xbundle, dl_src, vlan,
is_grat_arp)) {
xlate_report_debug(ctx, OFT_DETAIL, "learned that "ETH_ADDR_FMT" is "
"on port %s in VLAN %d",
ETH_ADDR_ARGS(dl_src), in_xbundle->name, vlan);
}
}
/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
* was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
static void
update_mcast_snooping_table4__(const struct xlate_ctx *ctx,
const struct flow *flow,
struct mcast_snooping *ms, int vlan,
struct xbundle *in_xbundle,
const struct dp_packet *packet)
OVS_REQ_WRLOCK(ms->rwlock)
{
const struct igmp_header *igmp;
int count;
size_t offset;
ovs_be32 ip4 = flow->igmp_group_ip4;
offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
igmp = dp_packet_at(packet, offset, IGMP_HEADER_LEN);
if (!igmp || csum(igmp, dp_packet_l4_size(packet)) != 0) {
xlate_report_debug(ctx, OFT_DETAIL,
"multicast snooping received bad IGMP "
"checksum on port %s in VLAN %d",
in_xbundle->name, vlan);
return;
}
switch (ntohs(flow->tp_src)) {
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
if (mcast_snooping_add_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
xlate_report_debug(ctx, OFT_DETAIL,
"multicast snooping learned that "
IP_FMT" is on port %s in VLAN %d",
IP_ARGS(ip4), in_xbundle->name, vlan);
}
break;
case IGMP_HOST_LEAVE_MESSAGE:
if (mcast_snooping_leave_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping leaving "
IP_FMT" is on port %s in VLAN %d",
IP_ARGS(ip4), in_xbundle->name, vlan);
}
break;
case IGMP_HOST_MEMBERSHIP_QUERY:
if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
in_xbundle->ofbundle)) {
xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query "
"from "IP_FMT" is on port %s in VLAN %d",
IP_ARGS(flow->nw_src), in_xbundle->name, vlan);
}
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
count = mcast_snooping_add_report(ms, packet, vlan,
in_xbundle->ofbundle);
if (count) {
xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
"%d addresses on port %s in VLAN %d",
count, in_xbundle->name, vlan);
}
break;
}
}
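/* Like update_mcast_snooping_table4__(), but for MLD packets, which update
* the IPv6 multicast snooping table. */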
static void
update_mcast_snooping_table6__(const struct xlate_ctx *ctx,
const struct flow *flow,
struct mcast_snooping *ms, int vlan,
struct xbundle *in_xbundle,
const struct dp_packet *packet)
OVS_REQ_WRLOCK(ms->rwlock)
{
const struct mld_header *mld;
int count;
size_t offset;
offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
mld = dp_packet_at(packet, offset, MLD_HEADER_LEN);
if (!mld ||
packet_csum_upperlayer6(dp_packet_l3(packet),
mld, IPPROTO_ICMPV6,
dp_packet_l4_size(packet)) != 0) {
xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping received "
"bad MLD checksum on port %s in VLAN %d",
in_xbundle->name, vlan);
return;
}
switch (ntohs(flow->tp_src)) {
case MLD_QUERY:
if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
&& mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query on "
"port %s in VLAN %d", in_xbundle->name, vlan);
}
break;
case MLD_REPORT:
case MLD_DONE:
case MLD2_REPORT:
count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
if (count) {
xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
"%d addresses on port %s in VLAN %d",
count, in_xbundle->name, vlan);
}
break;
}
}
/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
* was received on 'in_xbundle' in 'vlan'. */
static void
update_mcast_snooping_table(const struct xlate_ctx *ctx,
const struct flow *flow, int vlan,
struct xbundle *in_xbundle,
const struct dp_packet *packet)
{
struct mcast_snooping *ms = ctx->xbridge->ms;
struct xbundle *mcast_xbundle;
struct mcast_port_bundle *fport;
/* Don't learn the OFPP_NONE port. */
if (in_xbundle == &ofpp_none_bundle) {
return;
}
/* Don't learn from flood ports */
mcast_xbundle = NULL;
ovs_rwlock_wrlock(&ms->rwlock);
LIST_FOR_EACH(fport, node, &ms->fport_list) {
mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
if (mcast_xbundle == in_xbundle) {
break;
}
}
if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
if (flow->dl_type == htons(ETH_TYPE_IP)) {
update_mcast_snooping_table4__(ctx, flow, ms, vlan,
in_xbundle, packet);
} else {
update_mcast_snooping_table6__(ctx, flow, ms, vlan,
in_xbundle, packet);
}
}
ovs_rwlock_unlock(&ms->rwlock);
}
/* A list of multicast output ports.
*
* We accumulate output ports and then do all the outputs afterward. It would
* be more natural to do the outputs one at a time as we discover the need for
* each one, but this can cause a deadlock because we need to take the
* mcast_snooping's rwlock for reading to iterate through the port lists and
* doing an output, if it goes to a patch port, can eventually come back to the
* same mcast_snooping and attempt to take the write lock (see
* https://github.com/openvswitch/ovs-issues/issues/153). */
struct mcast_output {
/* Discrete ports. */
struct xbundle **xbundles;
size_t n, allocated;
/* If set, flood to all ports. */
bool flood;
};
#define MCAST_OUTPUT_INIT { NULL, 0, 0, false }
/* Add 'mcast_bundle' to 'out'. */
static void
mcast_output_add(struct mcast_output *out, struct xbundle *mcast_xbundle)
{
if (out->n >= out->allocated) {
out->xbundles = x2nrealloc(out->xbundles, &out->allocated,
sizeof *out->xbundles);
}
out->xbundles[out->n++] = mcast_xbundle;
}
/* Outputs the packet in 'ctx' to all of the output ports in 'out', given input
* bundle 'in_xbundle' and the current 'xvlan'. */
static void
mcast_output_finish(struct xlate_ctx *ctx, struct mcast_output *out,
struct xbundle *in_xbundle, struct xvlan *xvlan)
{
if (out->flood) {
xlate_normal_flood(ctx, in_xbundle, xvlan);
} else {
for (size_t i = 0; i < out->n; i++) {
output_normal(ctx, out->xbundles[i], xvlan);
}
}
free(out->xbundles);
}
/* send the packet to ports having the multicast group learned */
static void
xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
struct mcast_snooping *ms OVS_UNUSED,
struct mcast_group *grp,
struct xbundle *in_xbundle,
struct mcast_output *out)
OVS_REQ_RDLOCK(ms->rwlock)
{
struct mcast_group_bundle *b;
struct xbundle *mcast_xbundle;
LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
mcast_xbundle = xbundle_lookup(ctx->xcfg, b->port);
if (mcast_xbundle && mcast_xbundle != in_xbundle) {
xlate_report(ctx, OFT_DETAIL, "forwarding to mcast group port");
mcast_output_add(out, mcast_xbundle);
} else if (!mcast_xbundle) {
xlate_report(ctx, OFT_WARN,
"mcast group port is unknown, dropping");
} else {
xlate_report(ctx, OFT_DETAIL,
"mcast group port is input port, dropping");
}
}
}
/* send the packet to ports connected to multicast routers */
static void
xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
struct mcast_snooping *ms,
struct xbundle *in_xbundle,
const struct xvlan *xvlan,
struct mcast_output *out)
OVS_REQ_RDLOCK(ms->rwlock)
{
struct mcast_mrouter_bundle *mrouter;
struct xbundle *mcast_xbundle;
LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
mcast_xbundle = xbundle_lookup(ctx->xcfg, mrouter->port);
if (mcast_xbundle && mcast_xbundle != in_xbundle
&& mrouter->vlan == xvlan->v[0].vid) {
xlate_report(ctx, OFT_DETAIL, "forwarding to mcast router port");
mcast_output_add(out, mcast_xbundle);
} else if (!mcast_xbundle) {
xlate_report(ctx, OFT_WARN,
"mcast router port is unknown, dropping");
} else if (mrouter->vlan != xvlan->v[0].vid) {
xlate_report(ctx, OFT_DETAIL,
"mcast router is on another vlan, dropping");
} else {
xlate_report(ctx, OFT_DETAIL,
"mcast router port is input port, dropping");
}
}
}
/* send the packet to ports flagged to be flooded */
static void
xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
struct mcast_snooping *ms,
struct xbundle *in_xbundle,
struct mcast_output *out)
OVS_REQ_RDLOCK(ms->rwlock)
{
struct mcast_port_bundle *fport;
struct xbundle *mcast_xbundle;
LIST_FOR_EACH(fport, node, &ms->fport_list) {
mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
if (mcast_xbundle && mcast_xbundle != in_xbundle) {
xlate_report(ctx, OFT_DETAIL, "forwarding to mcast flood port");
mcast_output_add(out, mcast_xbundle);
} else if (!mcast_xbundle) {
xlate_report(ctx, OFT_WARN,
"mcast flood port is unknown, dropping");
} else {
xlate_report(ctx, OFT_DETAIL,
"mcast flood port is input port, dropping");
}
}
}
/* forward the Reports to configured ports */
static void
xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
struct mcast_snooping *ms,
struct xbundle *in_xbundle,
struct mcast_output *out)
OVS_REQ_RDLOCK(ms->rwlock)
{
struct mcast_port_bundle *rport;
struct xbundle *mcast_xbundle;
LIST_FOR_EACH(rport, node, &ms->rport_list) {
mcast_xbundle = xbundle_lookup(ctx->xcfg, rport->port);
if (mcast_xbundle
&& mcast_xbundle != in_xbundle
&& mcast_xbundle->ofbundle != in_xbundle->ofbundle) {
xlate_report(ctx, OFT_DETAIL,
"forwarding report to mcast flagged port");
mcast_output_add(out, mcast_xbundle);
} else if (!mcast_xbundle) {
xlate_report(ctx, OFT_WARN,
"mcast port is unknown, dropping the report");
} else {
xlate_report(ctx, OFT_DETAIL,
"mcast port is input port, dropping the Report");
}
}
}
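/* Floods the packet in 'ctx' to every floodable bundle that includes 'xvlan',
* except the input bundle and bundles reserved for mirror output. */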
static void
xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
struct xvlan *xvlan)
{
struct xbundle *xbundle;
LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
if (xbundle != in_xbundle
&& xbundle->ofbundle != in_xbundle->ofbundle
&& xbundle_includes_vlan(xbundle, xvlan)
&& xbundle->floodable
&& !xbundle_mirror_out(ctx->xbridge, xbundle)) {
output_normal(ctx, xbundle, xvlan);
}
}
ctx->nf_output_iface = NF_OUT_FLOOD;
}
static bool
is_ip_local_multicast(const struct flow *flow, struct flow_wildcards *wc)
{
if (flow->dl_type == htons(ETH_TYPE_IP)) {
memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
return ip_is_local_multicast(flow->nw_dst);
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
return ipv6_is_all_hosts(&flow->ipv6_dst);
} else {
return false;
}
}
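/* Implements the OFPP_NORMAL action: VLAN processing, MAC learning, multicast
* snooping, and forwarding to the learned port or flooding when the
* destination is unknown. */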
static void
xlate_normal(struct xlate_ctx *ctx)
{
struct flow_wildcards *wc = ctx->wc;
struct flow *flow = &ctx->xin->flow;
struct xbundle *in_xbundle;
struct xport *in_port;
struct mac_entry *mac;
void *mac_port;
struct xvlan in_xvlan;
struct xvlan xvlan;
uint16_t vlan;
memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
in_xbundle = lookup_input_bundle(ctx, flow->in_port.ofp_port, &in_port);
if (!in_xbundle) {
xlate_report(ctx, OFT_WARN, "no input bundle, dropping");
return;
}
/* Drop malformed frames. */
if (eth_type_vlan(flow->dl_type) &&
!(flow->vlans[0].tci & htons(VLAN_CFI))) {
if (ctx->xin->packet != NULL) {
xlate_report_error(ctx, "dropping packet with partial "
"VLAN tag received on port %s",
in_xbundle->name);
}
xlate_report(ctx, OFT_WARN, "partial VLAN tag, dropping");
return;
}
/* Drop frames on bundles reserved for mirroring. */
if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
if (ctx->xin->packet != NULL) {
xlate_report_error(ctx, "dropping packet received on port %s, "
"which is reserved exclusively for mirroring",
in_xbundle->name);
}
xlate_report(ctx, OFT_WARN,
"input port is mirror output port, dropping");
return;
}
/* Check VLAN. */
xvlan_extract(flow, &in_xvlan);
if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, in_xbundle)) {
xlate_report(ctx, OFT_WARN,
"disallowed VLAN VID for this input port, dropping");
return;
}
xvlan_input_translate(in_xbundle, &in_xvlan, &xvlan);
vlan = xvlan.v[0].vid;
/* Check other admissibility requirements. */
if (in_port && !is_admissible(ctx, in_port, vlan)) {
return;
}
/* Learn source MAC. */
bool is_grat_arp = is_gratuitous_arp(flow, wc);
if (ctx->xin->allow_side_effects
&& flow->packet_type == htonl(PT_ETH)
&& in_port->pt_mode != NETDEV_PT_LEGACY_L3
) {
update_learning_table(ctx, in_xbundle, flow->dl_src, vlan,
is_grat_arp);
}
if (ctx->xin->xcache && in_xbundle != &ofpp_none_bundle) {
struct xc_entry *entry;
/* Save just enough info to update mac learning table later. */
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
entry->normal.ofproto = ctx->xbridge->ofproto;
entry->normal.in_port = flow->in_port.ofp_port;
entry->normal.dl_src = flow->dl_src;
entry->normal.vlan = vlan;
entry->normal.is_gratuitous_arp = is_grat_arp;
}
/* Determine output bundle. */
if (mcast_snooping_enabled(ctx->xbridge->ms)
&& !eth_addr_is_broadcast(flow->dl_dst)
&& eth_addr_is_multicast(flow->dl_dst)
&& is_ip_any(flow)) {
struct mcast_snooping *ms = ctx->xbridge->ms;
struct mcast_group *grp = NULL;
if (is_igmp(flow, wc)) {
/*
* IGMP packets need to take the slow path so that they are
* processed for mdb updates. That prevents group expirations
* from firing even after hosts have sent reports.
*/
ctx->xout->slow |= SLOW_ACTION;
memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
if (mcast_snooping_is_membership(flow->tp_src) ||
mcast_snooping_is_query(flow->tp_src)) {
if (ctx->xin->allow_side_effects && ctx->xin->packet) {
update_mcast_snooping_table(ctx, flow, vlan,
in_xbundle, ctx->xin->packet);
}
}
if (mcast_snooping_is_membership(flow->tp_src)) {
struct mcast_output out = MCAST_OUTPUT_INIT;
ovs_rwlock_rdlock(&ms->rwlock);
xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
&out);
/* RFC4541: section 2.1.1, item 1: A snooping switch should
* forward IGMP Membership Reports only to those ports where
* multicast routers are attached. Alternatively stated: a
* snooping switch should not forward IGMP Membership Reports
* to ports on which only hosts are attached.
* An administrative control may be provided to override this
* restriction, allowing the report messages to be flooded to
* other ports. */
xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &out);
ovs_rwlock_unlock(&ms->rwlock);
mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
} else {
xlate_report(ctx, OFT_DETAIL, "multicast traffic, flooding");
xlate_normal_flood(ctx, in_xbundle, &xvlan);
}
return;
} else if (is_mld(flow, wc)) {
ctx->xout->slow |= SLOW_ACTION;
if (ctx->xin->allow_side_effects && ctx->xin->packet) {
update_mcast_snooping_table(ctx, flow, vlan,
in_xbundle, ctx->xin->packet);
}
if (is_mld_report(flow, wc)) {
struct mcast_output out = MCAST_OUTPUT_INIT;
ovs_rwlock_rdlock(&ms->rwlock);
xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
&out);
xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &out);
ovs_rwlock_unlock(&ms->rwlock);
mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
} else {
xlate_report(ctx, OFT_DETAIL, "MLD query, flooding");
xlate_normal_flood(ctx, in_xbundle, &xvlan);
}
} else {
if (is_ip_local_multicast(flow, wc)) {
/* RFC4541: section 2.1.2, item 2: Packets with a dst IP
* address in the 224.0.0.x range which are not IGMP must
* be forwarded on all ports */
xlate_report(ctx, OFT_DETAIL,
"RFC4541: section 2.1.2, item 2, flooding");
xlate_normal_flood(ctx, in_xbundle, &xvlan);
return;
}
}
/* forwarding to group base ports */
struct mcast_output out = MCAST_OUTPUT_INIT;
ovs_rwlock_rdlock(&ms->rwlock);
if (flow->dl_type == htons(ETH_TYPE_IP)) {
grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
}
if (grp) {
xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, &out);
xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &out);
xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
&out);
} else {
if (mcast_snooping_flood_unreg(ms)) {
xlate_report(ctx, OFT_DETAIL,
"unregistered multicast, flooding");
out.flood = true;
} else {
xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
&out);
xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &out);
}
}
ovs_rwlock_unlock(&ms->rwlock);
mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
} else {
ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
if (mac_port) {
struct xbundle *mac_xbundle = xbundle_lookup(ctx->xcfg, mac_port);
if (mac_xbundle
&& mac_xbundle != in_xbundle
&& mac_xbundle->ofbundle != in_xbundle->ofbundle) {
xlate_report(ctx, OFT_DETAIL, "forwarding to learned port");
output_normal(ctx, mac_xbundle, &xvlan);
} else if (!mac_xbundle) {
xlate_report(ctx, OFT_WARN,
"learned port is unknown, dropping");
} else {
xlate_report(ctx, OFT_DETAIL,
"learned port is input port, dropping");
}
} else {
xlate_report(ctx, OFT_DETAIL,
"no learned MAC for destination, flooding");
xlate_normal_flood(ctx, in_xbundle, &xvlan);
}
}
}
/* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'. The
* 'probability' is the number of packets out of UINT32_MAX to sample. The
* 'cookie' is passed back in the callback for each sampled packet.
* 'tunnel_out_port', if not ODPP_NONE, is added as the
* OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute. If 'include_actions',
* an OVS_USERSPACE_ATTR_ACTIONS attribute is added. If
* 'emit_set_tunnel', sample(sampling_port=1) would translate into
* datapath sample action set(tunnel(...)), sample(...) and it is used
* for sampling egress tunnel information.
*/
static size_t
compose_sample_action(struct xlate_ctx *ctx,
const uint32_t probability,
const struct user_action_cookie *cookie,
const odp_port_t tunnel_out_port,
bool include_actions)
{
if (probability == 0) {
/* No need to generate sampling or the inner action. */
return 0;
}
/* If the slow path meter is configured by the controller,
* insert a meter action before the user space action. */
struct ofproto *ofproto = &ctx->xin->ofproto->up;
uint32_t meter_id = ofproto->slowpath_meter_id;
/* When a meter action is not required, avoid generating a sample action
* for a 100% sampling rate. */
bool is_sample = probability < UINT32_MAX || meter_id != UINT32_MAX;
size_t sample_offset = 0, actions_offset = 0;
if (is_sample) {
sample_offset = nl_msg_start_nested(ctx->odp_actions,
OVS_ACTION_ATTR_SAMPLE);
nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
probability);
actions_offset = nl_msg_start_nested(ctx->odp_actions,
OVS_SAMPLE_ATTR_ACTIONS);
}
if (meter_id != UINT32_MAX) {
nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
}
odp_port_t odp_port = ofp_port_to_odp_port(
ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port);
size_t cookie_offset = odp_put_userspace_action(pid, cookie,
sizeof *cookie,
tunnel_out_port,
include_actions,
ctx->odp_actions);
if (is_sample) {
nl_msg_end_nested(ctx->odp_actions, actions_offset);
nl_msg_end_nested(ctx->odp_actions, sample_offset);
}
return cookie_offset;
}
/* If sFlow is not enabled, returns 0 without doing anything.
*
* If sFlow is enabled, appends a template "sample" action to the ODP actions
* in 'ctx'. This action is a template because some of the information needed
* to fill it out is not available until flow translation is complete. In this
case, this function returns an offset, which is always nonzero, to pass
* later to fix_sflow_action() to fill in the rest of the template. */
static size_t
compose_sflow_action(struct xlate_ctx *ctx)
{
struct dpif_sflow *sflow = ctx->xbridge->sflow;
if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
return 0;
}
struct user_action_cookie cookie = {
.type = USER_ACTION_COOKIE_SFLOW,
.ofp_in_port = ctx->xin->flow.in_port.ofp_port,
.ofproto_uuid = ctx->xbridge->ofproto->uuid
};
return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
&cookie, ODPP_NONE, true);
}
/* If flow IPFIX is enabled, make sure IPFIX flow sample action
* at egress point of tunnel port is just in front of corresponding
* output action. If bridge IPFIX is enabled, this appends an IPFIX
* sample action to 'ctx->odp_actions'. */
static void
compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
{
struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
odp_port_t tunnel_out_port = ODPP_NONE;
if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
return;
}
/* For input case, output_odp_port is ODPP_NONE, which is an invalid port
* number. */
if (output_odp_port == ODPP_NONE &&
!dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
return;
}
/* For output case, output_odp_port is valid. */
if (output_odp_port != ODPP_NONE) {
if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
return;
}
/* If tunnel sampling is enabled, put an additional option attribute:
* OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT
*/
if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
dpif_ipfix_is_tunnel_port(ipfix, output_odp_port) ) {
tunnel_out_port = output_odp_port;
}
}
struct user_action_cookie cookie = {
.type = USER_ACTION_COOKIE_IPFIX,
.ofp_in_port = ctx->xin->flow.in_port.ofp_port,
.ofproto_uuid = ctx->xbridge->ofproto->uuid,
.ipfix.output_odp_port = output_odp_port
};
compose_sample_action(ctx,
dpif_ipfix_get_bridge_exporter_probability(ipfix),
&cookie, tunnel_out_port, false);
}
/* Fix "sample" action according to data collected while composing ODP actions,
* as described in compose_sflow_action().
*
* 'user_cookie_offset' must be the offset returned by
* compose_sflow_action(). */
static void
fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
{
const struct flow *base = &ctx->base_flow;
struct user_action_cookie *cookie;
cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset, sizeof *cookie);
ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
cookie->sflow.vlan_tci = base->vlans[0].tci;
/* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
* port information") for the interpretation of cookie->output. */
switch (ctx->sflow_n_outputs) {
case 0:
/* 0x40000000 | 256 means "packet dropped for unknown reason". */
cookie->sflow.output = 0x40000000 | 256;
break;
case 1:
cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
ctx->xbridge->sflow, ctx->sflow_odp_port);
if (cookie->sflow.output) {
break;
}
/* Fall through. */
default:
/* 0x80000000 means "multiple output ports". */
cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
break;
}
}
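/* Checks whether the packet in 'ctx' belongs to a link-level protocol that is
* handled outside the OpenFlow pipeline (CFM, BFD, LACP, STP/RSTP or LLDP) on
* 'xport'. If so, processes the packet (when one is present), marks the flow
* for the slow path, and returns true. */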
static bool
process_special(struct xlate_ctx *ctx, const struct xport *xport)
{
const struct flow *flow = &ctx->xin->flow;
struct flow_wildcards *wc = ctx->wc;
const struct xbridge *xbridge = ctx->xbridge;
const struct dp_packet *packet = ctx->xin->packet;
enum slow_path_reason slow;
if (!xport) {
slow = 0;
} else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
if (packet) {
cfm_process_heartbeat(xport->cfm, packet);
}
slow = SLOW_CFM;
} else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
if (packet) {
bfd_process_packet(xport->bfd, flow, packet);
/* If POLL received, immediately sends FINAL back. */
if (bfd_should_send_packet(xport->bfd)) {
ofproto_dpif_monitor_port_send_soon(xport->ofport);
}
}
slow = SLOW_BFD;
} else if (xport->xbundle && xport->xbundle->lacp
&& flow->dl_type == htons(ETH_TYPE_LACP)) {
if (packet) {
lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
}
slow = SLOW_LACP;
} else if ((xbridge->stp || xbridge->rstp) &&
stp_should_process_flow(flow, wc)) {
if (packet) {
xbridge->stp
? stp_process_packet(xport, packet)
: rstp_process_packet(xport, packet);
}
slow = SLOW_STP;
} else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
if (packet) {
lldp_process_packet(xport->lldp, packet);
}
slow = SLOW_LLDP;
} else {
slow = 0;
}
if (slow) {
ctx->xout->slow |= slow;
return true;
} else {
return false;
}
}
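/* Looks up the route toward the tunnel destination in 'oflow'. On success,
* stores the next hop (the gateway if there is one, otherwise the destination
* itself) in '*ip', the source address in '*src' and the egress xport in
* '*out_port', and returns 0. Returns -ENOENT if no route or no matching
* port is found. */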
static int
tnl_route_lookup_flow(const struct xlate_ctx *ctx,
const struct flow *oflow,
struct in6_addr *ip, struct in6_addr *src,
struct xport **out_port)
{
char out_dev[IFNAMSIZ];
struct xbridge *xbridge;
struct in6_addr gw;
struct in6_addr dst;
dst = flow_tnl_dst(&oflow->tunnel);
if (!ovs_router_lookup(oflow->pkt_mark, &dst, out_dev, src, &gw)) {
return -ENOENT;
}
if (ipv6_addr_is_set(&gw) &&
(!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
*ip = gw;
} else {
*ip = dst;
}
HMAP_FOR_EACH (xbridge, hmap_node, &ctx->xcfg->xbridges) {
if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
struct xport *port;
HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
if (!strncmp(netdev_get_name(port->netdev), out_dev, IFNAMSIZ)) {
*out_port = port;
return 0;
}
}
}
}
return -ENOENT;
}
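/* Injects 'packet' (e.g. an ARP or ND request generated for native tunneling)
* into the OpenFlow pipeline of 'out_dev''s bridge by executing an output to
* OFPP_TABLE with 'out_dev' as the input port. */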
static int
compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
struct dp_packet *packet)
{
struct xbridge *xbridge = out_dev->xbridge;
struct ofpact_output output;
struct flow flow;
ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
flow_extract(packet, &flow);
flow.in_port.ofp_port = out_dev->ofp_port;
output.port = OFPP_TABLE;
output.max_len = 0;
return ofproto_dpif_execute_actions__(xbridge->ofproto,
ctx->xin->tables_version, &flow,
NULL, &output.ofpact, sizeof output,
ctx->depth, ctx->resubmits, packet);
}
static void
tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
const struct eth_addr eth_src,
struct in6_addr * ipv6_src, struct in6_addr * ipv6_dst)
{
struct dp_packet packet;
dp_packet_init(&packet, 0);
compose_nd_ns(&packet, eth_src, ipv6_src, ipv6_dst);
compose_table_xlate(ctx, out_dev, &packet);
dp_packet_uninit(&packet);
}
static void
tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
const struct eth_addr eth_src,
ovs_be32 ip_src, ovs_be32 ip_dst)
{
struct dp_packet packet;
dp_packet_init(&packet, 0);
compose_arp(&packet, ARP_OP_REQUEST,
eth_src, eth_addr_zero, true, ip_src, ip_dst);
compose_table_xlate(ctx, out_dev, &packet);
dp_packet_uninit(&packet);
}
static void
propagate_tunnel_data_to_flow__(struct flow *dst_flow,
const struct flow *src_flow,
struct eth_addr dmac, struct eth_addr smac,
struct in6_addr s_ip6, ovs_be32 s_ip,
bool is_tnl_ipv6, uint8_t nw_proto)
{
dst_flow->dl_dst = dmac;
dst_flow->dl_src = smac;
dst_flow->packet_type = htonl(PT_ETH);
dst_flow->nw_dst = src_flow->tunnel.ip_dst;
dst_flow->nw_src = src_flow->tunnel.ip_src;
dst_flow->ipv6_dst = src_flow->tunnel.ipv6_dst;
dst_flow->ipv6_src = src_flow->tunnel.ipv6_src;
dst_flow->nw_frag = 0; /* Tunnel packets are unfragmented. */
dst_flow->nw_tos = src_flow->tunnel.ip_tos;
dst_flow->nw_ttl = src_flow->tunnel.ip_ttl;
dst_flow->tp_dst = src_flow->tunnel.tp_dst;
dst_flow->tp_src = src_flow->tunnel.tp_src;
if (is_tnl_ipv6) {
dst_flow->dl_type = htons(ETH_TYPE_IPV6);
if (ipv6_mask_is_any(&dst_flow->ipv6_src)
&& !ipv6_mask_is_any(&s_ip6)) {
dst_flow->ipv6_src = s_ip6;
}
} else {
dst_flow->dl_type = htons(ETH_TYPE_IP);
if (dst_flow->nw_src == 0 && s_ip) {
dst_flow->nw_src = s_ip;
}
}
dst_flow->nw_proto = nw_proto;
}
/*
* Populate the 'flow' and 'base_flow' L3 fields to do the post tunnel push
* translations.
*/
static void
propagate_tunnel_data_to_flow(struct xlate_ctx *ctx, struct eth_addr dmac,
struct eth_addr smac, struct in6_addr s_ip6,
ovs_be32 s_ip, bool is_tnl_ipv6,
enum ovs_vport_type tnl_type)
{
struct flow *base_flow, *flow;
flow = &ctx->xin->flow;
base_flow = &ctx->base_flow;
uint8_t nw_proto = 0;
switch (tnl_type) {
case OVS_VPORT_TYPE_GRE:
case OVS_VPORT_TYPE_ERSPAN:
case OVS_VPORT_TYPE_IP6ERSPAN:
case OVS_VPORT_TYPE_IP6GRE:
nw_proto = IPPROTO_GRE;
break;
case OVS_VPORT_TYPE_VXLAN:
case OVS_VPORT_TYPE_GENEVE:
nw_proto = IPPROTO_UDP;
break;
case OVS_VPORT_TYPE_LISP:
case OVS_VPORT_TYPE_STT:
case OVS_VPORT_TYPE_UNSPEC:
case OVS_VPORT_TYPE_NETDEV:
case OVS_VPORT_TYPE_INTERNAL:
case __OVS_VPORT_TYPE_MAX:
default:
OVS_NOT_REACHED();
}
/*
* Update base_flow first and flow second: both calls take 'flow' as their
* source, and the second call modifies 'flow' in place.
*/
propagate_tunnel_data_to_flow__(base_flow, flow, dmac, smac, s_ip6, s_ip,
is_tnl_ipv6, nw_proto);
propagate_tunnel_data_to_flow__(flow, flow, dmac, smac, s_ip6, s_ip,
is_tnl_ipv6, nw_proto);
}
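/* Handles output to a native tunnel port: resolves the route and next-hop MAC
* address for the tunnel destination, builds and pushes the tunnel header,
* and then continues translation as if the packet had been forwarded to the
* underlay bridge port. Returns 0 on success or an errno value if the route
* or neighbor lookup fails (in the latter case an ARP or ND request is
* sent). */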
static int
native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport,
const struct flow *flow, odp_port_t tunnel_odp_port,
bool truncate)
{
struct netdev_tnl_build_header_params tnl_params;
struct ovs_action_push_tnl tnl_push_data;
struct xport *out_dev = NULL;
ovs_be32 s_ip = 0, d_ip = 0;
struct in6_addr s_ip6 = in6addr_any;
struct in6_addr d_ip6 = in6addr_any;
struct eth_addr smac;
struct eth_addr dmac;
int err;
char buf_sip6[INET6_ADDRSTRLEN];
char buf_dip6[INET6_ADDRSTRLEN];
/* Store sFlow data. */
uint32_t sflow_n_outputs = ctx->sflow_n_outputs;
/* Structures to back up Ethernet and IP of base_flow. */
struct flow old_base_flow;
struct flow old_flow;
/* Back up flow & base_flow data. */
memcpy(&old_base_flow, &ctx->base_flow, sizeof old_base_flow);
memcpy(&old_flow, &ctx->xin->flow, sizeof old_flow);
if (flow->tunnel.ip_src) {
in6_addr_set_mapped_ipv4(&s_ip6, flow->tunnel.ip_src);
}
err = tnl_route_lookup_flow(ctx, flow, &d_ip6, &s_ip6, &out_dev);
if (err) {
xlate_report(ctx, OFT_WARN, "native tunnel routing failed");
return err;
}
xlate_report(ctx, OFT_DETAIL, "tunneling to %s via %s",
ipv6_string_mapped(buf_dip6, &d_ip6),
netdev_get_name(out_dev->netdev));
/* Use mac addr of bridge port of the peer. */
err = netdev_get_etheraddr(out_dev->netdev, &smac);
if (err) {
xlate_report(ctx, OFT_WARN,
"tunnel output device lacks Ethernet address");
return err;
}
d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
if (d_ip) {
s_ip = in6_addr_get_mapped_ipv4(&s_ip6);
}
err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
if (err) {
xlate_report(ctx, OFT_DETAIL,
"neighbor cache miss for %s on bridge %s, "
"sending %s request",
buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
if (d_ip) {
tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
} else {
tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
}
return err;
}
if (ctx->xin->xcache) {
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
ovs_strlcpy(entry->tnl_neigh_cache.br_name, out_dev->xbridge->name,
sizeof entry->tnl_neigh_cache.br_name);
entry->tnl_neigh_cache.d_ipv6 = d_ip6;
}
xlate_report(ctx, OFT_DETAIL, "tunneling from "ETH_ADDR_FMT" %s"
" to "ETH_ADDR_FMT" %s",
ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
ETH_ADDR_ARGS(dmac), buf_dip6);
netdev_init_tnl_build_header_params(&tnl_params, flow, &s_ip6, dmac, smac);
err = tnl_port_build_header(xport->ofport, &tnl_push_data, &tnl_params);
if (err) {
return err;
}
tnl_push_data.tnl_port = tunnel_odp_port;
tnl_push_data.out_port = out_dev->odp_port;
/* After the tunnel header has been added, the MAC and IP data of 'flow' and
* 'base_flow' need to be set properly, since there is no recirculation
* anymore when sending the packet to the tunnel. */
propagate_tunnel_data_to_flow(ctx, dmac, smac, s_ip6,
s_ip, tnl_params.is_ipv6,
tnl_push_data.tnl_type);
size_t clone_ofs = 0;
size_t push_action_size;
clone_ofs = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
push_action_size = ctx->odp_actions->size;
if (!truncate) {
const struct dpif_flow_stats *backup_resubmit_stats;
struct xlate_cache *backup_xcache;
struct flow_wildcards *backup_wc, wc;
bool backup_side_effects;
const struct dp_packet *backup_packet;
memset(&wc, 0 , sizeof wc);
backup_wc = ctx->wc;
ctx->wc = &wc;
ctx->xin->wc = NULL;
backup_resubmit_stats = ctx->xin->resubmit_stats;
backup_xcache = ctx->xin->xcache;
backup_side_effects = ctx->xin->allow_side_effects;
backup_packet = ctx->xin->packet;
ctx->xin->resubmit_stats = NULL;
ctx->xin->xcache = xlate_cache_new(); /* Use new temporary cache. */
ctx->xin->allow_side_effects = false;
ctx->xin->packet = NULL;
/* Push the cache entry for the tunnel first. */
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TUNNEL_HEADER);
entry->tunnel_hdr.hdr_size = tnl_push_data.header_len;
entry->tunnel_hdr.operation = ADD;
patch_port_output(ctx, xport, out_dev);
/* Similar to the stats update in revalidation, the xcache entries
* populated by the previous translation are used to update the
* stats correctly.
*/
if (backup_resubmit_stats) {
struct dpif_flow_stats stats = *backup_resubmit_stats;
xlate_push_stats(ctx->xin->xcache, &stats);
}
xlate_cache_steal_entries(backup_xcache, ctx->xin->xcache);
if (ctx->odp_actions->size > push_action_size) {
nl_msg_end_non_empty_nested(ctx->odp_actions, clone_ofs);
} else {
nl_msg_cancel_nested(ctx->odp_actions, clone_ofs);
}
/* Restore context status. */
ctx->xin->resubmit_stats = backup_resubmit_stats;
xlate_cache_delete(ctx->xin->xcache);
ctx->xin->xcache = backup_xcache;
ctx->xin->allow_side_effects = backup_side_effects;
ctx->xin->packet = backup_packet;
ctx->wc = backup_wc;
} else {
/* In order to maintain accurate stats, use recirc for
* native tunneling. */
nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, 0);
nl_msg_end_nested(ctx->odp_actions, clone_ofs);
}
/* Restore the flows after the translation. */
memcpy(&ctx->xin->flow, &old_flow, sizeof ctx->xin->flow);
memcpy(&ctx->base_flow, &old_base_flow, sizeof ctx->base_flow);
/* Restore sFlow data. */
ctx->sflow_n_outputs = sflow_n_outputs;
return 0;
}
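/* Commits pending modifications to the packet: emits datapath actions for the
* differences between 'ctx->xin->flow' and 'ctx->base_flow' (including any
* pending encap or decap), so that subsequent actions such as output see the
* modified packet. */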
static void
xlate_commit_actions(struct xlate_ctx *ctx)
{
bool use_masked = ctx->xbridge->support.masked_set_action;
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
ctx->odp_actions, ctx->wc,
use_masked, ctx->pending_encap,
ctx->pending_decap, ctx->encap_data);
ctx->pending_encap = false;
ctx->pending_decap = false;