diff --git a/examples/datacenter.py b/examples/datacenter.py
index 73652d9..3b051a6 100644
--- a/examples/datacenter.py
+++ b/examples/datacenter.py
@@ -34,4 +34,4 @@
 fnss.set_delays_constant(topology, 10, 'ns')
 
 # save topology to a file
-fnss.write_topology(topology, 'datacenter_topology.xml')
\ No newline at end of file
+fnss.write_topology(topology, 'datacenter_topology.xml')
diff --git a/examples/ns2.py b/examples/ns2.py
index 228e78a..fbf2e97 100644
--- a/examples/ns2.py
+++ b/examples/ns2.py
@@ -29,4 +29,4 @@
 fnss.add_application(topology, 9, 'ftp', ftp_app_props)
 
 # export topology to a Tcl script for ns-2
-fnss.to_ns2(topology, 'ns2-script.tcl', stacks=True)
\ No newline at end of file
+fnss.to_ns2(topology, 'ns2-script.tcl', stacks=True)
diff --git a/fnss/adapters/jfed.py b/fnss/adapters/jfed.py
index 2963006..398db1e 100644
--- a/fnss/adapters/jfed.py
+++ b/fnss/adapters/jfed.py
@@ -57,8 +57,8 @@ def to_jfed(topology, path, testbed="wall1.ilabt.iminds.be", encoding="utf-8", p
         pos = nx.random_layout(topology)
     # Create mapping between links and interface IDs
     if_names = {}
-    for v in topology.edge:
-        next_hops = sorted(topology.edge[v].keys())
+    for v in topology.adj:
+        next_hops = sorted(topology.adj[v].keys())
         if_names[v] = {next_hop: i for i, next_hop in enumerate(next_hops)}
     head = ET.Element('rspec')
     head.attrib["generated_by"] = "FNSS"
@@ -69,7 +69,7 @@
     head.attrib["xmlns:delay"] = "http://www.protogeni.net/resources/rspec/ext/delay/1"
     head.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
     # Iterate over nodes
-    for v in topology.nodes_iter():
+    for v in topology.nodes():
         node = ET.SubElement(head, 'node')
         node.attrib['client_id'] = "node%s" % str(v)
         node.attrib['component_manager_id'] = "urn:publicid:IDN+%s+authority+cm" % testbed
@@ -86,7 +86,7 @@
     # The convention in jFed is to identify links with "linkX" where X is an
    # integer but making sure that links and nodes have different integers
     link_id = topology.number_of_nodes() - 1
-    for u, v in topology.edges_iter():
+    for u, v in topology.edges():
         link_id += 1
         link = ET.SubElement(head, 'link')
         link.attrib['client_id'] = "link%s" % str(link_id)
@@ -184,7 +184,7 @@ def from_jfed(path):
             if 'latency' in prop.attrib:
                 edge_attr['delay'] = prop.attrib['latency']
                 has_delays = True
-            topology.add_edge(u, v, edge_attr)
+            topology.add_edge(u, v, **edge_attr)
     # Set capacity and delay units
     if has_capacities:
         topology.graph['capacity_unit'] = 'kbps'
diff --git a/fnss/adapters/mn.py b/fnss/adapters/mn.py
index 34c1503..4bff04d 100644
--- a/fnss/adapters/mn.py
+++ b/fnss/adapters/mn.py
@@ -38,7 +38,7 @@ def from_mininet(topology):
         fnss_topo.add_edge(u, v)
         opts = topology.linkInfo(u, v)
         if 'bw' in opts:
-            fnss_topo.edge[u][v]['capacity'] = opts['bw']
+            fnss_topo.adj[u][v]['capacity'] = opts['bw']
         if 'delay' in opts:
             delay = opts['delay']
             val = re.findall("\d+\.?\d*", delay)[0]
@@ -103,12 +103,12 @@ def to_mininet(topology, switches=None, hosts=None, relabel_nodes=True):
        raise ImportError('Cannot import mininet.topo package. 
' 'Make sure Mininet is installed on this machine.') if hosts is None: - hosts = (v for v in topology.nodes_iter() + hosts = (v for v in topology.nodes() if 'host' in topology.node[v]['type']) if switches is None: - switches = (v for v in topology.nodes_iter() + switches = (v for v in topology.nodes() if 'switch' in topology.node[v]['type']) - nodes = set(topology.nodes_iter()) + nodes = set(topology.nodes()) switches = set(switches) hosts = set(hosts) if not switches.isdisjoint(hosts): @@ -140,16 +140,16 @@ def to_mininet(topology, switches=None, hosts=None, relabel_nodes=True): / capacity_units['Mbps'] if delay_unit: delay_conversion = float(time_units[delay_unit]) / time_units['us'] - for u, v in topology.edges_iter(): + for u, v in topology.edges(): params = {} - if 'capacity' in topology.edge[u][v] and capacity_unit: - params['bw'] = topology.edge[u][v]['capacity'] * capacity_conversion + if 'capacity' in topology.adj[u][v] and capacity_unit: + params['bw'] = topology.adj[u][v]['capacity'] * capacity_conversion # Use Token Bucket filter to implement rate limit params['use_htb'] = True - if 'delay' in topology.edge[u][v] and delay_unit: - params['delay'] = '%sus' % str(topology.edge[u][v]['delay'] + if 'delay' in topology.adj[u][v] and delay_unit: + params['delay'] = '%sus' % str(topology.adj[u][v]['delay'] * delay_conversion) - if 'buffer_size' in topology.edge[u][v] and buffer_unit == 'packets': - params['max_queue_size'] = topology.edge[u][v]['buffer_size'] + if 'buffer_size' in topology.adj[u][v] and buffer_unit == 'packets': + params['max_queue_size'] = topology.adj[u][v]['buffer_size'] topo.addLink(str(u), str(v), **params) return topo diff --git a/fnss/adapters/ns2.py b/fnss/adapters/ns2.py index 659e79b..f166476 100644 --- a/fnss/adapters/ns2.py +++ b/fnss/adapters/ns2.py @@ -34,7 +34,7 @@ set ns [new Simulator] # Create nodes -% for node in topology.nodes_iter(): +% for node in topology.nodes(): set n${str(node)} [$ns node] % endfor @@ -42,40 +42,40 @@ set qtype DropTail ## if topology is undirected, create duplex links, otherwise simplex links % if topology.is_directed(): - % for u, v in topology.edges_iter(): -<% delay = "0" if not set_delays else str(topology.edge[u][v]['delay'] * delay_norm) %>\ -${"$ns simplex-link $n%s $n%s %sMb %sms $qtype" % (str(u), str(v), str(topology.edge[u][v]['capacity'] * capacity_norm), delay)} + % for u, v in topology.edges(): +<% delay = "0" if not set_delays else str(topology.adj[u][v]['delay'] * delay_norm) %>\ +${"$ns simplex-link $n%s $n%s %sMb %sms $qtype" % (str(u), str(v), str(topology.adj[u][v]['capacity'] * capacity_norm), delay)} % endfor % else: - % for u, v in topology.edges_iter(): -<% delay = "0" if not set_delays else str(topology.edge[u][v]['delay'] * delay_norm) %>\ -${"$ns duplex-link $n%s $n%s %sMb %sms $qtype" % (str(u), str(v), str(topology.edge[u][v]['capacity'] * capacity_norm), delay)} + % for u, v in topology.edges(): +<% delay = "0" if not set_delays else str(topology.adj[u][v]['delay'] * delay_norm) %>\ +${"$ns duplex-link $n%s $n%s %sMb %sms $qtype" % (str(u), str(v), str(topology.adj[u][v]['capacity'] * capacity_norm), delay)} % endfor %endif % if set_weights: # Set link weights - % for u, v in topology.edges_iter(): -${"$ns cost $n%s $n%s %s" % (str(u), str(v), str(topology.edge[u][v]['weight']))} + % for u, v in topology.edges(): +${"$ns cost $n%s $n%s %s" % (str(u), str(v), str(topology.adj[u][v]['weight']))} % if not topology.is_directed(): -${"$ns cost $n%s $n%s %s" % (str(v), str(u), 
str(topology.edge[v][u]['weight']))} +${"$ns cost $n%s $n%s %s" % (str(v), str(u), str(topology.adj[v][u]['weight']))} % endif % endfor % endif % if set_buffers: # Set queue sizes - % for u, v in topology.edges_iter(): -${"$ns queue-limit $n%s $n%s %s" % (str(u), str(v), str(topology.edge[u][v]['buffer']))} + % for u, v in topology.edges(): +${"$ns queue-limit $n%s $n%s %s" % (str(u), str(v), str(topology.adj[u][v]['buffer']))} % if not topology.is_directed(): -${"$ns queue-limit $n%s $n%s %s" % (str(v), str(u), str(topology.edge[v][u]['buffer']))} +${"$ns queue-limit $n%s $n%s %s" % (str(v), str(u), str(topology.adj[v][u]['buffer']))} % endif % endfor % endif % if deploy_stacks: # Deploy applications and agents - % for node in topology.nodes_iter(): + % for node in topology.nodes(): <% stack = get_stack(topology, node) if stack is None: @@ -121,7 +121,7 @@ def validate_ns2_stacks(topology): valid : bool *True* if stacks are valid ns-2 stacks, *False* otherwise """ - for node in topology.nodes_iter(): + for node in topology.nodes(): applications = get_application_names(topology, node) for name in applications: if not 'class' in get_application_properties(topology, node, name): @@ -171,8 +171,8 @@ def to_ns2(topology, path, stacks=True): set_buffers = True set_delays = True # if all links are annotated with weights, then set weights - set_weights = all('weight' in topology.edge[u][v] - for u, v in topology.edges_iter()) + set_weights = all('weight' in topology.adj[u][v] + for u, v in topology.edges()) if not 'capacity_unit' in topology.graph: raise ValueError('The given topology does not have capacity data.') @@ -196,7 +196,7 @@ def to_ns2(topology, path, stacks=True): warn('Some application stacks cannot be parsed correctly. The ' 'output file will be generated without stack assignments.') stacks = False - if not any('stack' in topology.node[v] for v in topology.nodes_iter()): + if not any('stack' in topology.node[v] for v in topology.nodes()): stacks = False template = Template(__TEMPLATE) variables = { diff --git a/fnss/adapters/omnetpp.py b/fnss/adapters/omnetpp.py index 842effe..d4313ed 100644 --- a/fnss/adapters/omnetpp.py +++ b/fnss/adapters/omnetpp.py @@ -32,8 +32,8 @@ name = "net" if name == "" else re.sub("[^A-Za-z0-9]", "_", name).strip(" _") # Get numerical ID of a node -nodes = topology.nodes() -node_map = dict((nodes[i], i) for i in range(len(nodes))) +nodes = list(topology.nodes()) +node_map = {node: i for i, node in enumerate(nodes)} %> // This is the module modelling the nodes of network @@ -44,14 +44,14 @@ // Create network network ${name} { connections allowunconnected: -% for u, v in topology.edges_iter(): +% for u, v in topology.edges(): <% attr_str = "" if set_delays: - delay = delay_norm * topology.edge[u][v]['delay'] + delay = delay_norm * topology.adj[u][v]['delay'] attr_str += " delay=%sms;" % (str(delay)) if set_capacities: - capacity = capacity_norm * topology.edge[u][v]['capacity'] + capacity = capacity_norm * topology.adj[u][v]['capacity'] attr_str += " datarate=%sMbps;" % (str(capacity)) %> ${"node[%s].ppg$o++ --> {%s} --> node[%s].pppg$i++;" % (str(node_map[u]), attr_str.strip(), str(node_map[v]))} diff --git a/fnss/netconfig/buffers.py b/fnss/netconfig/buffers.py index 51de827..1a7bf5c 100644 --- a/fnss/netconfig/buffers.py +++ b/fnss/netconfig/buffers.py @@ -41,21 +41,21 @@ def set_buffer_sizes_bw_delay_prod(topology, buffer_unit='bytes', >>> fnss.set_buffer_sizes_bw_delay_prod(topology) """ try: - assert all(('capacity' in topology.edge[u][v] - for u, v in 
topology.edges_iter())) - assert all(('delay' in topology.edge[u][v] - for u, v in topology.edges_iter())) + assert all(('capacity' in topology.adj[u][v] + for u, v in topology.edges())) + assert all(('delay' in topology.adj[u][v] + for u, v in topology.edges())) capacity_unit = topology.graph['capacity_unit'] delay_unit = topology.graph['delay_unit'] except (AssertionError, KeyError): raise ValueError('All links must have a capacity and delay attribute') topology.graph['buffer_unit'] = buffer_unit # this filters potential self-loops which would crash the function - edges = [(u, v) for (u, v) in topology.edges_iter() if u != v] + edges = [(u, v) for (u, v) in topology.edges() if u != v] # dictionary listing all end-to-end routes in which a link appears route_presence = dict(zip(edges, [[] for _ in range(len(edges))])) # dictionary with all network routes - route = nx.all_pairs_dijkstra_path(topology, weight='weight') + route = dict(nx.all_pairs_dijkstra_path(topology, weight='weight')) # Dictionary storing end-to-end path delays for each OD pair e2e_delay = {} @@ -67,12 +67,12 @@ def set_buffer_sizes_bw_delay_prod(topology, buffer_unit='bytes', continue path_delay = 0 for u, v in zip(path[:-1], path[1:]): - if 'delay' in topology.edge[u][v]: + if 'delay' in topology.adj[u][v]: if (u, v) in route_presence: route_presence[(u, v)].append((orig, dest)) else: route_presence[(v, u)].append((orig, dest)) - path_delay += topology.edge[u][v]['delay'] + path_delay += topology.adj[u][v]['delay'] else: raise ValueError('No link delays available') e2e_delay[orig][dest] = path_delay @@ -95,11 +95,11 @@ def set_buffer_sizes_bw_delay_prod(topology, buffer_unit='bytes', # link endpoint if that link was used, i.e. twice the delay of the # link if (v, u) in edges: - mean_rtt = topology.edge[u][v]['delay'] + \ - topology.edge[v][u]['delay'] + mean_rtt = topology.adj[u][v]['delay'] + \ + topology.adj[v][u]['delay'] else: try: - mean_rtt = topology.edge[u][v]['delay'] + e2e_delay[v][u] + mean_rtt = topology.adj[u][v]['delay'] + e2e_delay[v][u] except KeyError: raise ValueError('Cannot assign buffer sizes because some ' 'paths do not have corresponding return path') @@ -109,9 +109,9 @@ def set_buffer_sizes_bw_delay_prod(topology, buffer_unit='bytes', if buffer_unit == 'packets': norm_factor /= packet_size for u, v in edges: - capacity = topology.edge[u][v]['capacity'] + capacity = topology.adj[u][v]['capacity'] buffer_size = int(mean_rtt_dict[(u, v)] * capacity * norm_factor) - topology.edge[u][v]['buffer'] = buffer_size + topology.adj[u][v]['buffer'] = buffer_size return @@ -158,8 +158,8 @@ def set_buffer_sizes_link_bandwidth(topology, k=1.0, default_size=None, raise ValueError('k must be a positive number') if default_size is None: if 'capacity_unit' not in topology.graph \ - or not all('capacity' in topology.edge[u][v] - for u, v in topology.edges_iter()): + or not all('capacity' in topology.adj[u][v] + for u, v in topology.edges()): raise ValueError('All links must have a capacity attribute. 
' 'Set capacity or specify a default buffer size') topology.graph['buffer_unit'] = buffer_unit @@ -167,13 +167,13 @@ def set_buffer_sizes_link_bandwidth(topology, k=1.0, default_size=None, norm_factor = capacity_units[topology.graph['capacity_unit']] / 8.0 if buffer_unit == 'packets': norm_factor /= packet_size - for u, v in topology.edges_iter(): - if 'capacity' in topology.edge[u][v]: - capacity = topology.edge[u][v]['capacity'] + for u, v in topology.edges(): + if 'capacity' in topology.adj[u][v]: + capacity = topology.adj[u][v]['capacity'] buffer_size = int(k * capacity * norm_factor) else: buffer_size = default_size - topology.edge[u][v]['buffer'] = buffer_size + topology.adj[u][v]['buffer'] = buffer_size def set_buffer_sizes_constant(topology, buffer_size, buffer_unit='bytes', @@ -213,9 +213,9 @@ def set_buffer_sizes_constant(topology, buffer_size, buffer_unit='bytes', 'expressed in %s. Use that unit instead of %s' \ % (curr_buffer_unit, buffer_unit)) topology.graph['buffer_unit'] = buffer_unit - edges = topology.edges_iter() if interfaces is None else interfaces + edges = topology.edges() if interfaces is None else interfaces for u, v in edges: - topology.edge[u][v]['buffer'] = buffer_size + topology.adj[u][v]['buffer'] = buffer_size def get_buffer_sizes(topology): @@ -256,5 +256,5 @@ def clear_buffer_sizes(topology): The topology whose buffer sizes are cleared """ topology.graph.pop('buffer_unit', None) - for u, v in topology.edges_iter(): - topology.edge[u][v].pop('buffer', None) + for u, v in topology.edges(): + topology.adj[u][v].pop('buffer', None) diff --git a/fnss/netconfig/capacities.py b/fnss/netconfig/capacities.py index 5404e8a..2236879 100644 --- a/fnss/netconfig/capacities.py +++ b/fnss/netconfig/capacities.py @@ -3,6 +3,8 @@ Link capacities can be assigned either deterministically or randomly, according to various models. 
""" +from distutils.version import LooseVersion + import networkx as nx from fnss.util import random_from_pdf @@ -66,9 +68,9 @@ def set_capacities_constant(topology, capacity, capacity_unit='Mbps', / capacity_units[curr_capacity_unit] else: topology.graph['capacity_unit'] = capacity_unit - edges = links or topology.edges_iter() + edges = links or topology.edges() for u, v in edges: - topology.edge[u][v]['capacity'] = capacity * conversion_factor + topology.adj[u][v]['capacity'] = capacity * conversion_factor return @@ -103,8 +105,8 @@ def set_capacities_random(topology, capacity_pdf, capacity_unit='Mbps'): if any((capacity < 0 for capacity in capacity_pdf.keys())): raise ValueError('All capacities in capacity_pdf must be positive') topology.graph['capacity_unit'] = capacity_unit - for u, v in topology.edges_iter(): - topology.edge[u][v]['capacity'] = random_from_pdf(capacity_pdf) + for u, v in topology.edges(): + topology.adj[u][v]['capacity'] = random_from_pdf(capacity_pdf) return @@ -267,11 +269,11 @@ def set_capacities_degree_gravity(topology, capacities, capacity_unit='Mbps'): in_degree = nx.in_degree_centrality(topology) out_degree = nx.out_degree_centrality(topology) gravity = {(u, v): out_degree[u] * in_degree[v] - for (u, v) in topology.edges_iter()} + for (u, v) in topology.edges()} else: degree = nx.degree_centrality(topology) gravity = {(u, v): degree[u] * degree[v] - for (u, v) in topology.edges_iter()} + for (u, v) in topology.edges()} _set_capacities_proportionally(topology, capacities, gravity, capacity_unit=capacity_unit) @@ -372,7 +374,10 @@ def set_capacities_communicability_gravity(topology, capacities, capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) """ - centrality = nx.communicability_centrality(topology) + if LooseVersion(nx.__version__) < LooseVersion("2.0"): + centrality = nx.communicability_centrality(topology) + else: + centrality = nx.subgraph_centrality(topology) _set_capacities_gravity(topology, capacities, centrality, capacity_unit) @@ -419,7 +424,7 @@ def set_capacities_edge_communicability(topology, capacities, """ communicability = nx.communicability(topology) centrality = {(u, v): communicability[u][v] - for (u, v) in topology.edges_iter()} + for (u, v) in topology.edges()} _set_capacities_proportionally(topology, capacities, centrality, capacity_unit=capacity_unit) @@ -443,7 +448,7 @@ def _set_capacities_gravity(topology, capacities, node_metric, The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) 
""" gravity = {(u, v): node_metric[u] * node_metric[v] - for (u, v) in topology.edges_iter()} + for (u, v) in topology.edges()} _set_capacities_proportionally(topology, capacities, gravity, capacity_unit=capacity_unit) @@ -503,7 +508,7 @@ def _set_capacities_proportionally(topology, capacities, metric, for i, boundary in enumerate(metric_boundaries): if metric_value <= boundary: capacity = capacities[i] - topology.edge[u][v]['capacity'] = capacity + topology.adj[u][v]['capacity'] = capacity break # if the loop is not stopped yet, it means that because of float # rounding error, max_capacity < metric_boundaries[-1], so we set the @@ -511,7 +516,7 @@ def _set_capacities_proportionally(topology, capacities, metric, # Anyway, the code should never reach this point, because before the # for loop we are already adjusting the value of metric_boundaries[-1] # to make it > max_capacity - else: topology.edge[u][v]['capacity'] = capacities[-1] + else: topology.adj[u][v]['capacity'] = capacities[-1] def get_capacities(topology): @@ -550,5 +555,5 @@ def clear_capacities(topology): topology : Topology """ topology.graph.pop('capacity_unit', None) - for u, v in topology.edges_iter(): - topology.edge[u][v].pop('capacity', None) + for u, v in topology.edges(): + topology.adj[u][v].pop('capacity', None) diff --git a/fnss/netconfig/delays.py b/fnss/netconfig/delays.py index 93c77e6..3930dee 100644 --- a/fnss/netconfig/delays.py +++ b/fnss/netconfig/delays.py @@ -58,9 +58,9 @@ def set_delays_constant(topology, delay=1.0, delay_unit='ms', links=None): / time_units[curr_delay_unit] else: topology.graph['delay_unit'] = delay_unit - edges = links or topology.edges_iter() + edges = links or topology.edges() for u, v in edges: - topology.edge[u][v]['delay'] = delay * conversion_factor + topology.adj[u][v]['delay'] = delay * conversion_factor def set_delays_geo_distance(topology, specific_delay, default_delay=None, @@ -107,7 +107,7 @@ def set_delays_geo_distance(topology, specific_delay, default_delay=None, "topology (%s) is not valid" % distance_unit) edges = links or topology.edges() if default_delay is None: - if any(('length' not in topology.edge[u][v] for u, v in edges)): + if any(('length' not in topology.adj[u][v] for u, v in edges)): raise ValueError('All links must have a length attribute') if 'delay_unit' in topology.graph and links is not None: # If a delay_unit is set, that means that some links have already @@ -124,12 +124,12 @@ def set_delays_geo_distance(topology, specific_delay, default_delay=None, # factor to convert default delay in target delay unit default_conv_factor = time_units[delay_unit] / time_units[curr_delay_unit] for u, v in edges: - if 'length' in topology.edge[u][v]: - length = topology.edge[u][v]['length'] * length_conv_factor + if 'length' in topology.adj[u][v]: + length = topology.adj[u][v]['length'] * length_conv_factor delay = specific_delay * length * conv_factor else: delay = default_delay * default_conv_factor - topology.edge[u][v]['delay'] = delay + topology.adj[u][v]['delay'] = delay def get_delays(topology): @@ -168,5 +168,5 @@ def clear_delays(topology): topology : Topology """ topology.graph.pop('delay_unit', None) - for u, v in topology.edges_iter(): - topology.edge[u][v].pop('delay', None) + for u, v in topology.edges(): + topology.adj[u][v].pop('delay', None) diff --git a/fnss/netconfig/nodeconfig.py b/fnss/netconfig/nodeconfig.py index caf92b1..cdd0b9f 100644 --- a/fnss/netconfig/nodeconfig.py +++ b/fnss/netconfig/nodeconfig.py @@ -89,7 +89,7 @@ def 
clear_stacks(topology): ---------- topology : Topology """ - for v in topology.nodes_iter(): + for v in topology.nodes(): topology.node[v].pop('stack', None) def add_application(topology, node, name, properties=None, **attr): @@ -188,5 +188,5 @@ def clear_applications(topology): topology : Topology The topology """ - for v in topology.nodes_iter(): + for v in topology.nodes(): topology.node[v].pop('application', None) diff --git a/fnss/netconfig/weights.py b/fnss/netconfig/weights.py index 2aac9b7..7e44cce 100644 --- a/fnss/netconfig/weights.py +++ b/fnss/netconfig/weights.py @@ -29,14 +29,14 @@ def set_weights_inverse_capacity(topology): >>> fnss.set_weights_inverse_capacity(topology) """ try: - max_capacity = float(max((topology.edge[u][v]['capacity'] - for u, v in topology.edges_iter()))) + max_capacity = float(max((topology.adj[u][v]['capacity'] + for u, v in topology.edges()))) except KeyError: raise ValueError('All links must have a capacity attribute') - for u, v in topology.edges_iter(): - capacity = topology.edge[u][v]['capacity'] + for u, v in topology.edges(): + capacity = topology.adj[u][v]['capacity'] weight = max_capacity / capacity - topology.edge[u][v]['weight'] = weight + topology.adj[u][v]['weight'] = weight def set_weights_delays(topology): @@ -58,14 +58,14 @@ def set_weights_delays(topology): """ try: - min_delay = float(min((topology.edge[u][v]['delay'] - for u, v in topology.edges_iter()))) + min_delay = float(min((topology.adj[u][v]['delay'] + for u, v in topology.edges()))) except KeyError: raise ValueError('All links must have a delay attribute') - for u, v in topology.edges_iter(): - delay = topology.edge[u][v]['delay'] + for u, v in topology.edges(): + delay = topology.adj[u][v]['delay'] weight = delay / min_delay - topology.edge[u][v]['weight'] = weight + topology.adj[u][v]['weight'] = weight def set_weights_constant(topology, weight=1.0, links=None): @@ -89,9 +89,9 @@ def set_weights_constant(topology, weight=1.0, links=None): >>> topology.add_edges_from([(1, 2), (5, 8), (4, 5), (1, 7)]) >>> fnss.set_weights_constant(topology, weight=1.0, links=[(1, 2), (5, 8), (4, 5)]) """ - edges = links or topology.edges_iter() + edges = links or topology.edges() for u, v in edges: - topology.edge[u][v]['weight'] = weight + topology.adj[u][v]['weight'] = weight def get_weights(topology): @@ -128,5 +128,5 @@ def clear_weights(topology): ---------- topology : Topology """ - for u, v in topology.edges_iter(): - topology.edge[u][v].pop('weight', None) + for u, v in topology.edges(): + topology.adj[u][v].pop('weight', None) diff --git a/fnss/topologies/datacenter.py b/fnss/topologies/datacenter.py index 8a3e278..dc0342e 100644 --- a/fnss/topologies/datacenter.py +++ b/fnss/topologies/datacenter.py @@ -36,13 +36,13 @@ def switches(self): """ Return the list of switch nodes in the topology """ - return [v for v in self.nodes_iter() if self.node[v]['type'] == 'switch'] + return [v for v in self.nodes() if self.node[v]['type'] == 'switch'] def hosts(self): """ Return the list of host nodes in the topology """ - return [v for v in self.nodes_iter() if self.node[v]['type'] == 'host'] + return [v for v in self.nodes() if self.node[v]['type'] == 'host'] def two_tier_topology(n_core, n_edge, n_hosts): @@ -87,8 +87,8 @@ def two_tier_topology(n_core, n_edge, n_hosts): for u in range(n_core): topo.node[u]['tier'] = 'core' topo.node[u]['type'] = 'switch' - for v in topo.edge[u]: - topo.edge[u][v]['type'] = 'core_edge' + for v in topo.adj[u]: + topo.adj[u][v]['type'] = 'core_edge' for u in 
range(n_core, n_core + n_edge): topo.node[u]['tier'] = 'edge' topo.node[u]['type'] = 'switch' @@ -157,8 +157,8 @@ def three_tier_topology(n_core, n_aggregation, n_edge, n_hosts): for u in range(n_core): topo.node[u]['tier'] = 'core' topo.node[u]['type'] = 'switch' - for v in topo.edge[u]: - topo.edge[u][v]['type'] = 'core_aggregation' + for v in topo.adj[u]: + topo.adj[u][v]['type'] = 'core_aggregation' for u in range(n_core, n_core + n_aggregation): topo.node[u]['tier'] = 'aggregation' topo.node[u]['type'] = 'switch' @@ -322,7 +322,7 @@ def fat_tree_topology(k): aggr_node = n_core + (core_node // (k // 2)) + (k * pod) topo.add_edge(core_node, aggr_node, type='core_aggregation') # Create hosts and connect them to edge switches - for u in [v for v in topo.nodes_iter() if topo.node[v]['layer'] == 'edge']: + for u in [v for v in topo.nodes() if topo.node[v]['layer'] == 'edge']: leaf_nodes = range(topo.number_of_nodes(), topo.number_of_nodes() + k // 2) topo.add_nodes_from(leaf_nodes, layer='leaf', type='host', diff --git a/fnss/topologies/parsers.py b/fnss/topologies/parsers.py index 34fc7a1..6d89812 100644 --- a/fnss/topologies/parsers.py +++ b/fnss/topologies/parsers.py @@ -276,7 +276,7 @@ def parse_rocketfuel_isp_latency(latencies_path, weights_path=None): raise ValueError("The weight file includes edge (%s, %s), " "which was not included in the latencies file" % (u_str, v_str)) - topology.edge[u][v]['weight'] = weight + topology.adj[u][v]['weight'] = weight return topology @@ -481,8 +481,8 @@ def parse_abilene(topology_path, links_path=None): raise ValueError('Invalid input file. '\ 'Parsing failed while trying to '\ 'parse a link from links_file') - topology.edge[u][v]['link_index'] = link_index - topology.edge[u][v]['link_type'] = link_type + topology.adj[u][v]['link_index'] = link_index + topology.adj[u][v]['link_type'] = link_type return topology @@ -628,7 +628,7 @@ def try_convert_int(value): topology.graph['type'] = 'topology_zoo' topology.graph['distance_unit'] = 'Km' topology.graph['link_bundling'] = topo_zoo_graph.is_multigraph() - for tv in topo_zoo_graph.nodes_iter(): + for tv in topo_zoo_graph.nodes(): v = try_convert_int(tv) topology.add_node(v) if 'label' in topo_zoo_graph.node[tv]: @@ -640,7 +640,7 @@ def try_convert_int(value): topology.node[v]['latitude'] = latitude except KeyError: pass - for tv, tu in topo_zoo_graph.edges_iter(): + for tv, tu in topo_zoo_graph.edges(): v = try_convert_int(tv) u = try_convert_int(tu) if u == v: @@ -655,20 +655,20 @@ def try_convert_int(value): lat_u = topo_zoo_graph.node[tu]['Latitude'] lon_u = topo_zoo_graph.node[tu]['Longitude'] length = geographical_distance(lat_v, lon_v, lat_u, lon_u) - topology.edge[v][u]['length'] = length + topology.adj[v][u]['length'] = length if topo_zoo_graph.is_multigraph(): - edge = topo_zoo_graph.edge[tv][tu] - topology.edge[v][u]['bundle'] = True if len(edge) > 1 else False + edge = topo_zoo_graph.adj[tv][tu] + topology.adj[v][u]['bundle'] = True if len(edge) > 1 else False capacity = 0 for edge_attr in list(edge.values()): if 'LinkSpeedRaw' in edge_attr: capacity += edge_attr['LinkSpeedRaw'] if capacity > 0: - topology.edge[v][u]['capacity'] = capacity + topology.adj[v][u]['capacity'] = capacity else: - if 'LinkSpeedRaw' in topo_zoo_graph.edge[tv][tu]: - topology.edge[v][u]['capacity'] = \ - topo_zoo_graph.edge[tv][tu]['LinkSpeedRaw'] + if 'LinkSpeedRaw' in topo_zoo_graph.adj[tv][tu]: + topology.adj[v][u]['capacity'] = \ + topo_zoo_graph.adj[tv][tu]['LinkSpeedRaw'] if len(nx.get_edge_attributes(topology, 
'capacity')) > 0: topology.graph['capacity_unit'] = 'bps' return topology diff --git a/fnss/topologies/randmodels.py b/fnss/topologies/randmodels.py index 3db9e43..0aefdf1 100644 --- a/fnss/topologies/randmodels.py +++ b/fnss/topologies/randmodels.py @@ -117,7 +117,7 @@ def waxman_1_topology(n, alpha=0.4, beta=0.1, L=1.0, G.name = "waxman_1_topology(%s, %s, %s, %s)" % (n, alpha, beta, L) G.add_nodes_from(range(n)) - nodes = G.nodes() + nodes = list(G.nodes()) while nodes: u = nodes.pop() for v in nodes: @@ -189,12 +189,12 @@ def waxman_2_topology(n, alpha=0.4, beta=0.1, domain=(0, 0, 1, 1), G.add_nodes_from(range(n)) - for v in G.nodes_iter(): + for v in G.nodes(): G.node[v]['latitude'] = (ymin + (ymax - ymin)) * random.random() G.node[v]['longitude'] = (xmin + (xmax - xmin)) * random.random() l = {} - nodes = G.nodes() + nodes = list(G.nodes()) while nodes: u = nodes.pop() for v in nodes: @@ -260,9 +260,9 @@ def barabasi_albert_topology(n, m, m0, seed=None): """ def calc_pi(G): """Calculate BA Pi function for all nodes of the graph""" - degree = G.degree() + degree = dict(G.degree()) den = float(sum(degree.values())) - return {node: degree[node] / den for node in G.nodes_iter()} + return {node: degree[node] / den for node in G.nodes()} # input parameters if n < 1 or m < 1 or m0 < 1: @@ -346,9 +346,9 @@ def extended_barabasi_albert_topology(n, m, m0, p, q, seed=None): """ def calc_pi(G): """Calculate extended-BA Pi function for all nodes of the graph""" - degree = G.degree() + degree = dict(G.degree()) den = float(sum(degree.values()) + G.number_of_nodes()) - return {node: (degree[node] + 1) / den for node in G.nodes_iter()} + return {node: (degree[node] + 1) / den for node in G.nodes()} # input parameters if n < 1 or m < 1 or m0 < 1: @@ -393,10 +393,10 @@ def calc_pi(G): # rewire m links with probability q rewired_links = 0 while rewired_links < m: - i = random.choice(G.nodes()) # pick up node randomly (uniform) - if len(G.edge[i]) is 0: # if i has no edges, I cannot rewire + i = random.choice(list(G.nodes())) # pick up node randomly (uniform) + if len(G.adj[i]) is 0: # if i has no edges, I cannot rewire break - j = random.choice(list(G.edge[i].keys())) # node to be disconnected + j = random.choice(list(G.adj[i].keys())) # node to be disconnected k = random_from_pdf(pi) # new node to be connected if i is not k and j is not k and not G.has_edge(i, k): G.remove_edge(i, j) @@ -467,9 +467,9 @@ def calc_pi(G, beta): # validate input parameter if beta >= 1: raise ValueError('beta must be < 1') - degree = G.degree() + degree = dict(G.degree()) den = float(sum(degree.values()) - (G.number_of_nodes() * beta)) - return {node: (degree[node] - beta) / den for node in G.nodes_iter()} + return {node: (degree[node] - beta) / den for node in G.nodes()} def add_m_links(G, pi): """Add m links between existing nodes to the graph""" diff --git a/fnss/topologies/topology.py b/fnss/topologies/topology.py index 689cb75..cd920fd 100644 --- a/fnss/topologies/topology.py +++ b/fnss/topologies/topology.py @@ -106,7 +106,7 @@ def __init__(self, data=None, name="", **kwargs): **kwargs : keyword arguments, optional Attributes to add to graph as key=value pairs. """ - super(Topology, self).__init__(data=data, name=name, **kwargs) + super(Topology, self).__init__(data, name=name, **kwargs) def copy(self): """Return a copy of the topology. @@ -272,7 +272,7 @@ def __init__(self, data=None, name="", **kwargs): **kwargs : keyword arguments, optional Attributes to add to graph as key=value pairs. 
""" - super(DirectedTopology, self).__init__(data=data, name=name, **kwargs) + super(DirectedTopology, self).__init__(data, name=name, **kwargs) def copy(self): """Return a copy of the topology. @@ -442,7 +442,7 @@ def od_pairs_from_topology(topology): [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] """ if topology.is_directed(): - routes = nx.all_pairs_shortest_path_length(topology) + routes = dict(nx.all_pairs_shortest_path_length(topology)) return [(o, d) for o in routes for d in routes[o] if o != d] else: conn_comp = nx.connected_components(topology) @@ -489,13 +489,13 @@ def fan_in_out_capacities(topology): topology = topology.to_directed() fan_in = {} fan_out = {} - for node in topology.nodes_iter(): + for node in topology.nodes(): node_fan_in = 0 node_fan_out = 0 for predecessor in topology.predecessors(node): - node_fan_in += topology.edge[predecessor][node]['capacity'] + node_fan_in += topology.adj[predecessor][node]['capacity'] for successor in topology.successors(node): - node_fan_out += topology.edge[node][successor]['capacity'] + node_fan_out += topology.adj[node][successor]['capacity'] fan_in[node] = node_fan_in fan_out[node] = node_fan_out return fan_in, fan_out @@ -525,10 +525,10 @@ def rename_edge_attribute(topology, old_attr, new_attr): >>> topo.edges(data=True) [(1, 2, {'weight': 1}), (2, 3, {'weight': 2})] """ - for u, v in topology.edges_iter(): - if old_attr in topology.edge[u][v]: - topology.edge[u][v][new_attr] = topology.edge[u][v][old_attr] - del topology.edge[u][v][old_attr] + for u, v in topology.edges(): + if old_attr in topology.adj[u][v]: + topology.adj[u][v][new_attr] = topology.edge[u][v][old_attr] + del topology.adj[u][v][old_attr] def rename_node_attribute(topology, old_attr, new_attr): @@ -555,8 +555,8 @@ def rename_node_attribute(topology, old_attr, new_attr): >>> topo.edges(data=True) [(1, {'coordinates': (0, 0)}), (2, {'coordinates': (1, 1)})] """ - for v in topology.nodes_iter(): - if old_attr in topology.edge[v]: + for v in topology.nodes(): + if old_attr in topology.adj[v]: topology.node[v][new_attr] = topology.node[v][old_attr] del topology.node[v][old_attr] @@ -625,7 +625,7 @@ def read_topology(path, encoding='utf-8'): for prop in edge.findall('property'): name = prop.attrib['name'] value = util.xml_cast_type(prop.attrib['type'], prop.text) - topology.edge[u][v][name] = value + topology.adj[u][v][name] = value return topology @@ -651,7 +651,7 @@ def write_topology(topology, path, encoding='utf-8', prettyprint=True): prop.attrib['name'] = name prop.attrib['type'] = util.xml_type(value) prop.text = str(value) - for v in topology.nodes_iter(): + for v in topology.nodes(): node = ET.SubElement(head, 'node') node.attrib['id'] = str(v) node.attrib['id.type'] = util.xml_type(v) @@ -683,7 +683,7 @@ def write_topology(topology, path, encoding='utf-8', prettyprint=True): prop.attrib['name'] = name prop.attrib['type'] = util.xml_type(value) prop.text = str(value) - for u, v in topology.edges_iter(): + for u, v in topology.edges(): link = ET.SubElement(head, 'link') from_node = ET.SubElement(link, 'from') from_node.attrib['type'] = util.xml_type(u) @@ -691,7 +691,7 @@ def write_topology(topology, path, encoding='utf-8', prettyprint=True): to_node = ET.SubElement(link, 'to') to_node.attrib['type'] = util.xml_type(v) to_node.text = str(v) - for name, value in topology.edge[u][v].items(): + for name, value in topology.adj[u][v].items(): prop = ET.SubElement(link, 'property') prop.attrib['name'] = name prop.attrib['type'] = util.xml_type(value) diff --git 
a/fnss/traffic/trafficmatrices.py b/fnss/traffic/trafficmatrices.py index 53d7d0b..6ff34aa 100644 --- a/fnss/traffic/trafficmatrices.py +++ b/fnss/traffic/trafficmatrices.py @@ -420,20 +420,20 @@ def static_traffic_matrix(topology, mean, stddev, max_u=0.9, if o != d and d not in shortest_path[o]: od_pairs.remove((o, d)) else: - shortest_path = nx.all_pairs_dijkstra_path(topology, - weight='weight') - for u, v in topology.edges_iter(): - topology.edge[u][v]['load'] = 0.0 + shortest_path = dict(nx.all_pairs_dijkstra_path(topology, + weight='weight')) + for u, v in topology.edges(): + topology.adj[u][v]['load'] = 0.0 # Find max u for o, d in od_pairs: path = shortest_path[o][d] if len(path) > 1: for u, v in zip(path[:-1], path[1:]): - topology.edge[u][v]['load'] += assignments[(o, d)] + topology.adj[u][v]['load'] += assignments[(o, d)] # Calculate scaling - current_max_u = max((float(topology.edge[u][v]['load']) \ - / float(topology.edge[u][v]['capacity']) - for u, v in topology.edges_iter())) + current_max_u = max((float(topology.adj[u][v]['load']) \ + / float(topology.adj[u][v]['capacity']) + for u, v in topology.edges())) norm_factor = max_u / current_max_u for od_pair in assignments: assignments[od_pair] *= norm_factor @@ -551,8 +551,8 @@ def stationary_traffic_matrix(topology, mean, stddev, gamma, log_psi, n, weight='weight')) for node in origin_nodes) else: - shortest_path = nx.all_pairs_dijkstra_path(topology, - weight='weight') + shortest_path = dict(nx.all_pairs_dijkstra_path(topology, + weight='weight')) current_max_u = max((max(link_loads(topology, tm_sequence.get(i), shortest_path @@ -691,8 +691,8 @@ def sin_cyclostationary_traffic_matrix(topology, mean, stddev, gamma, log_psi, weight='weight')) for node in origin_nodes) else: - shortest_path = nx.all_pairs_dijkstra_path(topology, - weight='weight') + shortest_path = dict(nx.all_pairs_dijkstra_path(topology, + weight='weight')) current_max_u = max((max(link_loads(topology, tm_sequence.get(i), shortest_path @@ -843,14 +843,14 @@ def __nfur_func(topology, edges, betweenness): nfur = betweenness.copy() topology = topology.copy() for u, v in edges: - edge_attr = topology.edge[u][v] + edge_attr = topology.adj[u][v] topology.remove_edge(u, v) betw = nx.betweenness_centrality(topology, normalized=False, weight='weight') for node in betw.keys(): if betw[node] > nfur[node]: nfur[node] = betw[node] - topology.add_edge(u, v, edge_attr) + topology.add_edge(u, v, **edge_attr) return nfur @@ -898,15 +898,16 @@ def validate_traffic_matrix(topology, traffic_matrix, validate_load=False): od_pairs_topology = od_pairs_from_topology(topology) if validate_load: - shortest_path = nx.all_pairs_dijkstra_path(topology, weight='weight') + shortest_path = dict(nx.all_pairs_dijkstra_path(topology, + weight='weight')) for matrix in matrices: od_pairs_tm = matrix.od_pairs() # verify that OD pairs in TM are equal or subset of topology if not all(((o, d) in od_pairs_topology for o, d in od_pairs_tm)): return False if validate_load: - for u, v in topology.edges_iter(): - topology.edge[u][v]['load'] = 0 + for u, v in topology.edges(): + topology.adj[u][v]['load'] = 0 capacity_unit = capacity_units[topology.graph['capacity_unit']] volume_unit = capacity_units[matrix.attrib['volume_unit']] norm_factor = float(volume_unit) / float(capacity_unit) @@ -915,10 +916,10 @@ def validate_traffic_matrix(topology, traffic_matrix, validate_load=False): if len(path) <= 1: continue for u, v in zip(path[:-1], path[1:]): - topology.edge[u][v]['load'] += matrix.flow[o][d] - max_u = 
max((norm_factor * float(topology.edge[u][v]['load']) \ - / float(topology.edge[u][v]['capacity']) - for u, v in topology.edges_iter())) + topology.adj[u][v]['load'] += matrix.flow[o][d] + max_u = max((norm_factor * float(topology.adj[u][v]['load']) \ + / float(topology.adj[u][v]['capacity']) + for u, v in topology.edges())) if max_u > 1.0: return False return True @@ -976,9 +977,10 @@ def link_loads(topology, traffic_matrix, routing_matrix=None, ecmp=False): volume_unit = capacity_units[traffic_matrix.attrib['volume_unit']] norm_factor = float(volume_unit) / float(capacity_unit) if routing_matrix == None: - routing_matrix = nx.all_pairs_dijkstra_path(topology, weight='weight') - for u, v in topology.edges_iter(): - topology.edge[u][v]['load'] = 0 + routing_matrix = dict(nx.all_pairs_dijkstra_path(topology, + weight='weight')) + for u, v in topology.edges(): + topology.adj[u][v]['load'] = 0 od_pairs = traffic_matrix.od_pairs() def process_path(path, number_of_paths=1): @@ -986,9 +988,9 @@ def process_path(path, number_of_paths=1): return for u, v in zip(path[:-1], path[1:]): if not ecmp: - topology.edge[u][v]['load'] += traffic_matrix.flow[o][d] + topology.adj[u][v]['load'] += traffic_matrix.flow[o][d] else: - topology.edge[u][v]['load'] += \ + topology.adj[u][v]['load'] += \ traffic_matrix.flow[o][d] / float(number_of_paths) for o, d in od_pairs: @@ -1003,9 +1005,9 @@ def process_path(path, number_of_paths=1): for p in path: process_path(p, len(path)) - return {(u, v): norm_factor * float(topology.edge[u][v]['load']) \ - / float(topology.edge[u][v]['capacity']) - for u, v in topology.edges_iter()} + return {(u, v): norm_factor * float(topology.adj[u][v]['load']) \ + / float(topology.adj[u][v]['capacity']) + for u, v in topology.edges()} def read_traffic_matrix(path, encoding='utf-8'): diff --git a/setup.py b/setup.py index ac45039..5f3435a 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ # Packages required to run FNSS requires = [ - 'networkx (>=1.6,<2.0)', + 'networkx (>=1.6)', 'numpy (>=1.4)', 'mako (>=0.4)' ] diff --git a/test/test/test_adapters/test_jfed.py b/test/test/test_adapters/test_jfed.py index 6f6ae39..1094f4a 100644 --- a/test/test/test_adapters/test_jfed.py +++ b/test/test/test_adapters/test_jfed.py @@ -23,8 +23,8 @@ def test_to_from(self): t_out = fnss.from_jfed(f) self.assertEqual(t_in.number_of_nodes(), t_out.number_of_nodes()) self.assertEqual(t_in.number_of_edges(), t_out.number_of_edges()) - self.assertEqual(set(t_in.degree().values()), - set(t_out.degree().values())) + self.assertEqual(set(dict(t_in.degree()).values()), + set(dict(t_out.degree()).values())) @unittest.skipIf(TMP_DIR is None, "Temp folder not present") def test_to_jfed(self): diff --git a/test/test/test_netconfig/test_buffers.py b/test/test/test_netconfig/test_buffers.py index 919d184..0e63814 100644 --- a/test/test/test_netconfig/test_buffers.py +++ b/test/test/test_netconfig/test_buffers.py @@ -8,9 +8,9 @@ class Test(unittest.TestCase): def setUpClass(cls): cls.topo = fnss.glp_topology(n=100, m=1, m0=10, p=0.2, beta=-2, seed=1) fnss.set_capacities_random_uniform(cls.topo, [10, 20, 40]) - odd_links = [(u, v) for (u, v) in cls.topo.edges_iter() + odd_links = [(u, v) for (u, v) in cls.topo.edges() if (u + v) % 2 == 1] - even_links = [(u, v) for (u, v) in cls.topo.edges_iter() + even_links = [(u, v) for (u, v) in cls.topo.edges() if (u + v) % 2 == 0] fnss.set_delays_constant(cls.topo, 2, 'ms', odd_links) fnss.set_delays_constant(cls.topo, 5, 'ms', even_links) @@ -28,42 +28,42 @@ def tearDown(self): 
def test_buffer_sizes_bw_delay_prod(self): fnss.set_buffer_sizes_bw_delay_prod(self.topo) - self.assertTrue(all(self.topo.edge[u][v]['buffer'] is not None - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['buffer'] is not None + for (u, v) in self.topo.edges())) def test_buffer_sizes_bw_delay_prod_unused_links(self): topo = fnss.Topology() - topo.add_edge(1, 2, {'weight': 100}) - topo.add_edge(2, 3, {'weight': 1}) - topo.add_edge(3, 1, {'weight': 1}) + topo.add_edge(1, 2, weight=100) + topo.add_edge(2, 3, weight=1) + topo.add_edge(3, 1, weight=1) fnss.set_capacities_constant(topo, 10) fnss.set_delays_constant(topo, 2) fnss.set_buffer_sizes_bw_delay_prod(topo) - self.assertTrue(all((topo.edge[u][v]['buffer'] is not None - for (u, v) in topo.edges_iter()))) + self.assertTrue(all((topo.adj[u][v]['buffer'] is not None + for (u, v) in topo.edges()))) def test_buffer_sizes_bw_delay_prod_unused_links_no_return_path(self): topo = fnss.DirectedTopology() - topo.add_edge(1, 2, {'weight': 100}) - topo.add_edge(1, 3, {'weight': 1}) - topo.add_edge(3, 2, {'weight': 1}) + topo.add_edge(1, 2, weight=100) + topo.add_edge(1, 3, weight=1) + topo.add_edge(3, 2, weight=1) fnss.set_capacities_constant(topo, 10) fnss.set_delays_constant(topo, 2) self.assertRaises(ValueError, fnss.set_buffer_sizes_bw_delay_prod, topo) def test_buffer_sizes_bw_delay_prod_no_return_path(self): topo = fnss.DirectedTopology() - topo.add_edge(1, 2, {'weight': 1}) - topo.add_edge(1, 3, {'weight': 1}) - topo.add_edge(3, 2, {'weight': 1}) + topo.add_edge(1, 2, weight=1) + topo.add_edge(1, 3, weight=1) + topo.add_edge(3, 2, weight=1) fnss.set_capacities_constant(topo, 10) fnss.set_delays_constant(topo, 2) self.assertRaises(ValueError, fnss.set_buffer_sizes_bw_delay_prod, topo) def test_buffers_size_link_bandwidth(self): fnss.set_buffer_sizes_link_bandwidth(self.topo) - self.assertTrue(all(self.topo.edge[u][v]['buffer'] is not None - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['buffer'] is not None + for (u, v) in self.topo.edges())) def test_buffers_size_link_bandwidth_default_size(self): topo = fnss.line_topology(4) @@ -71,17 +71,17 @@ def test_buffers_size_link_bandwidth_default_size(self): fnss.set_capacities_constant(topo, 16, 'Mbps', [(1, 2)]) fnss.set_buffer_sizes_link_bandwidth(topo, buffer_unit='bytes', default_size=10) self.assertEquals(topo.graph['buffer_unit'], 'bytes') - self.assertEquals(topo.edge[0][1]['buffer'], 1000000) - self.assertEquals(topo.edge[1][2]['buffer'], 2000000) - self.assertEquals(topo.edge[2][3]['buffer'], 10) + self.assertEquals(topo.adj[0][1]['buffer'], 1000000) + self.assertEquals(topo.adj[1][2]['buffer'], 2000000) + self.assertEquals(topo.adj[2][3]['buffer'], 10) fnss.clear_buffer_sizes(topo) - self.assertTrue('capacity' not in topo.edge[2][3]) + self.assertTrue('capacity' not in topo.adj[2][3]) self.assertRaises(ValueError, fnss.set_buffer_sizes_link_bandwidth, topo) def test_buffers_size_constant(self): fnss.set_buffer_sizes_constant(self.topo, 65000, buffer_unit='bytes') - self.assertTrue(all(self.topo.edge[u][v]['buffer'] == 65000 - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['buffer'] == 65000 + for (u, v) in self.topo.edges())) def test_buffers_size_constant_unit_mismatch(self): # If I try to set buffer sizes to some interfaces using a unit and some diff --git a/test/test/test_netconfig/test_capacities.py b/test/test/test_netconfig/test_capacities.py index 18dd3d5..7c1ea33 100644 --- 
a/test/test/test_netconfig/test_capacities.py +++ b/test/test/test_netconfig/test_capacities.py @@ -27,55 +27,55 @@ def test_capacities_constant(self): fnss.set_capacities_constant(self.topo, 2, 'Mbps', odd_links) fnss.set_capacities_constant(self.topo, 5000, 'Kbps', even_links) self.assertEqual('Mbps', self.topo.graph['capacity_unit']) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in [2, 5] - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in [2, 5] + for (u, v) in self.topo.edges())) def test_capacities_edge_betweenness(self): fnss.set_capacities_edge_betweenness(self.topo, self.capacities) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) @unittest.skipUnless(package_available('scipy'), 'Requires Scipy') def test_capacities_edge_communicability(self): fnss.set_capacities_edge_communicability(self.topo, self.capacities) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) @unittest.skipUnless(package_available('scipy'), 'Requires Scipy') def test_capacities_edge_communicability_one_capacity(self): fnss.set_capacities_edge_communicability(self.topo, [10]) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] == 10 - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] == 10 + for (u, v) in self.topo.edges())) def test_capacities_betweenness_gravity(self): fnss.set_capacities_betweenness_gravity(self.topo, self.capacities) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) def test_capacities_communicability_gravity(self): fnss.set_capacities_communicability_gravity(self.topo, self.capacities) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) def test_capacities_degree_gravity(self): fnss.set_capacities_degree_gravity(self.topo, self.capacities) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) def test_capacities_eigenvector_gravity(self): fnss.set_capacities_eigenvector_gravity(self.topo, self.capacities) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) def test_capacities_eigenvector_gravity_one_capacity(self): fnss.set_capacities_eigenvector_gravity(self.topo, [10]) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] == 10 - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] == 10 + for (u, v) in self.topo.edges())) def test_capacities_pagerank_gravity(self): fnss.set_capacities_pagerank_gravity(self.topo, self.capacities) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in 
self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) def test_capacities_random(self): self.assertRaises(ValueError, fnss.set_capacities_random, @@ -83,13 +83,13 @@ def test_capacities_random(self): self.assertRaises(ValueError, fnss.set_capacities_random, self.topo, {10: 0.3, 20: 0.8}) fnss.set_capacities_random(self.topo, {10: 0.3, 20: 0.7}) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in (10, 20) - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in (10, 20) + for (u, v) in self.topo.edges())) def test_capacities_random_uniform(self): fnss.set_capacities_random_uniform(self.topo, self.capacities) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) def test_capacities_random_power_law(self): self.assertRaises(ValueError, fnss.set_capacities_random_power_law, @@ -97,8 +97,8 @@ def test_capacities_random_power_law(self): self.assertRaises(ValueError, fnss.set_capacities_random_power_law, self.topo, self.capacities, alpha=-0.2) fnss.set_capacities_random_power_law(self.topo, self.capacities) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) def test_capacities_random_zipf(self): self.assertRaises(ValueError, fnss.set_capacities_random_zipf, @@ -106,12 +106,12 @@ def test_capacities_random_zipf(self): self.assertRaises(ValueError, fnss.set_capacities_random_zipf, self.topo, self.capacities, alpha=-0.2) fnss.set_capacities_random_zipf(self.topo, self.capacities, alpha=0.8) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) fnss.clear_capacities(self.topo) fnss.set_capacities_random_zipf(self.topo, self.capacities, alpha=1.2) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) def test_capacities_random_zipf_mandlebrot(self): self.assertRaises(ValueError, @@ -126,11 +126,11 @@ def test_capacities_random_zipf_mandlebrot(self): # test with alpha=0.8 and q=2.5 fnss.set_capacities_random_zipf_mandelbrot(self.topo, self.capacities, alpha=0.8, q=2.5) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) fnss.clear_capacities(self.topo) # test with alpha=1.2 and q=0.4 fnss.set_capacities_random_zipf_mandelbrot(self.topo, self.capacities, alpha=1.2, q=0.4) - self.assertTrue(all(self.topo.edge[u][v]['capacity'] in self.capacities - for (u, v) in self.topo.edges_iter())) + self.assertTrue(all(self.topo.adj[u][v]['capacity'] in self.capacities + for (u, v) in self.topo.edges())) diff --git a/test/test/test_netconfig/test_delays.py b/test/test/test_netconfig/test_delays.py index df8dfe8..e960e9f 100644 --- a/test/test/test_netconfig/test_delays.py +++ b/test/test/test_netconfig/test_delays.py @@ -23,15 +23,15 @@ def 
tearDown(self):
 
     def test_delays_constant(self):
         topo = fnss.k_ary_tree_topology(3, 4)
         self.assertRaises(ValueError, fnss.set_delays_constant, topo, 2, 'Km')
-        odd_links = [(u, v) for (u, v) in topo.edges_iter()
+        odd_links = [(u, v) for (u, v) in topo.edges()
                      if (u + v) % 2 == 1]
-        even_links = [(u, v) for (u, v) in topo.edges_iter()
+        even_links = [(u, v) for (u, v) in topo.edges()
                       if (u + v) % 2 == 0]
         fnss.set_delays_constant(topo, 2, 's', odd_links)
         fnss.set_delays_constant(topo, 5000000, 'us', even_links)
         self.assertEqual('s', topo.graph['delay_unit'])
-        self.assertTrue(all(topo.edge[u][v]['delay'] in [2, 5]
-                            for (u, v) in topo.edges_iter()))
+        self.assertTrue(all(topo.adj[u][v]['delay'] in [2, 5]
+                            for (u, v) in topo.edges()))
 
     def test_delays_geo_distance(self):
         specific_delay = 1.2
@@ -39,8 +39,8 @@ def test_delays_geo_distance(self):
         G_len = fnss.waxman_1_topology(100, L=L)
         G_xy = fnss.waxman_2_topology(100, domain=(0, 0, 3, 4))
         # leave only node coordinate to trigger failure
-        for u, v in G_xy.edges_iter():
-            del G_xy.edge[u][v]['length']
+        for u, v in G_xy.edges():
+            del G_xy.adj[u][v]['length']
         self.assertRaises(ValueError, fnss.set_delays_geo_distance,
                           G_len, 2, delay_unit='Km')
         self.assertRaises(ValueError, fnss.set_delays_geo_distance,
@@ -57,10 +57,10 @@ def test_delays_geo_distance_conversions(self):
         topology.add_edge(1, 2, length=2000)
         specific_delay = 1.2
         fnss.set_delays_geo_distance(topology, specific_delay, None, 'us')
-        self.assertAlmostEqual(topology.edge[1][2]['delay'], 2400)
+        self.assertAlmostEqual(topology.adj[1][2]['delay'], 2400)
         fnss.clear_delays(topology)
         fnss.set_delays_geo_distance(topology, specific_delay, None, 's')
-        self.assertAlmostEqual(topology.edge[1][2]['delay'], 0.0024)
+        self.assertAlmostEqual(topology.adj[1][2]['delay'], 0.0024)
 
     def test_delays_geo_distance_conversions_partial_assignments(self):
         topology = fnss.Topology(distance_unit='m')
@@ -74,9 +74,9 @@ def test_delays_geo_distance_conversions_partial_assignments(self):
                                      3, 's', links=[(2, 3), (3, 4)])
         self.assertEquals(topology.graph['distance_unit'], 'm')
         self.assertEquals(topology.graph['delay_unit'], 'us')
-        self.assertAlmostEqual(topology.edge[1][2]['delay'], 2400)
-        self.assertAlmostEqual(topology.edge[2][3]['delay'], 3600)
-        self.assertAlmostEqual(topology.edge[3][4]['delay'], 3000000)
+        self.assertAlmostEqual(topology.adj[1][2]['delay'], 2400)
+        self.assertAlmostEqual(topology.adj[2][3]['delay'], 3600)
+        self.assertAlmostEqual(topology.adj[3][4]['delay'], 3000000)
 
     def test_delays_geo_distance_conversions_defaults(self):
         topology = fnss.Topology(distance_unit='m')
@@ -87,9 +87,9 @@ def test_delays_geo_distance_conversions_defaults(self):
         fnss.set_delays_geo_distance(topology, specific_delay, 3, 's', None)
         self.assertEquals(topology.graph['distance_unit'], 'm')
         self.assertEquals(topology.graph['delay_unit'], 's')
-        self.assertAlmostEqual(topology.edge[1][2]['delay'], 0.0024)
-        self.assertAlmostEqual(topology.edge[2][3]['delay'], 0.0036)
-        self.assertAlmostEqual(topology.edge[3][4]['delay'], 3)
+        self.assertAlmostEqual(topology.adj[1][2]['delay'], 0.0024)
+        self.assertAlmostEqual(topology.adj[2][3]['delay'], 0.0036)
+        self.assertAlmostEqual(topology.adj[3][4]['delay'], 3)
 
     def test_clear_delays(self):
         topo = fnss.star_topology(12)
diff --git a/test/test/test_netconfig/test_nodeconfig.py b/test/test/test_netconfig/test_nodeconfig.py
index ca00fa6..6ce4182 100644
--- a/test/test/test_netconfig/test_nodeconfig.py
+++ b/test/test/test_netconfig/test_nodeconfig.py
@@ -45,7 +45,7 @@ def test_add_stack_mixed_attr(self):
         self.assertEqual(fnss.get_stack(self.topo, 1, data=True),
                          ('s_name', {'att1': 'val1', 'att2': 'val2'}))
     def test_add_get_remove_stack(self):
-        for v in self.topo.nodes_iter():
+        for v in self.topo.nodes():
             self.assertIsNone(fnss.get_stack(self.topo, v))
         fnss.add_stack(self.topo, 12, self.stack_1_name, self.stack_1_props)
         self.assertEqual(2, len(fnss.get_stack(self.topo, 12)))
@@ -62,14 +62,14 @@ def test_add_get_remove_stack(self):
         self.assertIsNone(fnss.get_stack(self.topo, 12))
 
     def test_clear_stacks(self):
-        for v in self.topo.nodes_iter():
+        for v in self.topo.nodes():
             fnss.add_stack(self.topo, v, self.stack_1_name, self.stack_1_props)
         fnss.clear_stacks(self.topo)
-        for v in self.topo.nodes_iter():
+        for v in self.topo.nodes():
             self.assertIsNone(fnss.get_stack(self.topo, v))
 
     def test_add_get_remove_applications(self):
-        for v in self.topo.nodes_iter():
+        for v in self.topo.nodes():
             self.assertEqual([], fnss.get_application_names(self.topo, v))
         fnss.add_application(self.topo, 10, self.app_1_name, self.app_1_props)
         self.assertEqual([self.app_1_name],
diff --git a/test/test/test_netconfig/test_weights.py b/test/test/test_netconfig/test_weights.py
index 5f6bd9a..7057616 100644
--- a/test/test/test_netconfig/test_weights.py
+++ b/test/test/test_netconfig/test_weights.py
@@ -11,9 +11,9 @@ def setUpClass(cls):
         # set up topology used for all traffic matrix tests
         cls.topo = fnss.k_ary_tree_topology(3, 4)
         cls.capacities = [10, 20]
-        cls.odd_links = [(u, v) for (u, v) in cls.topo.edges_iter()
+        cls.odd_links = [(u, v) for (u, v) in cls.topo.edges()
                          if (u + v) % 2 == 1]
-        cls.even_links = [(u, v) for (u, v) in cls.topo.edges_iter()
+        cls.even_links = [(u, v) for (u, v) in cls.topo.edges()
                           if (u + v) % 2 == 0]
         fnss.set_capacities_random_uniform(cls.topo, cls.capacities)
         fnss.set_delays_constant(cls.topo, 3, 'ms', cls.odd_links)
@@ -32,18 +32,18 @@ def tearDown(self):
     def test_weights_constant(self):
         fnss.set_weights_constant(self.topo, 2, self.odd_links)
         fnss.set_weights_constant(self.topo, 5, self.even_links)
-        self.assertTrue(all(self.topo.edge[u][v]['weight'] in [2, 5]
-                            for (u, v) in self.topo.edges_iter()))
+        self.assertTrue(all(self.topo.adj[u][v]['weight'] in [2, 5]
+                            for (u, v) in self.topo.edges()))
 
     def test_weights_inverse_capacity(self):
         fnss.set_weights_inverse_capacity(self.topo)
-        self.assertTrue(all(self.topo.edge[u][v]['weight'] in [1, 2]
-                            for (u, v) in self.topo.edges_iter()))
+        self.assertTrue(all(self.topo.adj[u][v]['weight'] in [1, 2]
+                            for (u, v) in self.topo.edges()))
 
     def test_weights_delays(self):
         fnss.set_weights_delays(self.topo)
-        self.assertTrue(all(self.topo.edge[u][v]['weight'] in [1, 4]
-                            for (u, v) in self.topo.edges_iter()))
+        self.assertTrue(all(self.topo.adj[u][v]['weight'] in [1, 4]
+                            for (u, v) in self.topo.edges()))
 
     def test_clear_weights(self):
         # create new topology to avoid parameters pollution
diff --git a/test/test/test_topologies/test_parsers.py b/test/test/test_topologies/test_parsers.py
index d2fffd5..13981ee 100644
--- a/test/test/test_topologies/test_parsers.py
+++ b/test/test/test_topologies/test_parsers.py
@@ -18,11 +18,11 @@ def test_parse_abilene(self):
         topology = fnss.parse_abilene(abilene_topo_file, abilene_links_file)
         self.assertEquals(12, topology.number_of_nodes())
         self.assertEquals(30, topology.number_of_edges())
-        self.assertTrue(all('link_index' in topology.edge[u][v]
-                            and 'link_type' in topology.edge[u][v])
-                        for u, v in topology.edges_iter())
-        self.assertTrue(all(topology.edge[u][v]['length'] >= 0
-                            for u, v in topology.edges_iter()))
+        self.assertTrue(all('link_index' in topology.adj[u][v]
+                            and 'link_type' in topology.adj[u][v])
+                        for u, v in topology.edges())
+        self.assertTrue(all(topology.adj[u][v]['length'] >= 0
+                            for u, v in topology.edges()))
 
     @unittest.skipIf(RES_DIR is None, "Resources folder not present")
     def test_parse_rockefuel_isp_map(self):
@@ -37,7 +37,7 @@ def test_parse_rocketfuel_isp_latency(self):
         topology = fnss.parse_rocketfuel_isp_latency(rocketfuel_file)
         self.assertEquals(108, topology.number_of_nodes())
         self.assertEquals(306, topology.number_of_edges())
-        for _, _, data in topology.edges_iter(data=True):
+        for _, _, data in topology.edges(data=True):
             self.assertTrue('delay' in data)
             self.assertIsInstance(data['delay'], int)
             self.assertGreaterEqual(data['delay'], 0)
@@ -49,7 +49,7 @@ def test_parse_rocketfuel_isp_latency_with_weights(self):
         topology = fnss.parse_rocketfuel_isp_latency(latencies_file, weights_file)
         self.assertEquals(108, topology.number_of_nodes())
         self.assertEquals(306, topology.number_of_edges())
-        for _, _, data in topology.edges_iter(data=True):
+        for _, _, data in topology.edges(data=True):
             self.assertTrue('delay' in data)
             self.assertTrue('weight' in data)
             self.assertIsInstance(data['delay'], int)
@@ -63,7 +63,7 @@ def test_parse_rocketfuel_isp_latency_overseas_nodes(self):
         topology = fnss.parse_rocketfuel_isp_latency(rocketfuel_file)
         self.assertEquals(315, topology.number_of_nodes())
         self.assertEquals(1944, topology.number_of_edges())
-        for _, _, data in topology.edges_iter(data=True):
+        for _, _, data in topology.edges(data=True):
             self.assertTrue('delay' in data)
             self.assertIsInstance(data['delay'], int)
             self.assertGreaterEqual(data['delay'], 0)
@@ -75,7 +75,7 @@ def test_parse_rocketfuel_isp_latency_with_weights_overseas_nodes(self):
         topology = fnss.parse_rocketfuel_isp_latency(latencies_file, weights_file)
         self.assertEquals(315, topology.number_of_nodes())
         self.assertEquals(1944, topology.number_of_edges())
-        for _, _, data in topology.edges_iter(data=True):
+        for _, _, data in topology.edges(data=True):
             self.assertTrue('delay' in data)
             self.assertTrue('weight' in data)
             self.assertIsInstance(data['delay'], int)
@@ -103,7 +103,7 @@ def test_parse_caida_as_relationships(self):
         topology = fnss.parse_caida_as_relationships(caida_file)
         self.assertEqual(41203, topology.number_of_nodes())
         self.assertEqual(121309, topology.number_of_edges())
-        self.assertEqual('customer', topology.edge[263053][28163]['type'])
+        self.assertEqual('customer', topology.adj[263053][28163]['type'])
 
     @unittest.skipIf(RES_DIR is None, "Resources folder not present")
     def test_parse_inet(self):
@@ -120,11 +120,11 @@ def test_parse_topology_zoo(self):
         self.assertFalse(topology.is_multigraph())
         self.assertEqual(34, topology.number_of_nodes())
         self.assertEqual(46, topology.number_of_edges())
-        self.assertEqual(1000000000.0, topology.edge[4][15]['capacity'])
+        self.assertEqual(1000000000.0, topology.adj[4][15]['capacity'])
         self.assertEquals('bps', topology.graph['capacity_unit'])
-        self.assertTrue(all(topology.edge[u][v]['length'] >= 0
-                            for u, v in topology.edges_iter()
-                            if 'length' in topology.edge[u][v]))
+        self.assertTrue(all(topology.adj[u][v]['length'] >= 0
+                            for u, v in topology.edges()
+                            if 'length' in topology.adj[u][v]))
 
     @unittest.skipIf(RES_DIR is None, "Resources folder not present")
     def test_parse_topology_zoo_multigraph(self):
@@ -136,7 +136,7 @@ def test_parse_topology_zoo_multigraph(self):
         self.assertEqual(61, topology.number_of_nodes())
         self.assertEqual(75, topology.number_of_edges())
         self.assertEquals('bps', topology.graph['capacity_unit'])
-        self.assertEquals(2000000000, topology.edge[37][58]['capacity'])
+        self.assertEquals(2000000000, topology.adj[37][58]['capacity'])
         bundled_links = [(43, 18), (49, 32), (41, 18), (4, 7),
                          (6, 55), (9, 58), (58, 37), (10, 55),
                          (14, 57), (14, 35), (18, 41), (18, 43),
@@ -144,7 +144,7 @@
         for u, v in topology.edges():
             print(u, v)
             self.assertEquals((u, v) in bundled_links,
-                              topology.edge[u][v]['bundle'])
+                              topology.adj[u][v]['bundle'])
 
     @unittest.skipIf(RES_DIR is None, "Resources folder not present")
     def test_parse_topology_zoo_multigraph_directed_topology(self):
@@ -167,9 +167,9 @@ def test_parse_brite_as(self):
         self.assertEqual(980, topology.node[851]['latitude'])
         self.assertEqual('AS_NODE', topology.node[851]['type'])
         # 1478 716 230 212.11553455605272 0.7075412636166207 0.0011145252848059164 716 230 E_AS U
-        self.assertEquals(1478, topology.edge[716][230]['id'])
+        self.assertEquals(1478, topology.adj[716][230]['id'])
         self.assertAlmostEquals(212.11553455605272,
-                                topology.edge[716][230]['length'], 0.01)
+                                topology.adj[716][230]['length'], 0.01)
 
     @unittest.skipIf(RES_DIR is None, "Resources folder not present")
     def test_parse_brite_router(self):
diff --git a/test/test/test_topologies/test_simplemodels.py b/test/test/test_topologies/test_simplemodels.py
index c4ae13b..8e89aac 100644
--- a/test/test/test_topologies/test_simplemodels.py
+++ b/test/test/test_topologies/test_simplemodels.py
@@ -33,7 +33,7 @@ def test_K_ary_tree_connectivity(k, h):
             self.assertEquals(sum(k ** d for d in range(1, h + 1)),
                               G.number_of_edges())
             degree = G.degree()
-            for v in G.nodes_iter():
+            for v in G.nodes():
                 v_type = G.node[v]['type']
                 v_depth = G.node[v]['depth']
                 self.assertEqual(expected_degree[v_type], degree[v])
@@ -127,10 +127,10 @@ def test_chord_connectivity(m, r):
             n = 2 ** m
             self.assertEqual(len(G), n)
             if r <= 2:
-                for i in G.nodes_iter():
-                    self.assertEqual(len(G.edge[i]), m)
+                for i in G.nodes():
+                    self.assertEqual(len(G.adj[i]), m)
             else:
-                for i in G.nodes_iter():
+                for i in G.nodes():
                     for j in range(i + 1, i + r + 1):
                         self.assertTrue(G.has_edge(i, j % n))
         test_chord_connectivity(2, 1)
diff --git a/test/test/test_topologies/test_topology.py b/test/test/test_topologies/test_topology.py
index 5ccefa8..40c7ef0 100644
--- a/test/test/test_topologies/test_topology.py
+++ b/test/test/test_topologies/test_topology.py
@@ -50,7 +50,7 @@ def test_base_topology_class(self):
         capacities = topology.capacities()
         delays = topology.delays()
         buffer_sizes = topology.buffers()
-        for e in topology.edges_iter():
+        for e in topology.edges():
             self.assertEqual(weight, weights[e])
             self.assertEqual(capacity, capacities[e])
             self.assertEqual(delay, delays[e])
@@ -128,10 +128,10 @@ def test_read_write_topology(self):
         self.assertEquals(len(fnss.get_application_names(self.G, 2)),
                           len(fnss.get_application_names(read_topo, 2)))
         self.assertEquals('fnss', fnss.get_application_properties(read_topo, 2, 'server')['user-agent'])
-        self.assertEquals([2, 4, 6], [ v for v in read_topo.nodes_iter()
+        self.assertEquals([2, 4, 6], [ v for v in read_topo.nodes()
                            if fnss.get_stack(read_topo, v) is not None
                            and fnss.get_stack(read_topo, v)[0] == 'tcp'])
-        self.assertEquals([2, 4], [ v for v in read_topo.nodes_iter()
+        self.assertEquals([2, 4], [ v for v in read_topo.nodes()
                            if 'client' in fnss.get_application_names(read_topo, v)])
-        self.assertEquals([2], [ v for v in read_topo.nodes_iter()
+        self.assertEquals([2], [ v for v in read_topo.nodes()
                            if 'server' in fnss.get_application_names(read_topo, v)])