DOC: consistent spelling of neighbor and rename vars #7162

Merged: 2 commits, Dec 15, 2023
networkx/algorithms/approximation/treewidth.py (4 additions, 4 deletions)

@@ -70,7 +70,7 @@ def treewidth_min_fill_in(G):
"""Returns a treewidth decomposition using the Minimum Fill-in heuristic.

The heuristic chooses a node from the graph, where the number of edges
-added turning the neighbourhood of the chosen node into clique is as
+added turning the neighborhood of the chosen node into clique is as
small as possible.

Parameters
@@ -89,7 +89,7 @@ class MinDegreeHeuristic:
"""Implements the Minimum Degree heuristic.

The heuristic chooses the nodes according to their degree
-(number of neighbours), i.e., first the node with the lowest degree is
+(number of neighbors), i.e., first the node with the lowest degree is
chosen, then the graph is updated and the corresponding node is
removed. Next, a new node with the lowest degree is chosen, and so on.
"""
@@ -136,7 +136,7 @@ def min_fill_in_heuristic(graph):
"""Implements the Minimum Degree heuristic.

Returns the node from the graph, where the number of edges added when
-turning the neighbourhood of the chosen node into clique is as small as
+turning the neighborhood of the chosen node into clique is as small as
possible. This algorithm chooses the nodes using the Minimum Fill-In
heuristic. The running time of the algorithm is :math:`O(V^3)` and it uses
additional constant memory."""
@@ -201,7 +201,7 @@ def treewidth_decomp(G, heuristic=min_fill_in_heuristic):
# get first node from heuristic
elim_node = heuristic(graph)
while elim_node is not None:
-# connect all neighbours with each other
+# connect all neighbors with each other
nbrs = graph[elim_node]
for u, v in itertools.permutations(nbrs, 2):
if v not in graph[u]:
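
For reviewers unfamiliar with this module, a minimal usage sketch of the heuristics touched above, using the standard networkx approximation API (assuming networkx 3.x; not part of this diff):

```python
import networkx as nx
from networkx.algorithms import approximation as approx

G = nx.petersen_graph()

# Both heuristics return (treewidth, decomposition), where the decomposition
# is a tree whose nodes are frozensets ("bags") of nodes of G.
tw_fill, decomp = approx.treewidth_min_fill_in(G)
tw_deg, _ = approx.treewidth_min_degree(G)
print(tw_fill, tw_deg)  # heuristic upper bounds; the true treewidth here is 4
```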
networkx/algorithms/centrality/voterank_alg.py (1 addition, 1 deletion)

@@ -9,7 +9,7 @@ def voterank(G, number_of_nodes=None):
"""Select a list of influential nodes in a graph using VoteRank algorithm

VoteRank [1]_ computes a ranking of the nodes in a graph G based on a
-voting scheme. With VoteRank, all nodes vote for each of its in-neighbours
+voting scheme. With VoteRank, all nodes vote for each of its in-neighbors
and the node with the highest votes is elected iteratively. The voting
ability of out-neighbors of elected nodes is decreased in subsequent turns.

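
A quick usage sketch of the documented function (standard API, not part of this diff):

```python
import networkx as nx

G = nx.karate_club_graph()
# Rank the top-3 influential spreaders; with number_of_nodes=None,
# all nodes that receive votes are returned in election order.
print(nx.voterank(G, number_of_nodes=3))
```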
networkx/algorithms/cluster.py (4 additions, 6 deletions)

@@ -67,20 +67,18 @@ def triangles(G, nodes=None):

# dict used to avoid visiting the same nodes twice
# this allows calculating/counting each triangle only once
-later_neighbors = {}
+later_nbrs = {}

# iterate over the nodes in a graph
for node, neighbors in G.adjacency():
-later_neighbors[node] = {
-n for n in neighbors if n not in later_neighbors and n != node
-}
+later_nbrs[node] = {n for n in neighbors if n not in later_nbrs and n != node}

# instantiate Counter for each node to include isolated nodes
# add 1 to the count if a nodes neighbor's neighbor is also a neighbor
triangle_counts = Counter(dict.fromkeys(G, 0))
-for node1, neighbors in later_neighbors.items():
+for node1, neighbors in later_nbrs.items():
for node2 in neighbors:
-third_nodes = neighbors & later_neighbors[node2]
+third_nodes = neighbors & later_nbrs[node2]
m = len(third_nodes)
triangle_counts[node1] += m
triangle_counts[node2] += m
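
The renamed `later_nbrs` dict ensures each triangle is visited only once. A minimal sketch of the public function (not part of this diff):

```python
import networkx as nx

G = nx.complete_graph(4)  # K4 contains exactly four triangles
counts = nx.triangles(G)  # per-node counts: {0: 3, 1: 3, 2: 3, 3: 3}
# each triangle is counted once at each of its three nodes
print(sum(counts.values()) // 3)  # 4
```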
networkx/algorithms/coloring/greedy_coloring.py (4 additions, 4 deletions)

@@ -274,7 +274,7 @@ def greedy_color(G, strategy="largest_first", interchange=False):
"""Color a graph using various strategies of greedy graph coloring.

Attempts to color a graph using as few colors as possible, where no
-neighbours of a node can have same color as the node itself. The
+neighbors of a node can have same color as the node itself. The
given strategy determines the order in which nodes are colored.

The strategies are described in [1]_, and smallest-last is based on
@@ -371,11 +371,11 @@ def greedy_color(G, strategy="largest_first", interchange=False):
if interchange:
return _greedy_coloring_with_interchange(G, nodes)
for u in nodes:
-# Set to keep track of colors of neighbours
-neighbour_colors = {colors[v] for v in G[u] if v in colors}
+# Set to keep track of colors of neighbors
+nbr_colors = {colors[v] for v in G[u] if v in colors}
# Find the first unused color.
for color in itertools.count():
-if color not in neighbour_colors:
+if color not in nbr_colors:
break
# Assign the new color to the current node.
colors[u] = color
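
Usage sketch of the documented invariant (no two adjacent nodes share a color); standard API, not part of this diff:

```python
import networkx as nx

G = nx.cycle_graph(5)  # an odd cycle needs 3 colors
coloring = nx.greedy_color(G, strategy="largest_first")
# coloring maps node -> color index, e.g. {0: 0, 1: 1, 2: 0, 3: 1, 4: 2}
assert all(coloring[u] != coloring[v] for u, v in G.edges)
```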
networkx/algorithms/coloring/tests/test_coloring.py (3 additions, 3 deletions)

@@ -446,13 +446,13 @@ def color_remaining_nodes(
)

for u in node_iterator:
-# Set to keep track of colors of neighbours
-neighbour_colors = {
+# Set to keep track of colors of neighbors
+nbr_colors = {
aux_colored_nodes[v] for v in G[u] if v in aux_colored_nodes
}
# Find the first unused color.
for color in itertools.count():
-if color not in neighbour_colors:
+if color not in nbr_colors:
break
aux_colored_nodes[u] = color
color_assignments.append((u, color))
networkx/algorithms/community/asyn_fluid.py (2 additions, 2 deletions)

@@ -24,7 +24,7 @@ def asyn_fluidc(G, k, max_iter=100, seed=None):
The algorithm proceeds as follows. First each of the initial k communities
is initialized in a random vertex in the graph. Then the algorithm iterates
over all vertices in a random order, updating the community of each vertex
-based on its own community and the communities of its neighbours. This
+based on its own community and the communities of its neighbors. This
process is performed several times until convergence.
At all times, each community has a total density of 1, which is equally
distributed among the vertices it contains. If a vertex changes of
@@ -102,7 +102,7 @@ def asyn_fluidc(G, k, max_iter=100, seed=None):
com_counter.update({communities[vertex]: density[communities[vertex]]})
except KeyError:
pass
-# Gather neighbour vertex communities
+# Gather neighbor vertex communities
for v in G[vertex]:
try:
com_counter.update({communities[v]: density[communities[v]]})
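
Minimal sketch of the public entry point (requires a connected graph; not part of this diff):

```python
import networkx as nx

G = nx.karate_club_graph()
# Fluid Communities with a target of k=2 communities;
# returns an iterable of node sets.
communities = list(nx.community.asyn_fluidc(G, k=2, seed=42))
print([sorted(c) for c in communities])
```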
networkx/algorithms/community/label_propagation.py (1 addition, 1 deletion)

@@ -316,7 +316,7 @@ def _most_frequent_labels(node, labeling, G):
# accordingly, hence the immediate if statement.
return {labeling[node]}

-# Compute the frequencies of all neighbours of node
+# Compute the frequencies of all neighbors of node
freqs = Counter(labeling[q] for q in G[node])
max_freq = max(freqs.values())
return {label for label, freq in freqs.items() if freq == max_freq}
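
`_most_frequent_labels` is an internal helper; the public routine it supports can be exercised like this (a sketch, not part of this diff):

```python
import networkx as nx

G = nx.barbell_graph(5, 1)
# Asynchronous label propagation: each node repeatedly adopts the most
# frequent label among its neighbors until labels stabilize.
communities = list(nx.community.asyn_lpa_communities(G, seed=7))
print([sorted(c) for c in communities])
```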
networkx/algorithms/community/louvain.py (2 additions, 2 deletions)

@@ -240,7 +240,7 @@ def _one_level(G, m, partition, resolution=1, is_directed=False, seed=None):
out_degrees = dict(G.out_degree(weight="weight"))
Stot_in = list(in_degrees.values())
Stot_out = list(out_degrees.values())
-# Calculate weights for both in and out neighbours without considering self-loops
+# Calculate weights for both in and out neighbors without considering self-loops
nbrs = {}
for u in G:
nbrs[u] = defaultdict(float)
@@ -327,7 +327,7 @@ def _neighbor_weights(nbrs, node2com):
Parameters
----------
nbrs : dictionary
-Dictionary with nodes' neighbours as keys and their edge weight as value.
+Dictionary with nodes' neighbors as keys and their edge weight as value.
node2com : dictionary
Dictionary with all graph's nodes as keys and their community index as value.

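
`_one_level` and `_neighbor_weights` are internals of the public Louvain function; a minimal sketch (not part of this diff):

```python
import networkx as nx

G = nx.karate_club_graph()
# Returns a list of sets of nodes, one set per detected community.
communities = nx.community.louvain_communities(G, seed=123)
print(len(communities), [len(c) for c in communities])
```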
networkx/algorithms/core.py (1 addition, 1 deletion)

@@ -394,7 +394,7 @@ def k_corona(G, k, core_number=None):
"""Returns the k-corona of G.

The k-corona is the subgraph of nodes in the k-core which have
-exactly k neighbours in the k-core.
+exactly k neighbors in the k-core.

.. deprecated:: 3.3
`k_corona` will not accept `MultiGraph` objects in version 3.5.
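
A small worked example of the documented definition (standard API, not part of this diff):

```python
import networkx as nx

G = nx.complete_graph(4)  # every node has degree 3, so the 3-core is all of G
corona = nx.k_corona(G, k=3)
# each node has exactly 3 neighbors inside the 3-core, so all survive
print(sorted(corona.nodes))  # [0, 1, 2, 3]
```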
networkx/algorithms/dominating.py (3 additions, 3 deletions)

@@ -55,13 +55,13 @@ def dominating_set(G, start_with=None):
while remaining_nodes:
# Choose an arbitrary node and determine its undominated neighbors.
v = remaining_nodes.pop()
-undominated_neighbors = set(G[v]) - dominating_set
+undominated_nbrs = set(G[v]) - dominating_set
# Add the node to the dominating set and the neighbors to the
# dominated set. Finally, remove all of those nodes from the set
# of remaining nodes.
dominating_set.add(v)
-dominated_nodes |= undominated_neighbors
-remaining_nodes -= undominated_neighbors
+dominated_nodes |= undominated_nbrs
+remaining_nodes -= undominated_nbrs
return dominating_set


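
Sketch of the greedy routine above from the caller's side (not part of this diff):

```python
import networkx as nx

G = nx.path_graph(5)  # 0-1-2-3-4
D = nx.dominating_set(G)
# every node is in D or adjacent to some member of D
assert all(u in D or any(v in D for v in G[u]) for u in G)
print(D)
```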
networkx/algorithms/graph_hashing.py (3 additions, 3 deletions)

@@ -43,7 +43,7 @@ def weisfeiler_lehman_graph_hash(
):
"""Return Weisfeiler Lehman (WL) graph hash.

-The function iteratively aggregates and hashes neighbourhoods of each node.
+The function iteratively aggregates and hashes neighborhoods of each node.
After each node's neighbors are hashed to obtain updated node labels,
a hashed histogram of resulting labels is returned as the final hash.

@@ -176,7 +176,7 @@ def weisfeiler_lehman_subgraph_hashes(
additionally a hash of the initial node label (or equivalently a
subgraph of depth 0)

-The function iteratively aggregates and hashes neighbourhoods of each node.
+The function iteratively aggregates and hashes neighborhoods of each node.
This is achieved for each step by replacing for each node its label from
the previous iteration with its hashed 1-hop neighborhood aggregate.
The new node label is then appended to a list of node labels for each
@@ -254,7 +254,7 @@ def weisfeiler_lehman_subgraph_hashes(

The first 2 WL subgraph hashes match. From this we can conclude that it's very
likely the neighborhood of 4 hops around these nodes are isomorphic: each
-iteration aggregates 1-hop neighbourhoods meaning hashes at depth $n$ are influenced
+iteration aggregates 1-hop neighborhoods meaning hashes at depth $n$ are influenced
by every node within $2n$ hops.

However the neighborhood of 6 hops is no longer isomorphic since their 3rd hash does
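
Since the hash aggregates neighborhoods only, it is invariant under relabeling; a minimal sketch (not part of this diff):

```python
import networkx as nx

G1 = nx.cycle_graph(6)
G2 = nx.relabel_nodes(G1, {i: chr(97 + i) for i in range(6)})
# Isomorphic graphs always hash identically (the converse need not hold).
assert nx.weisfeiler_lehman_graph_hash(G1) == nx.weisfeiler_lehman_graph_hash(G2)
```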
networkx/algorithms/isomorphism/ismags.py (4 additions, 4 deletions)

@@ -848,11 +848,11 @@ def _map_nodes(self, sgn, candidates, constraints, mapping=None, to_be_mapped=No
left_to_map = to_be_mapped - set(mapping.keys())

new_candidates = candidates.copy()
-sgn_neighbours = set(self.subgraph[sgn])
-not_gn_neighbours = set(self.graph.nodes) - set(self.graph[gn])
+sgn_nbrs = set(self.subgraph[sgn])
+not_gn_nbrs = set(self.graph.nodes) - set(self.graph[gn])
for sgn2 in left_to_map:
-if sgn2 not in sgn_neighbours:
-gn2_options = not_gn_neighbours
+if sgn2 not in sgn_nbrs:
+gn2_options = not_gn_nbrs
else:
# Get all edges to gn of the right color:
g_edges = self._edges_of_same_color(sgn, sgn2)
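
`_map_nodes` is internal to the ISMAGS matcher; the class itself is public, and a usage sketch (assuming networkx 3.x; not part of this diff) looks like:

```python
import networkx as nx

graph = nx.cycle_graph(5)
subgraph = nx.path_graph(3)
ismags = nx.isomorphism.ISMAGS(graph, subgraph)
# With symmetry=True, mappings equivalent under the subgraph's own
# symmetry are reported only once.
print(len(list(ismags.find_isomorphisms(symmetry=True))))
```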
networkx/algorithms/isomorphism/vf2pp.py (4 additions, 4 deletions)

@@ -476,8 +476,8 @@ def _find_candidates(
G1, G2, G1_labels, _, _, nodes_of_G2Labels, G2_nodes_of_degree = graph_params
mapping, reverse_mapping, _, _, _, _, _, _, T2_tilde, _ = state_params

-covered_neighbors = [nbr for nbr in G1[u] if nbr in mapping]
-if not covered_neighbors:
+covered_nbrs = [nbr for nbr in G1[u] if nbr in mapping]
+if not covered_nbrs:
candidates = set(nodes_of_G2Labels[G1_labels[u]])
candidates.intersection_update(G2_nodes_of_degree[G1_degree[u]])
candidates.intersection_update(T2_tilde)
@@ -492,10 +492,10 @@
)
return candidates

-nbr1 = covered_neighbors[0]
+nbr1 = covered_nbrs[0]
common_nodes = set(G2[mapping[nbr1]])

-for nbr1 in covered_neighbors[1:]:
+for nbr1 in covered_nbrs[1:]:
common_nodes.intersection_update(G2[mapping[nbr1]])

common_nodes.difference_update(reverse_mapping)
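
`_find_candidates` is internal to VF2++; the public entry points can be sketched as follows (not part of this diff):

```python
import networkx as nx

G1 = nx.path_graph(4)
G2 = nx.relabel_nodes(G1, {0: "a", 1: "b", 2: "c", 3: "d"})
print(nx.vf2pp_is_isomorphic(G1, G2))  # True
print(nx.vf2pp_isomorphism(G1, G2))    # one mapping, e.g. {0: 'a', 1: 'b', ...}
```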
networkx/algorithms/matching.py (5 additions, 5 deletions)

@@ -410,7 +410,7 @@ class Blossom:
# and w is a vertex in b.childs[wrap(i+1)].

# If b is a top-level S-blossom,
-# b.mybestedges is a list of least-slack edges to neighbouring
+# b.mybestedges is a list of least-slack edges to neighboring
# S-blossoms, or None if no such list has been computed yet.
# This is used for efficient computation of delta3.

@@ -738,12 +738,12 @@ def _recurse(b, endstage):
j += jstep
while b.childs[j] != entrychild:
# Examine the vertices of the sub-blossom to see whether
-# it is reachable from a neighbouring S-vertex outside the
+# it is reachable from a neighboring S-vertex outside the
# expanding blossom.
bv = b.childs[j]
if label.get(bv) == 1:
# This sub-blossom just got label S through one of its
-# neighbours; leave it be.
+# neighbors; leave it be.
j += jstep
continue
if isinstance(bv, Blossom):
@@ -972,11 +972,11 @@ def verifyOptimum():
v = queue.pop()
assert label[inblossom[v]] == 1

-# Scan its neighbours:
+# Scan its neighbors:
for w in G.neighbors(v):
if w == v:
continue # ignore self-loops
-# w is a neighbour to v
+# w is a neighbor to v
bv = inblossom[v]
bw = inblossom[w]
if bv == bw:
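
`Blossom` is internal to the weighted matching implementation; from the caller's side (a sketch, not part of this diff):

```python
import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 6), (2, 3, 10), (3, 4, 6)])
# {(1, 2), (3, 4)} has total weight 12 and beats {(2, 3)} with weight 10.
print(nx.max_weight_matching(G))
```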
networkx/algorithms/planar_drawing.py (7 additions, 7 deletions)

@@ -78,18 +78,18 @@ def combinatorial_embedding_to_pos(embedding, fully_triangulate=False):
left_t_child[v3] = None

for k in range(3, len(node_list)):
-vk, contour_neighbors = node_list[k]
-wp = contour_neighbors[0]
-wp1 = contour_neighbors[1]
-wq = contour_neighbors[-1]
-wq1 = contour_neighbors[-2]
-adds_mult_tri = len(contour_neighbors) > 2
+vk, contour_nbrs = node_list[k]
+wp = contour_nbrs[0]
+wp1 = contour_nbrs[1]
+wq = contour_nbrs[-1]
+wq1 = contour_nbrs[-2]
+adds_mult_tri = len(contour_nbrs) > 2

# Stretch gaps:
delta_x[wp1] += 1
delta_x[wq] += 1

-delta_x_wp_wq = sum(delta_x[x] for x in contour_neighbors[1:])
+delta_x_wp_wq = sum(delta_x[x] for x in contour_nbrs[1:])

# Adjust offsets
delta_x[vk] = (-y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2
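
`combinatorial_embedding_to_pos` backs the public `planar_layout`; a minimal sketch (not part of this diff):

```python
import networkx as nx

G = nx.icosahedral_graph()  # planar
is_planar, embedding = nx.check_planarity(G)
assert is_planar
pos = nx.planar_layout(G)  # node -> (x, y) positions with no edge crossings
```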
networkx/algorithms/similarity.py (7 additions, 7 deletions)

@@ -1689,22 +1689,22 @@ def generate_random_paths(
for _ in range(path_length):
# Randomly sample a neighbor (v_j) according
# to transition probabilities from ``node`` (v) to its neighbors
-neighbor_index = np.random.choice(
+nbr_index = np.random.choice(
num_nodes, p=transition_probabilities[starting_index]
)

# Set current vertex (v = v_j)
-starting_index = neighbor_index
+starting_index = nbr_index

# Add v into p_r
-neighbor_node = node_map[neighbor_index]
-path.append(neighbor_node)
+nbr_node = node_map[nbr_index]
+path.append(nbr_node)

# Add p_r into P_v
if index_map is not None:
-if neighbor_node in index_map:
-index_map[neighbor_node].add(path_index)
+if nbr_node in index_map:
+index_map[nbr_node].add(path_index)
else:
-index_map[neighbor_node] = {path_index}
+index_map[nbr_node] = {path_index}

yield path
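
Caller-side sketch of the generator above (requires numpy; not part of this diff):

```python
import networkx as nx

G = nx.karate_club_graph()
# Yields sample_size random walks; each walk visits path_length + 1 nodes.
paths = list(nx.generate_random_paths(G, sample_size=3, path_length=5))
print(paths)
```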
networkx/algorithms/sparsifiers.py (9 additions, 9 deletions)

@@ -136,11 +136,11 @@ def spanner(G, stretch, weight=None, seed=None):
# remove edges to centers with edge weight less than
# closest_center_weight
for neighbor in residual_graph.adj[v]:
-neighbor_cluster = clustering[neighbor]
-neighbor_weight = lightest_edge_weight[neighbor_cluster]
+nbr_cluster = clustering[neighbor]
+nbr_weight = lightest_edge_weight[nbr_cluster]
if (
-neighbor_cluster == closest_center
-or neighbor_weight < closest_center_weight
+nbr_cluster == closest_center
+or nbr_weight < closest_center_weight
):
edges_to_remove.add((v, neighbor))

@@ -257,14 +257,14 @@ def _lightest_edge_dicts(residual_graph, clustering, node):
lightest_edge_neighbor = {}
lightest_edge_weight = {}
for neighbor in residual_graph.adj[node]:
-neighbor_center = clustering[neighbor]
+nbr_center = clustering[neighbor]
weight = residual_graph[node][neighbor]["weight"]
if (
-neighbor_center not in lightest_edge_weight
-or weight < lightest_edge_weight[neighbor_center]
+nbr_center not in lightest_edge_weight
+or weight < lightest_edge_weight[nbr_center]
):
-lightest_edge_neighbor[neighbor_center] = neighbor
-lightest_edge_weight[neighbor_center] = weight
+lightest_edge_neighbor[nbr_center] = neighbor
+lightest_edge_weight[nbr_center] = weight
return lightest_edge_neighbor, lightest_edge_weight


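
Sketch of the public `spanner` entry point whose internals are renamed above (not part of this diff):

```python
import networkx as nx

G = nx.complete_graph(20)
# A stretch-3 spanner keeps every node but far fewer edges, while
# multiplying shortest-path distances by at most 3.
H = nx.spanner(G, stretch=3, seed=0)
print(G.number_of_edges(), H.number_of_edges())
```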