From 467856df7d9cab769d4a4894b2e69c888821a214 Mon Sep 17 00:00:00 2001 From: mbesancon Date: Mon, 4 Jun 2018 21:21:14 -0400 Subject: [PATCH 1/4] ignore coverage file --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 340a88323..8aa929b1e 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ docs/build/ docs/site/ benchmark/.results/* benchmark/.tune.jld +*.cov From 643c2a05f940cfb0fa7bd2583bbcd44e260ec5b0 Mon Sep 17 00:00:00 2001 From: mbesancon Date: Mon, 4 Jun 2018 21:26:29 -0400 Subject: [PATCH 2/4] merge master (#2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * removed flow algorithms (#815) * fixes #820, plus tests (#821) * change show() for empty graphs (ref https://github.com/JuliaGraphs/MetaGraphs.jl/issues/20#issuecomment-359937402) (#822) * Pull request clique_percolation.jl (#826) clique percolation is a method of uncovering the overlapping community structure of complex networks in nature and society * add src/community/clique_percolation.jl * tests in file test/community/clique_percolation.jl * cites the original clique percolation paper * for undirected graphs only using traitfn * in_ / out_ -> in / out (#830) * (in,out)_neighbors -> (in,out)neighbors * all_neighbors -> allneighbors * Pull request clique_percolation.jl (#826) clique percolation is a method of uncovering the overlapping community structure of complex networks in nature and society * add src/community/clique_percolation.jl * tests in file test/community/clique_percolation.jl * cites the original clique percolation paper * for undirected graphs only using traitfn * revert allneighbors * expected_degree_graph (Chung-Lu model) (#832) * Expected degree random graph generator implemented, including tests * algorithm corrected * Missing seed corrected in expected_degree_graph * expected_degree_graph! implemented * Added return in function, comment with references removed, references in docs added (expected_degree_graph) * Update randgraphs.jl minor doc update * Update randgraphs.jl * Fixing problems with MST Kruskal's on SimpleWeightedGraphs (#835) * Update kruskal.jl * Update prim.jl * Update kruskal.jl * Update kruskal.jl * Update prim.jl * Update kruskal.jl * Update prim.jl * Update kruskal.jl * Update prim.jl * reverting changes * Revert "reverting changes" This reverts commit ac1760be7caac6fbf7b03fa159e1460c3c5de586. * Revert "Update prim.jl" This reverts commit 677f6fac477b9e1b465fac861a2191f27d1b9fee. * Revert "Update kruskal.jl" This reverts commit a0e9c47c99d556bf30d75b7145f1fcec947bcee2. * Revert "Update prim.jl" This reverts commit 793bac4b260b141b459bd0486aedec66d0231878. * Revert "Update kruskal.jl" This reverts commit 6114e16d7764b0bc1751a5edfc61e7723a35a4bd. * Revert "Update prim.jl" This reverts commit 551f1e6b09e4aad7e296bbeaf924567478dd7ce2. * Revert "Update kruskal.jl" This reverts commit 941005e909dd9c16191caa0488c17b5edf1c4e58. * Revert "Update kruskal.jl" This reverts commit a4045144d7109ecd7e7c1324acb46952036b1f23. * Revert "Update prim.jl" This reverts commit 2d43a60e7abba1fc8d268d317dbd3282ed98995b. * Revert "Update kruskal.jl" This reverts commit 457792053fc754f503f728e127521ed1b0010997. * fix problems with SimpleWeightedGraphs * fix problems with SimpleWeightedGraphs * fix problems with SimpleWeightedGraphs * bipartite_map on 2-order graphs fixed. Added proper tests. 
Fixed test connected to bipartite_map (#836) * Correct pre-allocation of memory in Prim's MST (#839) * Improve Kruskal's MST by optimizing Union-Find (#843) * add missing backtick (#846) * Add greedy_color for Graph Coloring (#844) * Add greedy_color for Graph Coloring * Improve Kruskal's MST by optimizing Union-Find (#843) * Update README.md * Update README.md * first cut at 0.7 compatibility (#848) * using LightGraphs does not error * Switch to LinearAlgebra and SparseArrays std libs * Fix most of linalg tests * Add SharedArrays for distance tests to compile * Add Random and Markdown to stdlibs used * Fix connectivity tests * IntSet -> BitSet * Add DelimitedFiles stdlib for readcsv * Fix failing test * first cut * Use mauro/SimpleTraits.jl/m3/where-fns in tests * Fix SimpleTraits checkout (#851) * Move up SimpleTraits checkout (#852) * Update runtests.jl * Update REQUIRE * Update REQUIRE * femtocleaner with additional depwarn fixes (#856) fix deprecation warnings based on local femtocleaner run * use equalto in degeneracy.jl (#858) * fix depwarns in linalg submodule (#860) * update linalg/spectral to fix deprecations * fix depwarns in graphmatrices * fixes doc deprecation warnings (#861) * fixes doc deprecation warnings * adding Base64 to runtests * Update README.md * remove add/remove vertex/edge from core, minor bug fix (#862) * remove add/remove vertex/edge from core, minor bug fix * fix tests * export add/rem vertex * remove long-term deprecation warnings (#863) * uninitialized -> undef, blkdiag -> blockdiag, and removed import of d… (#865) * uninitialized -> undef, blkdiag -> blockdiag, and removed import of deprecated functions from LG to LinAlg * test coverage for digraph eltype * removes equalto (#867) * optional sorting algorithm for gdistances (#868) add the ability to pass RadixSort to gdistances! * update url and mention directed graphs explicilty (#837) * update url and mention directed graphs explicilty * Update graphtypes.md * Update graphtypes.md fixed references. * Speed improvements for function transitiveclosure! (#870) * Speed improvements for function transitiveclosure! Instead of checking for all paths i -> k and k -> j for a given vertex k we only iterate over the in- and outneighbours of k. 
* Merged some conditionals into a single statement * Cache efficient Floyd Warshall (#873) * Update floyd-warshall.jl * Cache efficient Floyd Warshall * fixed an error where smallgraph(:frucht) had 20 vertices instead of 12 (#878) * Delete .Rhistory * Added function transitivereduction (#877) * added function transitivereduction * Update transitivity.jl docstring formatting * Fixed some tests && added testdigraphs for all tests * Johnson Shortest Path for Sparse Graphs (#884) * Johnson Shortest Path for Sparse Graphs Johnson Shortest Path for Sparse Graphs * Improved memory efficiency if distmx is mutable * Improved memory efficiency for parallel implementation * Update index.md * Added constructors to create graphs from a vector or an iterator of edges (#880) * Added constructors to create SimpleGraphs and SimpleDiGraphs from a vector or an iterator of edges * Added constructors to create SimpleGraphs and SimpleDiGraphs from a vector or an iterator of edges * Slyles1001/892 (#894) * comments are your friend * Most of LightGraphs warnings are fixed * Delete HITS.jl * Slyles1001/872 (#891) * DataStructures fixed * missed heappop!, now it tests clean * spaces * Update LightGraphs.jl * Update runtests.jl * fixes most depwarns as of 20180529 (#895) * fixes most depwarns as of 20180529 * graphmatrices problems * remove tabs * tabs, again * Update CONTRIBUTING.md (#897) * Improve Kruskal and use in-built disjoint set data structure (#896) * Improve Kruskal and use in-built disjoint set data structure * Update kruskal.jl Changes requested by @somil55 --- .Rhistory | 0 .travis.yml | 6 +- CONTRIBUTING.md | 20 +-- README.md | 5 +- REQUIRE | 4 +- benchmark/benchmarks.jl | 1 - benchmark/core.jl | 2 +- benchmark/max-flow.jl | 10 -- docs/make.jl | 3 +- docs/src/basicproperties.md | 8 +- docs/src/developing.md | 4 +- docs/src/distance.md | 4 +- docs/src/flowcut.md | 24 --- docs/src/graphtypes.md | 12 +- docs/src/index.md | 2 +- docs/src/plotting.md | 2 +- src/LightGraphs.jl | 68 ++++---- src/SimpleGraphs/SimpleGraphs.jl | 54 +++--- src/SimpleGraphs/simpledigraph.jl | 156 ++++++++++++++++- src/SimpleGraphs/simpleedge.jl | 2 +- src/SimpleGraphs/simplegraph.jl | 159 +++++++++++++++-- src/biconnectivity/articulation.jl | 4 +- src/biconnectivity/biconnect.jl | 4 +- src/centrality/betweenness.jl | 4 +- src/centrality/closeness.jl | 18 +- src/centrality/eigenvector.jl | 2 +- src/centrality/katz.jl | 4 +- src/centrality/radiality.jl | 12 +- src/centrality/stress.jl | 2 +- src/community/clique_percolation.jl | 34 ++++ src/community/cliques.jl | 4 +- src/community/clustering.jl | 6 +- src/community/core-periphery.jl | 4 +- src/community/label_propagation.jl | 8 +- src/connectivity.jl | 26 +-- src/core.jl | 16 +- src/degeneracy.jl | 10 +- src/deprecations.jl | 10 +- src/digraph/cycles/hadwick-james.jl | 6 +- src/digraph/cycles/johnson.jl | 34 ++-- src/digraph/cycles/karp.jl | 4 +- src/digraph/transitivity.jl | 92 ++++++++-- src/distance.jl | 46 +++-- src/edit_distance.jl | 20 +-- src/flow/boykov_kolmogorov.jl | 202 --------------------- src/flow/dinic.jl | 118 ------------- src/flow/edmonds_karp.jl | 168 ------------------ src/flow/ext_multiroute_flow.jl | 252 --------------------------- src/flow/kishimoto.jl | 70 -------- src/flow/maximum_flow.jl | 180 ------------------- src/flow/multiroute_flow.jl | 226 ------------------------ src/flow/push_relabel.jl | 205 ---------------------- src/generators/euclideangraphs.jl | 8 +- src/generators/randgraphs.jl | 115 ++++++++---- src/generators/smallgraphs.jl | 6 +- 
src/generators/staticgraphs.jl | 9 +- src/graphcut/normalized_cut.jl | 52 +++--- src/interface.jl | 39 +---- src/linalg/LinAlg.jl | 10 +- src/linalg/graphmatrices.jl | 214 +++++++++++------------ src/linalg/nonbacktracking.jl | 22 +-- src/linalg/spectral.jl | 44 ++--- src/operators.jl | 74 ++++---- src/persistence/lg.jl | 4 +- src/shortestpaths/astar.jl | 10 +- src/shortestpaths/bellman-ford.jl | 6 +- src/shortestpaths/dijkstra.jl | 61 +++---- src/shortestpaths/floyd-warshall.jl | 25 +-- src/shortestpaths/johnson.jl | 103 +++++++++++ src/shortestpaths/yen.jl | 6 +- src/spanningtrees/kruskal.jl | 56 ++---- src/spanningtrees/prim.jl | 16 +- src/traversals/bfs.jl | 56 ++++-- src/traversals/bipartition.jl | 10 +- src/traversals/dfs.jl | 10 +- src/traversals/diffusion.jl | 11 +- src/traversals/greedy_color.jl | 129 ++++++++++++++ src/traversals/maxadjvisit.jl | 10 +- src/traversals/parallel_bfs.jl | 7 +- src/traversals/randomwalks.jl | 14 +- src/utils.jl | 5 +- test/biconnectivity/articulation.jl | 14 +- test/biconnectivity/biconnect.jl | 10 +- test/centrality/betweenness.jl | 6 +- test/centrality/eigenvector.jl | 6 +- test/centrality/katz.jl | 2 +- test/centrality/pagerank.jl | 12 +- test/centrality/radiality.jl | 2 +- test/centrality/stress.jl | 2 +- test/community/clique_percolation.jl | 19 ++ test/community/core-periphery.jl | 2 +- test/community/label_propagation.jl | 20 +-- test/connectivity.jl | 10 +- test/digraph/transitivity.jl | 75 ++++++++ test/flow/boykov_kolmogorov.jl | 30 ---- test/flow/dinic.jl | 62 ------- test/flow/edmonds_karp.jl | 59 ------- test/flow/maximum_flow.jl | 57 ------ test/flow/multiroute_flow.jl | 86 --------- test/flow/push_relabel.jl | 94 ---------- test/generators/binomial.jl | 3 +- test/generators/randgraphs.jl | 60 ++++--- test/generators/smallgraphs.jl | 2 +- test/generators/staticgraphs.jl | 12 +- test/graphcut/normalized_cut.jl | 6 +- test/interface.jl | 6 +- test/linalg/graphmatrices.jl | 68 ++++---- test/linalg/runtests.jl | 1 + test/linalg/spectral.jl | 48 ++--- test/operators.jl | 18 +- test/runtests.jl | 17 +- test/shortestpaths/astar.jl | 2 +- test/shortestpaths/bellman-ford.jl | 2 +- test/shortestpaths/dijkstra.jl | 226 ++++++++++++------------ test/shortestpaths/johnson.jl | 44 +++++ test/shortestpaths/yen.jl | 2 +- test/simplegraphs/simpleedgeiter.jl | 1 + test/simplegraphs/simplegraphs.jl | 193 +++++++++++++++++++- test/traversals/bfs.jl | 22 +-- test/traversals/bipartition.jl | 24 ++- test/traversals/greedy_color.jl | 32 ++++ test/traversals/parallel_bfs.jl | 2 +- 122 files changed, 1999 insertions(+), 2829 deletions(-) delete mode 100644 .Rhistory delete mode 100644 benchmark/max-flow.jl delete mode 100644 docs/src/flowcut.md create mode 100644 src/community/clique_percolation.jl delete mode 100644 src/flow/boykov_kolmogorov.jl delete mode 100644 src/flow/dinic.jl delete mode 100644 src/flow/edmonds_karp.jl delete mode 100644 src/flow/ext_multiroute_flow.jl delete mode 100644 src/flow/kishimoto.jl delete mode 100644 src/flow/maximum_flow.jl delete mode 100644 src/flow/multiroute_flow.jl delete mode 100644 src/flow/push_relabel.jl create mode 100644 src/shortestpaths/johnson.jl create mode 100644 src/traversals/greedy_color.jl create mode 100644 test/community/clique_percolation.jl delete mode 100644 test/flow/boykov_kolmogorov.jl delete mode 100644 test/flow/dinic.jl delete mode 100644 test/flow/edmonds_karp.jl delete mode 100644 test/flow/maximum_flow.jl delete mode 100644 test/flow/multiroute_flow.jl delete mode 100644 
test/flow/push_relabel.jl create mode 100644 test/shortestpaths/johnson.jl create mode 100644 test/traversals/greedy_color.jl diff --git a/.Rhistory b/.Rhistory deleted file mode 100644 index e69de29bb..000000000 diff --git a/.travis.yml b/.travis.yml index 5df86d648..a637934ca 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,13 +5,9 @@ os: # - osx julia: - - 0.6 +# - 0.6 - nightly -matrix: - allow_failures: - - julia: nightly - notifications: email: false diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0313f8715..272c8a7a5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,9 +14,9 @@ Please include version numbers of all relevant libraries and Julia itself. - Open an issue to discuss a feature before you start coding (this maximizes the likelihood of patch acceptance). - Minimize dependencies on external packages, and avoid introducing new dependencies. In general, - - PRs introducing dependencies on core Julia packages are ok. - - PRs introducing dependencies on non-core "leaf" packages (no subdependencies except for core Julia packages) are less ok. - - PRs introducing dependencies on non-core non-leaf packages require strict scrutiny and will likely not be accepted without some compelling reason (urgent bugfix or much-needed functionality). + - PRs introducing dependencies on Julia Base or the packages in the Julia Standard Library are ok. + - PRs introducing dependencies on third-party non-core "leaf" packages (no subdependencies except for Julia Base / Standard Library packages) are less ok. + - PRs introducing dependencies on third-party non-core non-leaf packages (that is, third-party packages that have dependencies on one or more other third-party packages) require strict scrutiny and will likely not be accepted without some compelling reason (urgent bugfix or much-needed functionality). - Put type assertions on all function arguments where conflict may arise (use abstract types, Union, or Any if necessary). - If the algorithm was presented in a paper, include a reference to the paper (i.e. a proper academic citation along with an eprint link). @@ -27,7 +27,7 @@ Please include version numbers of all relevant libraries and Julia itself. - When possible write code to reuse memory. For example: ```julia function f(g, v) - storage = Vector{Int}(nv(g)) + storage = Vector{Int}(undef, nv(g)) # some code operating on storage, g, and v. for i in 1:nv(g) storage[i] = v-i @@ -38,7 +38,7 @@ end should be rewritten as two functions ```julia function f(g::AbstractGraph, v::Integer) - storage = Vector{Int}(nv(g)) + storage = Vector{Int}(undef, nv(g)) return f!(g, v, storage) end @@ -60,17 +60,17 @@ Locate the section for your github remote in the `.git/config` file. It looks li ``` [remote "origin"] - fetch = +refs/heads/*:refs/remotes/origin/* - url = git@github.com:JuliaGraphs/LightGraphs.jl.git + fetch = +refs/heads/*:refs/remotes/origin/* + url = git@github.com:JuliaGraphs/LightGraphs.jl.git ``` Now add the line `fetch = +refs/pull/*/head:refs/remotes/origin/pr/*` to this section. Obviously, change the github url to match your project's URL. 
It ends up looking like this: ``` [remote "origin"] - fetch = +refs/heads/*:refs/remotes/origin/* - url = git@github.com:JuliaGraphs/LightGraphs.jl.git - fetch = +refs/pull/*/head:refs/remotes/origin/pr/* + fetch = +refs/heads/*:refs/remotes/origin/* + url = git@github.com:JuliaGraphs/LightGraphs.jl.git + fetch = +refs/pull/*/head:refs/remotes/origin/pr/* ``` Now fetch all the pull requests: diff --git a/README.md b/README.md index a203c0258..5861c615d 100644 --- a/README.md +++ b/README.md @@ -48,11 +48,12 @@ julia> Pkg.add("LightGraphs") ``` ## Supported Versions -* LightGraphs master is designed to work with the latest stable version of Julia. +* LightGraphs master is generally designed to work with the latest stable version of Julia (except during Julia version increments as we transition to the new version). * Julia 0.3: LightGraphs v0.3.7 is the last version guaranteed to work with Julia 0.3. * Julia 0.4: LightGraphs versions in the 0.6 series are designed to work with Julia 0.4. * Julia 0.5: LightGraphs versions in the 0.7 series are designed to work with Julia 0.5. -* Julia 0.6: LightGraphs versions in the 0.8 through 0.11 series are designed to work with Julia 0.6. +* Julia 0.6: LightGraphs versions in the 0.8 through 0.12 series are designed to work with Julia 0.6. +* Julia 0.7: LightGraphs versions in the 0.13 series are designed to work with Julia 0.7. * Later versions: Some functionality might not work with prerelease / unstable / nightly versions of Julia. If you run into a problem, please file an issue. # Contributing and Reporting Bugs diff --git a/REQUIRE b/REQUIRE index 8e140b2a7..2bc8141d3 100644 --- a/REQUIRE +++ b/REQUIRE @@ -1,4 +1,4 @@ -julia 0.6 +julia 0.7- CodecZlib 0.4 DataStructures 0.7 -SimpleTraits 0.4.0 +SimpleTraits 0.7.1 diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl index 1916ad367..cb9149d6d 100644 --- a/benchmark/benchmarks.jl +++ b/benchmark/benchmarks.jl @@ -32,6 +32,5 @@ GRAPHS = Dict{String,Graph}( include("edges.jl") include("centrality.jl") include("connectivity.jl") - include("max-flow.jl") include("traversals.jl") end diff --git a/benchmark/core.jl b/benchmark/core.jl index c05646d04..a0e9e229a 100644 --- a/benchmark/core.jl +++ b/benchmark/core.jl @@ -7,7 +7,7 @@ function bench_iteredges(g::AbstractGraph) end function bench_has_edge(g::AbstractGraph) - srand(1) + Random.srand(1) nvg = nv(g) srcs = rand([1:nvg;], cld(nvg, 4)) dsts = rand([1:nvg;], cld(nvg, 4)) diff --git a/benchmark/max-flow.jl b/benchmark/max-flow.jl deleted file mode 100644 index b1b410322..000000000 --- a/benchmark/max-flow.jl +++ /dev/null @@ -1,10 +0,0 @@ -@benchgroup "max-flow" begin - for n in 9:5:29 - srand(1) - p = 8.0 / n - A = sprand(n, n, p) - g = SimpleDiGraph(A) - cap = round.(A * 100) - @bench "n = $n" LightGraphs.maximum_flow($g, 1, $n, $cap) - end -end # max-flow diff --git a/docs/make.jl b/docs/make.jl index 711419f18..f376bf04b 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,5 +1,5 @@ using Documenter -include("../src/LightGraphs.jl") +#include("../src/LightGraphs.jl") using LightGraphs # same for contributing and license @@ -28,7 +28,6 @@ makedocs( "Matching" => "matching.md", "Community Structures" => "community.md", "Degeneracy" => "degeneracy.md", - "Flow and Cut" => "flowcut.md", "Integration with other packages" => "integration.md", "Contributing" => "contributing.md", "Developer Notes" => "developing.md", diff --git a/docs/src/basicproperties.md b/docs/src/basicproperties.md index 9e4361caa..df402b09e 100644 --- 
a/docs/src/basicproperties.md +++ b/docs/src/basicproperties.md @@ -15,10 +15,10 @@ The following is an overview of functions for accessing graph properties. For fu ## Vertex Properties -- `neighbors`: Return array of neighbors of a vertex. If graph is directed, output is equivalent of `out_neighbors`. -- `all_neighbors`: Returns array of all neighbors (both `in_neighbors` and `out_neighbors`). For undirected graphs, equivalent to `neighbors`. -- `in_neighbors`: Return array of in-neighbors. Equivalent to `neighbors` for undirected graphs. -- `out_neighbors`: Return array of out-neighbors. Equivalent to `neighbors` for undirected graphs. +- `neighbors`: Return array of neighbors of a vertex. If graph is directed, output is equivalent of `outneighbors`. +- `all_neighbors`: Returns array of all neighbors (both `inneighbors` and `outneighbors`). For undirected graphs, equivalent to `neighbors`. +- `inneighbors`: Return array of in-neighbors. Equivalent to `neighbors` for undirected graphs. +- `outneighbors`: Return array of out-neighbors. Equivalent to `neighbors` for undirected graphs. ## Edge Properties diff --git a/docs/src/developing.md b/docs/src/developing.md index fef53bae8..a90b555f6 100644 --- a/docs/src/developing.md +++ b/docs/src/developing.md @@ -10,14 +10,14 @@ within the LightGraphs package should just work: - [`eltype`](@ref) - [`has_edge`](@ref) - [`has_vertex`](@ref) -- [`in_neighbors`](@ref) +- [`inneighbors`](@ref) - [`is_directed`](@ref): Note that since we use traits to determine directedness, `is_directed` for a `CustomGraph` type should have the following signatures: - `is_directed(::Type{CustomGraph})::Bool` - `is_directed(g::CustomGraph)::Bool` - [`ne`](@ref) - [`nv`](@ref) -- [`out_neighbors`](@ref) +- [`outneighbors`](@ref) - [`vertices`](@ref) If the graph structure is designed to represent weights on edges, the [`weights`](@ref) function should also be defined. Note that the output does not necessarily have to be a dense matrix, but it must be indexable via `[u, v]`. diff --git a/docs/src/distance.md b/docs/src/distance.md index 1706835d1..38e963323 100644 --- a/docs/src/distance.md +++ b/docs/src/distance.md @@ -11,8 +11,8 @@ Pages = ["distance.md"] ```@autodocs Modules = [LightGraphs] Pages = [ - "distance.jl", - "transitivity.jl" + "distance.jl", + "transitivity.jl" ] Private = false ``` diff --git a/docs/src/flowcut.md b/docs/src/flowcut.md deleted file mode 100644 index 38d1cc332..000000000 --- a/docs/src/flowcut.md +++ /dev/null @@ -1,24 +0,0 @@ -# Flow and Cut -*LightGraphs.jl* provides different algorithms for [maximum flow](https://en.wikipedia.org/wiki/Maximum_flow_problem) -and minimum cut computations, including: - -```@index -Order = [:type, :function] -Pages = ["flowcut.md"] -``` - -## Full Docs - -```@autodocs -Modules = [LightGraphs] -Pages = [ "flow/boykov_kolmogorov.jl", - "flow/dinic.jl", - "flow/edmonds_karp.jl", - "flow/ext_multiroute_flow.jl", - "flow/kishimoto", - "flow/maximum_flow.jl", - "flow/multiroute_flow.jl", - "flow/push_relabel.jl" - ] -Private = false -``` diff --git a/docs/src/graphtypes.md b/docs/src/graphtypes.md index 4838689e4..77b176920 100644 --- a/docs/src/graphtypes.md +++ b/docs/src/graphtypes.md @@ -2,8 +2,8 @@ In addition to providing `SimpleGraph` and `SimpleDiGraph` implementations, LightGraphs also serves as a framework for other graph types. 
Currently, there are several alternative graph types, each with its own package: -- [SimpleWeightedGraphs](https://github.com/JuliaGraphs/SimpleGraphs.jl) provides a graph structure with the ability to specify weights on edges. -- [MetaGraphs](https://github.com/JuliaGraphs/MetaGraphs.jl) provides a graph structure that supports user-defined properties on the graph, vertices, and edges. +- [SimpleWeightedGraphs](https://github.com/JuliaGraphs/SimpleWeightedGraphs.jl) provides a structure for (un)directed graphs with the ability to specify weights on edges. +- [MetaGraphs](https://github.com/JuliaGraphs/MetaGraphs.jl) provides a structure for (un)directed graphs that supports user-defined properties on the graph, vertices, and edges. - [StaticGraphs](https://github.com/JuliaGraphs/StaticGraphs.jl) supports very large graph structures in a space- and time-efficient manner, but as the name implies, does not allow modification of the graph once created. @@ -11,7 +11,7 @@ created. These are general guidelines to help you select the proper graph type. -- In general, prefer `SimpleGraphs`. -- If you need edge weights and don't require large numbers of graph modifications, use `SimpleWeightedGraphs`. -- If you need labeling of vertices or edges, use `MetaGraphs`. -- If you work with very large graphs (billons to tens of billions of edges) and don't need mutability, use `StaticGraphs`. +- In general, prefer the native `SimpleGraphs`/`SimpleDiGraphs` structures in [LightGraphs.jl](https://github.com/JuliaGraphs/LightGraphs.jl). +- If you need edge weights and don't require large numbers of graph modifications, use [SimpleWeightedGraphs](https://github.com/JuliaGraphs/SimpleWeightedGraphs.jl). +- If you need labeling of vertices or edges, use [MetaGraphs](https://github.com/JuliaGraphs/MetaGraphs.jl). +- If you work with very large graphs (billions to tens of billions of edges) and don't need mutability, use [StaticGraphs](https://github.com/JuliaGraphs/StaticGraphs.jl). diff --git a/docs/src/index.md b/docs/src/index.md index 86b8f4bf1..a58ddd0ec 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,4 +1,4 @@ -# Light Graphs +# LightGraphs The goal of *LightGraphs.jl* is to offer a performant platform for network and graph analysis in Julia. To this end, LightGraphs offers both (a) a set of simple, concrete graph implementations -- `SimpleGraph` (for undirected graphs) and `SimpleDiGraph` (for directed graphs), and (b) an API for the development of more sophisticated graph implementations under the `AbstractGraph` type.
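As a quick illustration of the two concrete graph types and of the edge-list / iterator constructors introduced later in this patch (`SimpleGraph(edge_list)`, `SimpleDiGraph(edge_list)`, `SimpleGraphFromIterator`, `SimpleDiGraphFromIterator`), here is a minimal usage sketch. It assumes the patch is applied; the particular edges are illustrative only.

```julia
using LightGraphs

# An edge list shared by both examples; Edge is the exported SimpleEdge alias.
edge_list = [Edge(1, 2), Edge(2, 3), Edge(3, 1)]

g  = SimpleGraph(edge_list)    # undirected graph built from a vector of edges
dg = SimpleDiGraph(edge_list)  # directed graph built from the same vector

# The same undirected graph, built from an arbitrary iterator over SimpleEdges.
h = SimpleGraphFromIterator(e for e in edge_list)

@assert nv(g) == 3 && ne(g) == 3
@assert nv(dg) == 3 && ne(dg) == 3
@assert g == h
```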
diff --git a/docs/src/plotting.md b/docs/src/plotting.md index 8ad245d3d..f795f3ba8 100644 --- a/docs/src/plotting.md +++ b/docs/src/plotting.md @@ -8,7 +8,7 @@ This excellent graph visualization package can be used with *LightGraphs.jl* as follows: ```julia -julia> g = WheelGraph(10); am = full(adjacency_matrix(g)) +julia> g = WheelGraph(10); am = Matrix(adjacency_matrix(g)) julia> loc_x, loc_y = layout_spring_adj(am) julia> draw_layout_adj(am, loc_x, loc_y, filename="wheel10.svg") ``` diff --git a/src/LightGraphs.jl b/src/LightGraphs.jl index 7ff8e0038..f6af17a99 100644 --- a/src/LightGraphs.jl +++ b/src/LightGraphs.jl @@ -1,21 +1,32 @@ __precompile__(true) module LightGraphs -import CodecZlib -using DataStructures using SimpleTraits +import CodecZlib +import DataStructures +import DelimitedFiles +import Distributed +import IterativeEigensolvers +import LinearAlgebra +import Markdown +import Random +import SharedArrays +import SparseArrays + import Base: write, ==, <, *, ≈, convert, isless, issubset, union, intersect, - reverse, reverse!, blkdiag, isassigned, getindex, setindex!, show, - print, copy, in, sum, size, sparse, eltype, length, ndims, transpose, - ctranspose, join, start, next, done, eltype, get, issymmetric, A_mul_B!, - Pair, Tuple, zero + reverse, reverse!, isassigned, getindex, setindex!, show, + print, copy, in, sum, size, eltype, length, ndims, transpose, + ctranspose, join, start, next, done, eltype, get, Pair, Tuple, zero + + export # Interface AbstractGraph, AbstractEdge, AbstractEdgeIter, -Edge, Graph, SimpleGraph, DiGraph, SimpleDiGraph, vertices, edges, edgetype, nv, ne, src, dst, -is_directed, add_vertex!, add_edge!, rem_vertex!, rem_edge!, -has_vertex, has_edge, in_neighbors, out_neighbors, +Edge, Graph, SimpleGraph, SimpleGraphFromIterator, DiGraph, SimpleDiGraphFromIterator, +SimpleDiGraph, vertices, edges, edgetype, nv, ne, src, dst, +is_directed, +has_vertex, has_edge, inneighbors, outneighbors, # core is_ordered, add_vertices!, indegree, outdegree, degree, @@ -23,6 +34,9 @@ is_ordered, add_vertices!, indegree, outdegree, degree, neighbors, all_neighbors, common_neighbors, has_self_loops, num_self_loops, density, squash, weights, +# simplegraphs +add_edge!, add_vertex!, add_vertices!, rem_edge!, rem_vertex!, + # decomposition core_number, k_core, k_shell, k_crust, k_corona, @@ -37,7 +51,7 @@ spectral_distance, edit_distance, MinkowskiCost, BoundedMinkowskiCost, # operators -complement, reverse, reverse!, blkdiag, union, intersect, +complement, reverse, reverse!, blockdiag, union, intersect, difference, symmetric_difference, join, tensor_product, cartesian_product, crosspath, induced_subgraph, egonet, merge_vertices!, merge_vertices, @@ -62,6 +76,9 @@ randomwalk, saw, non_backtracking_randomwalk, # diffusion diffusion, diffusion_rate, +# coloring +greedy_color, + # connectivity connected_components, strongly_connected_components, weakly_connected_components, is_connected, is_strongly_connected, is_weakly_connected, period, @@ -77,9 +94,9 @@ MaximumAdjacency, AbstractMASVisitor, mincut, maximum_adjacency_visit, # a-star, dijkstra, bellman-ford, floyd-warshall a_star, dijkstra_shortest_paths, bellman_ford_shortest_paths, -has_negative_edge_cycle, enumerate_paths, floyd_warshall_shortest_paths, -transitiveclosure!, transitiveclosure, yen_k_shortest_paths, -parallel_multisource_dijkstra_shortest_paths, +has_negative_edge_cycle, enumerate_paths, johnson_shortest_paths, +floyd_warshall_shortest_paths, transitiveclosure!, transitiveclosure, transitivereduction, 
+yen_k_shortest_paths, parallel_multisource_dijkstra_shortest_paths, # centrality betweenness_centrality, closeness_centrality, degree_centrality, @@ -97,20 +114,16 @@ contract, # persistence loadgraph, loadgraphs, savegraph, LGFormat, -# flow -maximum_flow, EdmondsKarpAlgorithm, DinicAlgorithm, BoykovKolmogorovAlgorithm, PushRelabelAlgorithm, -multiroute_flow, KishimotoAlgorithm, ExtendedMultirouteFlowAlgorithm, - # randgraphs -erdos_renyi, watts_strogatz, random_regular_graph, random_regular_digraph, random_configuration_model, -random_tournament_digraph, StochasticBlockModel, make_edgestream, nearbipartiteSBM, blockcounts, -blockfractions, stochastic_block_model, barabasi_albert, barabasi_albert!, static_fitness_model, -static_scale_free, kronecker, +erdos_renyi, expected_degree_graph, watts_strogatz, random_regular_graph, random_regular_digraph, +random_configuration_model, random_tournament_digraph, StochasticBlockModel, make_edgestream, +nearbipartiteSBM, blockcounts, blockfractions, stochastic_block_model, barabasi_albert, +barabasi_albert!, static_fitness_model, static_scale_free, kronecker, #community modularity, core_periphery_deg, local_clustering,local_clustering_coefficient, global_clustering_coefficient, triangles, -label_propagation, maximal_cliques, +label_propagation, maximal_cliques, clique_percolation, #generators CompleteGraph, StarGraph, PathGraph, WheelGraph, CycleGraph, @@ -189,6 +202,7 @@ include("digraph/cycles/hadwick-james.jl") include("digraph/cycles/karp.jl") include("traversals/bfs.jl") include("traversals/bipartition.jl") +include("traversals/greedy_color.jl") include("traversals/parallel_bfs.jl") include("traversals/dfs.jl") include("traversals/maxadjvisit.jl") @@ -200,6 +214,7 @@ include("edit_distance.jl") include("shortestpaths/astar.jl") include("shortestpaths/bellman-ford.jl") include("shortestpaths/dijkstra.jl") +include("shortestpaths/johnson.jl") include("shortestpaths/floyd-warshall.jl") include("shortestpaths/yen.jl") include("linalg/LinAlg.jl") @@ -223,14 +238,7 @@ include("community/label_propagation.jl") include("community/core-periphery.jl") include("community/clustering.jl") include("community/cliques.jl") -include("flow/maximum_flow.jl") -include("flow/edmonds_karp.jl") -include("flow/dinic.jl") -include("flow/boykov_kolmogorov.jl") -include("flow/push_relabel.jl") -include("flow/multiroute_flow.jl") -include("flow/kishimoto.jl") -include("flow/ext_multiroute_flow.jl") +include("community/clique_percolation.jl") include("spanningtrees/kruskal.jl") include("spanningtrees/prim.jl") include("biconnectivity/articulation.jl") diff --git a/src/SimpleGraphs/SimpleGraphs.jl b/src/SimpleGraphs/SimpleGraphs.jl index d7c532d91..9362337c2 100644 --- a/src/SimpleGraphs/SimpleGraphs.jl +++ b/src/SimpleGraphs/SimpleGraphs.jl @@ -1,19 +1,22 @@ module SimpleGraphs +using SparseArrays +using LinearAlgebra + import Base: eltype, show, ==, Pair, Tuple, copy, length, start, next, done, issubset, zero, in import LightGraphs: _NI, _insert_and_dedup!, AbstractGraph, AbstractEdge, AbstractEdgeIter, src, dst, edgetype, nv, ne, vertices, edges, is_directed, - add_vertex!, add_edge!, rem_vertex!, rem_edge!, - has_vertex, has_edge, in_neighbors, out_neighbors, + has_vertex, has_edge, inneighbors, outneighbors, indegree, outdegree, degree, has_self_loops, num_self_loops, insorted -export AbstractSimpleGraph, AbstractSimpleDiGraph, AbstractSimpleEdge, - SimpleEdge, SimpleGraph, SimpleGraphEdge, - SimpleDiGraph, SimpleDiGraphEdge +export AbstractSimpleGraph, 
AbstractSimpleEdge, + SimpleEdge, SimpleGraph, SimpleGraphFromIterator, SimpleGraphEdge, + SimpleDiGraph, SimpleDiGraphFromIterator, SimpleDiGraphEdge, + add_vertex!, add_edge!, rem_vertex!, rem_edge! """ @@ -28,16 +31,8 @@ AbstractSimpleGraphs must have the following elements: abstract type AbstractSimpleGraph{T<:Integer} <: AbstractGraph{T} end function show(io::IO, g::AbstractSimpleGraph{T}) where T - if is_directed(g) - dir = "directed" - else - dir = "undirected" - end - if nv(g) == 0 - print(io, "empty $dir simple $T graph") - else - print(io, "{$(nv(g)), $(ne(g))} $dir simple $T graph") - end + dir = is_directed(g) ? "directed" : "undirected" + print(io, "{$(nv(g)), $(ne(g))} $dir simple $T graph") end nv(g::AbstractSimpleGraph{T}) where T = T(length(fadj(g))) @@ -61,8 +56,8 @@ add_edge!(g::AbstractSimpleGraph, x) = add_edge!(g, edgetype(g)(x)) has_edge(g::AbstractSimpleGraph, x, y) = has_edge(g, edgetype(g)(x, y)) add_edge!(g::AbstractSimpleGraph, x, y) = add_edge!(g, edgetype(g)(x, y)) -in_neighbors(g::AbstractSimpleGraph, v::Integer) = badj(g, v) -out_neighbors(g::AbstractSimpleGraph, v::Integer) = fadj(g, v) +inneighbors(g::AbstractSimpleGraph, v::Integer) = badj(g, v) +outneighbors(g::AbstractSimpleGraph, v::Integer) = fadj(g, v) function issubset(g::T, h::T) where T<:AbstractSimpleGraph (gmin, gmax) = extrema(vertices(g)) @@ -78,7 +73,7 @@ function rem_edge!(g::AbstractSimpleGraph{T}, u::Integer, v::Integer) where T rem_edge!(g, edgetype(g)(T(u), T(v))) end -@doc_str """ +""" rem_vertex!(g, v) Remove the vertex `v` from graph `g`. Return false if removal fails @@ -98,43 +93,52 @@ vertices in `g` will be indexed by ``1:|V|-1``. function rem_vertex!(g::AbstractSimpleGraph, v::Integer) v in vertices(g) || return false n = nv(g) + self_loop_n = false # true if n is self-looped (see #820) # remove the in_edges from v - srcs = copy(in_neighbors(g, v)) + srcs = copy(inneighbors(g, v)) @inbounds for s in srcs rem_edge!(g, edgetype(g)(s, v)) end # remove the in_edges from the last vertex - neigs = copy(in_neighbors(g, n)) + neigs = copy(inneighbors(g, n)) @inbounds for s in neigs rem_edge!(g, edgetype(g)(s, n)) end if v != n # add the edges from n back to v @inbounds for s in neigs - add_edge!(g, edgetype(g)(s, v)) + if s != n # don't add an edge to the last vertex - see #820. 
+ add_edge!(g, edgetype(g)(s, v)) + else + self_loop_n = true + end end end if is_directed(g) # remove the out_edges from v - dsts = copy(out_neighbors(g, v)) + dsts = copy(outneighbors(g, v)) @inbounds for d in dsts rem_edge!(g, edgetype(g)(v, d)) end # remove the out_edges from the last vertex - neigs = copy(out_neighbors(g, n)) + neigs = copy(outneighbors(g, n)) @inbounds for d in neigs rem_edge!(g, edgetype(g)(n, d)) end if v != n # add the out_edges back to v @inbounds for d in neigs - add_edge!(g, edgetype(g)(v, d)) + if d != n + add_edge!(g, edgetype(g)(v, d)) + end end end end - + if self_loop_n + add_edge!(g, edgetype(g)(v, v)) + end pop!(g.fadjlist) if is_directed(g) pop!(g.badjlist) diff --git a/src/SimpleGraphs/simpledigraph.jl b/src/SimpleGraphs/simpledigraph.jl index e2da7426a..159a9ed61 100644 --- a/src/SimpleGraphs/simpledigraph.jl +++ b/src/SimpleGraphs/simpledigraph.jl @@ -15,7 +15,7 @@ end eltype(x::SimpleDiGraph{T}) where T = T # DiGraph{UInt8}(6), DiGraph{Int16}(7), DiGraph{Int8}() -function (::Type{SimpleDiGraph{T}})(n::Integer = 0) where T<:Integer +function SimpleDiGraph{T}(n::Integer = 0) where T<:Integer fadjlist = [Vector{T}() for _ = one(T):n] badjlist = [Vector{T}() for _ = one(T):n] return SimpleDiGraph(0, fadjlist, badjlist) @@ -31,7 +31,7 @@ SimpleDiGraph(n::T) where T<:Integer = SimpleDiGraph{T}(n) SimpleDiGraph(::Type{T}) where T<:Integer = SimpleDiGraph{T}(zero(T)) # sparse adjacency matrix constructor: SimpleDiGraph(adjmx) -function (::Type{SimpleDiGraph{T}})(adjmx::SparseMatrixCSC{U}) where T<:Integer where U<:Real +function SimpleDiGraph{T}(adjmx::SparseArrays.SparseMatrixCSC{U}) where T<:Integer where U<:Real dima, dimb = size(adjmx) isequal(dima, dimb) || throw(ArgumentError("Adjacency / distance matrices must be square")) @@ -50,14 +50,13 @@ function (::Type{SimpleDiGraph{T}})(adjmx::SparseMatrixCSC{U}) where T<:Integer end # dense adjacency matrix constructor: DiGraph{UInt8}(adjmx) -function (::Type{SimpleDiGraph{T}})(adjmx::AbstractMatrix) where T<:Integer +function SimpleDiGraph{T}(adjmx::AbstractMatrix{U}) where T<:Integer where U <: Real dima, dimb = size(adjmx) isequal(dima, dimb) || throw(ArgumentError("Adjacency / distance matrices must be square")) g = SimpleDiGraph(T(dima)) - @inbounds for i in find(adjmx) - ind = ind2sub((dima, dimb), i) - add_edge!(g, ind...) 
+ @inbounds for i in findall(adjmx.!=zero(U)) + add_edge!(g, i[1], i[2]) end return g end @@ -66,7 +65,7 @@ end SimpleDiGraph(adjmx::AbstractMatrix) = SimpleDiGraph{Int}(adjmx) # converts DiGraph{Int} to DiGraph{Int32} -function (::Type{SimpleDiGraph{T}})(g::SimpleDiGraph) where T<:Integer +function SimpleDiGraph{T}(g::SimpleDiGraph) where T<:Integer h_fadj = [Vector{T}(x) for x in fadj(g)] h_badj = [Vector{T}(x) for x in badj(g)] return SimpleDiGraph(ne(g), h_fadj, h_badj) @@ -83,6 +82,149 @@ function SimpleDiGraph(g::AbstractSimpleGraph) return h end + +@inbounds function cleanupedges!(fadjlist::Vector{Vector{T}}, + badjlist::Vector{Vector{T}}) where T<:Integer + neg = 0 + for v in 1:length(fadjlist) + if !issorted(fadjlist[v]) + sort!(fadjlist[v]) + end + if !issorted(badjlist[v]) + sort!(badjlist[v]) + end + unique!(fadjlist[v]) + unique!(badjlist[v]) + neg += length(fadjlist[v]) + end + return neg +end + +function SimpleDiGraph(edge_list::Vector{SimpleDiGraphEdge{T}}) where T<:Integer + nvg = zero(T) + @inbounds( + for e in edge_list + nvg = max(nvg, src(e), dst(e)) + end) + + list_sizes_out = ones(Int, nvg) + list_sizes_in = ones(Int, nvg) + degs_out = zeros(Int, nvg) + degs_in = zeros(Int, nvg) + @inbounds( + for e in edge_list + s, d = src(e), dst(e) + (s >= 1 && d >= 1) || continue + degs_out[s] += 1 + degs_in[d] += 1 + end) + + fadjlist = Vector{Vector{T}}(undef, nvg) + badjlist = Vector{Vector{T}}(undef, nvg) + @inbounds( + for v in 1:nvg + fadjlist[v] = Vector{T}(undef, degs_out[v]) + badjlist[v] = Vector{T}(undef, degs_in[v]) + end) + + @inbounds( + for e in edge_list + s, d = src(e), dst(e) + (s >= 1 && d >= 1) || continue + fadjlist[s][list_sizes_out[s]] = d + list_sizes_out[s] += 1 + badjlist[d][list_sizes_in[d]] = s + list_sizes_in[d] += 1 + end) + + neg = cleanupedges!(fadjlist, badjlist) + g = SimpleDiGraph{T}() + g.fadjlist = fadjlist + g.badjlist = badjlist + g.ne = neg + + return g +end + + +@inbounds function add_to_lists!(fadjlist::Vector{Vector{T}}, + badjlist::Vector{Vector{T}}, s::T, d::T) where T<:Integer + nvg = length(fadjlist) + nvg_new = max(nvg, s, d) + for v = (nvg+1):nvg_new + push!(fadjlist, Vector{T}()) + push!(badjlist, Vector{T}()) + end + + push!(fadjlist[s], d) + push!(badjlist[d], s) +end + +function _SimpleDiGraphFromIterator(iter)::SimpleDiGraph + T = Union{} + fadjlist = Vector{Vector{T}}() + badjlist = Vector{Vector{T}}() + @inbounds( + for e in iter + typeof(e) <: SimpleDiGraphEdge || + throw(ArgumentError("iter must be an iterator over SimpleEdge")) + s, d = src(e), dst(e) + (s >= 1 && d >= 1) || continue + if T != eltype(e) + T = typejoin(T, eltype(e)) + fadjlist = convert(Vector{Vector{T}}, fadjlist) + badjlist = convert(Vector{Vector{T}}, badjlist) + end + add_to_lists!(fadjlist, badjlist, s, d) + end) + + T == Union{} && return SimpleDiGraph(0) + neg = cleanupedges!(fadjlist, badjlist) + g = SimpleDiGraph{T}() + g.fadjlist = fadjlist + g.badjlist = badjlist + g.ne = neg + + return g +end + +function _SimpleDiGraphFromIterator(iter, ::Type{SimpleDiGraphEdge{T}}) where T<:Integer + fadjlist = Vector{Vector{T}}() + badjlist = Vector{Vector{T}}() + @inbounds( + for e in iter + s, d = src(e), dst(e) + (s >= 1 && d >= 1) || continue + add_to_lists!(fadjlist, badjlist, s, d) + end) + + g = SimpleDiGraph{T}() + neg = cleanupedges!(fadjlist, badjlist) + g.fadjlist = fadjlist + g.badjlist = badjlist + g.ne = neg + + return g +end + +""" + SimpleDiGraphFromIterator(iter) + +Creates a SimpleDiGraph from an iterator iter. 
The elements in iter must +be of type <: SimpleEdge. +""" +function SimpleDiGraphFromIterator(iter)::SimpleDiGraph + if Base.IteratorEltype(iter) == Base.EltypeUnknown() + return _SimpleDiGraphFromIterator(iter) + end + # if the eltype of iter is known but is a proper supertype of SimpleDiGraphEdge + if !(eltype(iter) <: SimpleDiGraphEdge) && SimpleDiGraphEdge <: eltype(iter) + return _SimpleDiGraphFromIterator(iter) + end + return _SimpleDiGraphFromIterator(iter, eltype(iter)) +end + + edgetype(::SimpleDiGraph{T}) where T<: Integer = SimpleGraphEdge{T} diff --git a/src/SimpleGraphs/simpleedge.jl b/src/SimpleGraphs/simpleedge.jl index cf170b26e..407d7eb93 100644 --- a/src/SimpleGraphs/simpleedge.jl +++ b/src/SimpleGraphs/simpleedge.jl @@ -26,7 +26,7 @@ show(io::IO, e::AbstractSimpleEdge) = print(io, "Edge $(e.src) => $(e.dst)") Pair(e::AbstractSimpleEdge) = Pair(src(e), dst(e)) Tuple(e::AbstractSimpleEdge) = (src(e), dst(e)) -(::Type{SimpleEdge{T}})(e::AbstractSimpleEdge) where T <: Integer = SimpleEdge{T}(T(e.src), T(e.dst)) +SimpleEdge{T}(e::AbstractSimpleEdge) where T <: Integer = SimpleEdge{T}(T(e.src), T(e.dst)) # Convenience functions reverse(e::T) where T<:AbstractSimpleEdge = T(dst(e), src(e)) diff --git a/src/SimpleGraphs/simplegraph.jl b/src/SimpleGraphs/simplegraph.jl index 07ce62fd8..807159915 100644 --- a/src/SimpleGraphs/simplegraph.jl +++ b/src/SimpleGraphs/simplegraph.jl @@ -5,7 +5,7 @@ const SimpleGraphEdge = SimpleEdge A type representing an undirected graph. """ -mutable struct SimpleGraph{T<:Integer} <: AbstractSimpleGraph{T} +mutable struct SimpleGraph{T <: Integer} <: AbstractSimpleGraph{T} ne::Int fadjlist::Vector{Vector{T}} # [src]: (dst, dst, dst) end @@ -13,7 +13,7 @@ end eltype(x::SimpleGraph{T}) where T = T # Graph{UInt8}(6), Graph{Int16}(7), Graph{UInt8}() -function (::Type{SimpleGraph{T}})(n::Integer = 0) where T<:Integer +function SimpleGraph{T}(n::Integer=0) where T <: Integer fadjlist = [Vector{T}() for _ = one(T):n] return SimpleGraph{T}(0, fadjlist) end @@ -22,27 +22,26 @@ end SimpleGraph() = SimpleGraph{Int}() # SimpleGraph(6), SimpleGraph(0x5) -SimpleGraph(n::T) where T<:Integer = SimpleGraph{T}(n) +SimpleGraph(n::T) where T <: Integer = SimpleGraph{T}(n) # SimpleGraph(UInt8) -SimpleGraph(::Type{T}) where T<:Integer = SimpleGraph{T}(zero(T)) +SimpleGraph(::Type{T}) where T <: Integer = SimpleGraph{T}(zero(T)) # Graph{UInt8}(adjmx) -function (::Type{SimpleGraph{T}})(adjmx::AbstractMatrix) where T<:Integer +function SimpleGraph{T}(adjmx::AbstractMatrix) where T <: Integer dima, dimb = size(adjmx) isequal(dima, dimb) || throw(ArgumentError("Adjacency / distance matrices must be square")) - issymmetric(adjmx) || throw(ArgumentError("Adjacency / distance matrices must be symmetric")) + LinearAlgebra.issymmetric(adjmx) || throw(ArgumentError("Adjacency / distance matrices must be symmetric")) g = SimpleGraph(T(dima)) - @inbounds for i in find(triu(adjmx)) - ind = ind2sub((dima, dimb), i) - add_edge!(g, ind...) 
+ @inbounds for i in findall(triu(adjmx) .!= 0) + add_edge!(g, i[1], i[2]) end return g end # converts Graph{Int} to Graph{Int32} -function (::Type{SimpleGraph{T}})(g::SimpleGraph) where T<:Integer +function SimpleGraph{T}(g::SimpleGraph) where T <: Integer h_fadj = [Vector{T}(x) for x in fadj(g)] return SimpleGraph(ne(g), h_fadj) end @@ -72,7 +71,145 @@ function SimpleGraph(g::SimpleDiGraph) return SimpleGraph(edgect ÷ 2, newfadj) end -edgetype(::SimpleGraph{T}) where T<:Integer = SimpleGraphEdge{T} +@inbounds function cleanupedges!(fadjlist::Vector{Vector{T}}) where T <: Integer + neg = 0 + for v in 1:length(fadjlist) + if !issorted(fadjlist[v]) + sort!(fadjlist[v]) + end + unique!(fadjlist[v]) + neg += length(fadjlist[v]) + # self-loops should count as one edge + for w in fadjlist[v] + if w == v + neg += 1 + break + end + end + end + return neg ÷ 2 +end + +function SimpleGraph(edge_list::Vector{SimpleGraphEdge{T}}) where T <: Integer + nvg = zero(T) + @inbounds( + for e in edge_list + nvg = max(nvg, src(e), dst(e)) + end) + + list_sizes = ones(Int, nvg) + degs = zeros(Int, nvg) + @inbounds( + for e in edge_list + s, d = src(e), dst(e) + (s >= 1 && d >= 1) || continue + degs[s] += 1 + if s != d + degs[d] += 1 + end + end) + + fadjlist = Vector{Vector{T}}(undef, nvg) + @inbounds( + for v in 1:nvg + fadjlist[v] = Vector{T}(undef, degs[v]) + end) + + @inbounds( + for e in edge_list + s, d = src(e), dst(e) + (s >= 1 && d >= 1) || continue + fadjlist[s][list_sizes[s]] = d + list_sizes[s] += 1 + if s != d + fadjlist[d][list_sizes[d]] = s + list_sizes[d] += 1 + end + end) + + neg = cleanupedges!(fadjlist) + g = SimpleGraph{T}() + g.fadjlist = fadjlist + g.ne = neg + + return g +end + + +@inbounds function add_to_fadjlist!(fadjlist::Vector{Vector{T}}, s::T, d::T) where T <: Integer + nvg = length(fadjlist) + nvg_new = max(nvg, s, d) + for v = (nvg + 1):nvg_new + push!(fadjlist, Vector{T}()) + end + + push!(fadjlist[s], d) + if s != d + push!(fadjlist[d], s) + end +end + +function _SimpleGraphFromIterator(iter)::SimpleGraph + T = Union{} + fadjlist = Vector{Vector{T}}() + @inbounds( + for e in iter + typeof(e) <: SimpleGraphEdge || + throw(ArgumentError("iter must be an iterator over SimpleEdge")) + s, d = src(e), dst(e) + (s >= 1 && d >= 1) || continue + if T != eltype(e) + T = typejoin(T, eltype(e)) + fadjlist = convert(Vector{Vector{T}}, fadjlist) + end + add_to_fadjlist!(fadjlist, s, d) + end) + + T == Union{} && return SimpleGraph(0) + neg = cleanupedges!(fadjlist) + g = SimpleGraph{T}() + g.fadjlist = fadjlist + g.ne = neg + + return g +end + +function _SimpleGraphFromIterator(iter, ::Type{SimpleGraphEdge{T}}) where T <: Integer + fadjlist = Vector{Vector{T}}() + @inbounds( + for e in iter + s, d = src(e), dst(e) + (s >= 1 && d >= 1) || continue + add_to_fadjlist!(fadjlist, s, d) + end) + + neg = cleanupedges!(fadjlist) + g = SimpleGraph{T}() + g.fadjlist = fadjlist + g.ne = neg + + return g +end + +""" + SimpleGraphFromIterator(iter) + +Creates a SimpleGraph from an iterator iter. The elements in iter must +be of type <: SimpleEdge. 
+""" +function SimpleGraphFromIterator(iter)::SimpleGraph + if Base.IteratorEltype(iter) == Base.EltypeUnknown() + return _SimpleGraphFromIterator(iter) + end + # if the eltype of iter is know but is a proper supertype of SimpleDiEdge + if !(eltype(iter) <: SimpleGraphEdge) && SimpleGraphEdge <: eltype(iter) + return _SimpleGraphFromIterator(iter) + end + return _SimpleGraphFromIterator(iter, eltype(iter)) +end + + +edgetype(::SimpleGraph{T}) where T <: Integer = SimpleGraphEdge{T} """ badj(g::SimpleGraph[, v::Integer]) diff --git a/src/biconnectivity/articulation.jl b/src/biconnectivity/articulation.jl index 5773a9421..37ad2c1af 100644 --- a/src/biconnectivity/articulation.jl +++ b/src/biconnectivity/articulation.jl @@ -29,7 +29,7 @@ function visit!(state::Articulations, g::AbstractGraph, u::Integer, v::Integer) state.depth[v] = state.id state.low[v] = state.depth[v] - for w in out_neighbors(g, v) + for w in outneighbors(g, v) if state.depth[w] == 0 children += 1 visit!(state, g, v, w) @@ -61,5 +61,5 @@ function articulation(g::AbstractGraph) visit!(state, g, u, u) end end - return find(state.articulation_points) + return findall(state.articulation_points) end diff --git a/src/biconnectivity/biconnect.jl b/src/biconnectivity/biconnect.jl index 264749327..bab4b4c64 100644 --- a/src/biconnectivity/biconnect.jl +++ b/src/biconnectivity/biconnect.jl @@ -28,7 +28,7 @@ function visit!(g::AbstractGraph, state::Biconnections, u::Integer, v::Integer) state.depth[v] = state.id state.low[v] = state.depth[v] - for w in out_neighbors(g, v) + for w in outneighbors(g, v) if state.depth[w] == 0 children += 1 push!(state.stack, Edge(min(v, w), max(v, w))) @@ -53,7 +53,7 @@ function visit!(g::AbstractGraph, state::Biconnections, u::Integer, v::Integer) end end -@doc_str """ +""" biconnected_components(g) Compute the [biconnected components](https://en.wikipedia.org/wiki/Biconnected_component) diff --git a/src/centrality/betweenness.jl b/src/centrality/betweenness.jl index 8759c1bd6..d525e0191 100644 --- a/src/centrality/betweenness.jl +++ b/src/centrality/betweenness.jl @@ -2,7 +2,7 @@ # TODO - weighted, separate unweighted, edge betweenness -@doc_str """ +""" betweenness_centrality(g[, vs]) betweenness_centrality(g, k) parallel_betweenness_centrality(g[, vs]) @@ -75,7 +75,7 @@ function parallel_betweenness_centrality( # Parallel reduction - betweenness = @parallel (+) for s in vs + betweenness = Distributed.@distributed (+) for s in vs temp_betweenness = zeros(n_v) if degree(g, s) > 0 # this might be 1? state = dijkstra_shortest_paths(g, s, distmx; allpaths=true, trackvertices=true) diff --git a/src/centrality/closeness.jl b/src/centrality/closeness.jl index 1dcc2fcdb..dfd40eb53 100644 --- a/src/centrality/closeness.jl +++ b/src/centrality/closeness.jl @@ -1,4 +1,4 @@ -@doc_str """ +""" closeness_centrality(g) Calculate the [closeness centrality](https://en.wikipedia.org/wiki/Centrality#Closeness_centrality) @@ -9,9 +9,8 @@ of the graph `g`. Return a vector representing the centrality calculated for eac node `n` by ``\\frac{|δ_n|}{|V|-1}``, where ``δ_n`` is the set of vertices reachable from node `n`. 
""" -function closeness_centrality( - g::AbstractGraph, - distmx::AbstractMatrix = weights(g); +function closeness_centrality(g::AbstractGraph, + distmx::AbstractMatrix=weights(g); normalize=true) n_v = nv(g) @@ -37,16 +36,15 @@ function closeness_centrality( return closeness end -function parallel_closeness_centrality( - g::AbstractGraph, - distmx::AbstractMatrix = weights(g); +function parallel_closeness_centrality(g::AbstractGraph, + distmx::AbstractMatrix=weights(g); normalize=true)::Vector{Float64} n_v = Int(nv(g)) - closeness = SharedVector{Float64}(n_v) + closeness = SharedArrays.SharedVector{Float64}(n_v) - @sync @parallel for u in vertices(g) + Distributed.@sync Distributed.@distributed for u in vertices(g) if degree(g, u) == 0 # no need to do Dijkstra here closeness[u] = 0.0 else @@ -63,5 +61,5 @@ function parallel_closeness_centrality( end end end - return sdata(closeness) + return SharedArrays.sdata(closeness) end diff --git a/src/centrality/eigenvector.jl b/src/centrality/eigenvector.jl index 191734450..936a50c03 100644 --- a/src/centrality/eigenvector.jl +++ b/src/centrality/eigenvector.jl @@ -24,4 +24,4 @@ eigenvector of the adjacency matrix \$\\mathbf{A}\$. - Mark E. J. Newman: Networks: An Introduction. Oxford University Press, USA, 2010, pp. 169. """ -eigenvector_centrality(g::AbstractGraph) = abs.(vec(eigs(adjacency_matrix(g), nev=1)[2]))::Vector{Float64} +eigenvector_centrality(g::AbstractGraph) = abs.(vec(IterativeEigensolvers.eigs(adjacency_matrix(g), nev=1)[2]))::Vector{Float64} diff --git a/src/centrality/katz.jl b/src/centrality/katz.jl index 3e474192c..5f6f43dca 100644 --- a/src/centrality/katz.jl +++ b/src/centrality/katz.jl @@ -30,9 +30,9 @@ the centrality calculated for each node in `g`. function katz_centrality(g::AbstractGraph, α::Real=0.3) nvg = nv(g) v = ones(Float64, nvg) - spI = speye(Float64, nvg) + spI = SparseArrays.sparse(one(Float64) * LinearAlgebra.I, nvg, nvg) A = adjacency_matrix(g, Bool; dir=:in) v = (spI - α * A) \ v - v /= norm(v) + v /= LinearAlgebra.norm(v) return v end diff --git a/src/centrality/radiality.jl b/src/centrality/radiality.jl index defba647f..11395ffb3 100644 --- a/src/centrality/radiality.jl +++ b/src/centrality/radiality.jl @@ -28,23 +28,23 @@ function radiality_centrality(g::AbstractGraph)::Vector{Float64} dmtr = max(dmtr, maximum(d.dists)) meandists[v] = sum(d.dists) / (n_v - 1) # ignore the source vx end - meandists = (dmtr + 1).-(meandists) - return meandists./dmtr + meandists = (dmtr + 1) .- (meandists) + return meandists ./ dmtr end function parallel_radiality_centrality(g::AbstractGraph)::Vector{Float64} n_v = nv(g) vs = vertices(g) n = ne(g) - meandists = SharedVector{Float64}(Int(n_v)) - maxdists = SharedVector{Float64}(Int(n_v)) + meandists = SharedArrays.SharedVector{Float64}(Int(n_v)) + maxdists = SharedArrays.SharedVector{Float64}(Int(n_v)) - @sync @parallel for i = 1:n_v + Distributed.@sync Distributed.@distributed for i = 1:n_v d = dijkstra_shortest_paths(g, vs[i]) maxdists[i] = maximum(d.dists) meandists[i] = sum(d.dists) / (n_v - 1) end dmtr = maximum(maxdists) radialities = collect(meandists) - return ((dmtr + 1).-radialities)./dmtr + return ((dmtr + 1) .- radialities) ./ dmtr end diff --git a/src/centrality/stress.jl b/src/centrality/stress.jl index 57ac177d7..287c56b3d 100644 --- a/src/centrality/stress.jl +++ b/src/centrality/stress.jl @@ -43,7 +43,7 @@ function parallel_stress_centrality( # Parallel reduction - stress = @parallel (+) for s in vs + stress = Distributed.@distributed (+) for s in vs 
temp_stress = zeros(Int, n_v) if degree(g, s) > 0 # this might be 1? state = dijkstra_shortest_paths(g, s; allpaths=true, trackvertices=true) diff --git a/src/community/clique_percolation.jl b/src/community/clique_percolation.jl new file mode 100644 index 000000000..45e51d2dd --- /dev/null +++ b/src/community/clique_percolation.jl @@ -0,0 +1,34 @@ +""" + clique_percolation(g, k=3) + +Community detection using the clique percolation algorithm. Communities are potentially overlapping. +Return a vector of vectors `c` such that `c[i]` is the set of vertices in community `i`. +The parameter `k` defines the size of the clique to use in percolation. + +### References +- [Palla G, Derenyi I, Farkas I J, et al.](https://www.nature.com/articles/nature03607) +""" +function clique_percolation end + +@traitfn function clique_percolation(g::::(!IsDirected); k = 3) + kcliques = filter(x->length(x)>=k, maximal_cliques(g)) + nc = length(kcliques) + # graph whose nodes represent k-cliques + h = Graph(nc) + # vector for counting common nodes between two cliques efficiently + x = falses(nv(g)) + for i = 1:nc + x[kcliques[i]] .= true + for j = i+1:nc + sum(x[kcliques[j]]) >= k-1 && add_edge!(h, i, j) + end + # reset status + x[kcliques[i]] .= false + end + components = connected_components(h) + communities = [BitSet() for i=1:length(components)] + for (i,component) in enumerate(components) + push!(communities[i], vcat(kcliques[component]...)...) + end + return communities +end diff --git a/src/community/cliques.jl b/src/community/cliques.jl index 67a949e18..0e32ee07c 100644 --- a/src/community/cliques.jl +++ b/src/community/cliques.jl @@ -24,7 +24,7 @@ julia> maximal_cliques(g) """ function maximal_cliques end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function maximal_cliques{T, AG<:AbstractGraph{T}}(g::AG::(!IsDirected)) +@traitfn function maximal_cliques(g::AG::(!IsDirected)) where {T, AG<:AbstractGraph{T}} # Cache nbrs and find first pivot (highest degree) maxconn = -1 # uncomment this when https://github.com/JuliaLang/julia/issues/23618 is fixed @@ -38,7 +38,7 @@ function maximal_cliques end for n in vertices(g) nbrs = Set{T}() - union!(nbrs, out_neighbors(g, n)) + union!(nbrs, outneighbors(g, n)) delete!(nbrs, n) # ignore edges between n and itself conn = length(nbrs) if conn > maxconn diff --git a/src/community/clustering.jl b/src/community/clustering.jl index f950825c4..656c62f29 100644 --- a/src/community/clustering.jl +++ b/src/community/clustering.jl @@ -21,7 +21,7 @@ function local_clustering!(storage::AbstractVector{Bool}, g::AbstractGraph, v::I k <= 1 && return (0, 0) neighs = neighbors(g, v) tcount = 0 - storage[neighs] = true + storage[neighs] .= true @inbounds for i in neighs @inbounds for j in neighbors(g, i) @@ -41,12 +41,12 @@ function local_clustering!(storage::AbstractVector{Bool}, i = 0 for (i, v) in enumerate(vs) ntriang[i], nalltriang[i] = local_clustering!(storage, g, v) - storage[neighbors(g, v)] = false + storage[neighbors(g, v)] .= false end return ntriang, nalltriang end -@doc_str """ +""" local_clustering(g, v) local_clustering(g, vs) diff --git a/src/community/core-periphery.jl b/src/community/core-periphery.jl index 91ad51a4c..3ca7e1227 100644 --- a/src/community/core-periphery.jl +++ b/src/community/core-periphery.jl @@ -21,7 +21,7 @@ function core_periphery_deg end kbest = k end end - c = 2 + zeros(Int, nv(g)) - c[p[1:kbest]] = 1 + c = fill(2, nv(g)) + c[p[1:kbest]] .= 1 c end diff --git
a/src/community/label_propagation.jl b/src/community/label_propagation.jl index b3a8e47f7..96cd37392 100644 --- a/src/community/label_propagation.jl +++ b/src/community/label_propagation.jl @@ -12,10 +12,10 @@ the second is the convergence history for each node. Will return after function label_propagation(g::AbstractGraph{T}, maxiter=1000) where T n = nv(g) label = collect(one(T):n) - active_vs = IntSet(vertices(g)) + active_vs = BitSet(vertices(g)) c = NeighComm(collect(one(T):n), fill(-1, n), one(T)) convergence_hist = Vector{Int}() - random_order = Vector{T}(n) + random_order = Vector{T}(undef, n) i = 0 while !isempty(active_vs) && i < maxiter num_active = length(active_vs) @@ -32,7 +32,7 @@ function label_propagation(g::AbstractGraph{T}, maxiter=1000) where T old_comm = label[u] label[u] = vote!(g, label, c, u) if old_comm != label[u] - for v in out_neighbors(g, u) + for v in outneighbors(g, u) push!(active_vs, v) end else @@ -85,7 +85,7 @@ function vote!(g::AbstractGraph, m::Vector, c::NeighComm, u::Integer) c.neigh_cnt[c.neigh_pos[1]] = 0 c.neigh_last = 2 max_cnt = 0 - for neigh in out_neighbors(g, u) + for neigh in outneighbors(g, u) neigh_comm = m[neigh] if c.neigh_cnt[neigh_comm] < 0 c.neigh_cnt[neigh_comm] = 0 diff --git a/src/connectivity.jl b/src/connectivity.jl index 6ef1f9d29..5fa9b24ca 100644 --- a/src/connectivity.jl +++ b/src/connectivity.jl @@ -12,7 +12,7 @@ This algorithm is linear in the number of edges of the graph. """ function connected_components! end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function connected_components!{T, AG<:AbstractGraph{T}}(label::AbstractVector, g::AG::(!IsDirected)) +@traitfn function connected_components!(label::AbstractVector, g::AG::(!IsDirected)) where {T, AG<:AbstractGraph{T}} nvg = nv(g) for u in vertices(g) @@ -21,8 +21,8 @@ function connected_components! end Q = Vector{T}() push!(Q, u) while !isempty(Q) - src = shift!(Q) - for vertex in out_neighbors(g, src) + src = popfirst!(Q) + for vertex in outneighbors(g, src) if label[vertex] == zero(T) push!(Q, vertex) label[vertex] = u @@ -85,7 +85,7 @@ For directed graphs, see [`strongly_connected_components`](@ref) and """ function connected_components end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function connected_components{T, AG<:AbstractGraph{T}}(g::AG::(!IsDirected)) +@traitfn function connected_components(g::AG::(!IsDirected)) where {T, AG<:AbstractGraph{T}} label = zeros(T, nv(g)) connected_components!(label, g) c, d = components(label) @@ -131,7 +131,7 @@ The order of the components is not part of the API contract. """ function strongly_connected_components end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function strongly_connected_components{T, AG<:AbstractGraph{T}}(g::AG::IsDirected) +@traitfn function strongly_connected_components(g::AG::IsDirected) where {T, AG<:AbstractGraph{T}} zero_t = zero(T) one_t = one(T) nvg = nv(g) @@ -159,7 +159,7 @@ function strongly_connected_components end while !isempty(dfs_stack) v = dfs_stack[end] #end is the most recently added item u = zero_t - for n in out_neighbors(g, v) + for n in outneighbors(g, v) if index[n] == zero_t # unvisited neighbor found u = n @@ -231,7 +231,7 @@ Will throw an error if the graph is not strongly connected. 
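The `label_propagation` and `connected_components!` hunks above are straightforward Julia 0.7 renames; a compact sketch of the three replacements they rely on, with made-up values:

```julia
# Julia 0.7 replacements used above, on made-up data.
q = [1, 2, 3]
v = popfirst!(q)                      # was shift!(q); q is now [2, 3]

active_vs = BitSet([1, 2, 3])         # IntSet was renamed to BitSet

random_order = Vector{Int}(undef, 3)  # was Vector{Int}(3); contents are uninitialized
```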
""" function period end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function period{T, AG<:AbstractGraph{T}}(g::AG::IsDirected) +@traitfn function period(g::AG::IsDirected) where {T, AG<:AbstractGraph{T}} !is_strongly_connected(g) && throw(ArgumentError("Graph must be strongly connected")) # First check if there's a self loop @@ -260,13 +260,13 @@ in the directed graph `g`. If `scc` is missing, generate the strongly connected components first. """ function condensation end -@traitfn function condensation{T<:Integer}(g::::IsDirected, scc::Vector{Vector{T}}) +@traitfn function condensation(g::::IsDirected, scc::Vector{Vector{T}}) where T<:Integer h = DiGraph{T}(length(scc)) - component = Vector{T}(nv(g)) + component = Vector{T}(undef, nv(g)) for (i, s) in enumerate(scc) - @inbounds component[s] = i + @inbounds component[s] .= i end @inbounds for e in edges(g) @@ -290,7 +290,7 @@ connected components in which the components do not have any leaving edges. """ function attracting_components end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function attracting_components{T, AG<:AbstractGraph{T}}(g::AG::IsDirected) +@traitfn function attracting_components(g::AG::IsDirected) where {T, AG<:AbstractGraph{T}} scc = strongly_connected_components(g) cond = condensation(g, scc) @@ -321,7 +321,7 @@ neighborhood(g::AbstractGraph{T}, v::Integer, d, distmx::AbstractMatrix{U}=weigh first.(neighborhood_dists(g, v, d, distmx; dir=dir)) neighborhood_dists(g::AbstractGraph{T}, v::Integer, d, distmx::AbstractMatrix{U}=weights(g); dir=:out) where T<:Integer where U<:Real = - (dir == :out) ? _neighborhood(g, v, d, distmx, out_neighbors) : _neighborhood(g, v, d, distmx, in_neighbors) + (dir == :out) ? _neighborhood(g, v, d, distmx, outneighbors) : _neighborhood(g, v, d, distmx, inneighbors) function _neighborhood(g::AbstractGraph{T}, v::Integer, d::Real, distmx::AbstractMatrix{U}, neighborfn::Function) where T<:Integer where U<:Real @@ -334,7 +334,7 @@ function _neighborhood(g::AbstractGraph{T}, v::Integer, d::Real, distmx::Abstrac dists = fill(typemax(U), nv(g)) dists[v] = zero(U) while !isempty(Q) - src = shift!(Q) + src = popfirst!(Q) seen[src] && continue seen[src] = true currdist = dists[src] diff --git a/src/core.jl b/src/core.jl index 3e3f82519..d9468c7c9 100644 --- a/src/core.jl +++ b/src/core.jl @@ -29,7 +29,7 @@ add_vertices!(g::AbstractGraph, n::Integer) = sum([add_vertex!(g) for i = 1:n]) Return a vector corresponding to the number of edges which end at each vertex in graph `g`. If `v` is specified, only return degrees for vertices in `v`. """ -indegree(g::AbstractGraph, v::Integer) = length(in_neighbors(g, v)) +indegree(g::AbstractGraph, v::Integer) = length(inneighbors(g, v)) indegree(g::AbstractGraph, v::AbstractVector = vertices(g)) = [indegree(g, x) for x in v] """ @@ -38,7 +38,7 @@ indegree(g::AbstractGraph, v::AbstractVector = vertices(g)) = [indegree(g, x) fo Return a vector corresponding to the number of edges which start at each vertex in graph `g`. If `v` is specified, only return degrees for vertices in `v`. """ -outdegree(g::AbstractGraph, v::Integer) = length(out_neighbors(g, v)) +outdegree(g::AbstractGraph, v::Integer) = length(outneighbors(g, v)) outdegree(g::AbstractGraph, v::AbstractVector = vertices(g)) = [outdegree(g, x) for x in v] """ @@ -134,26 +134,26 @@ end neighbors(g, v) Return a list of all neighbors reachable from vertex `v` in `g`. 
-For directed graphs, the default is equivalent to [`out_neighbors`](@ref); +For directed graphs, the default is equivalent to [`outneighbors`](@ref); use [`all_neighbors`](@ref) to list inbound and outbound neighbors. ### Implementation Notes Returns a reference, not a copy. Do not modify result. """ -neighbors(g::AbstractGraph, v::Integer) = out_neighbors(g, v) +neighbors(g::AbstractGraph, v::Integer) = outneighbors(g, v) """ all_neighbors(g, v) Return a list of all inbound and outbound neighbors of `v` in `g`. -For undirected graphs, this is equivalent to both [`out_neighbors`](@ref) -and [`in_neighbors`](@ref). +For undirected graphs, this is equivalent to both [`outneighbors`](@ref) +and [`inneighbors`](@ref). ### Implementation Notes Returns a reference, not a copy. Do not modify result. """ function all_neighbors end @traitfn all_neighbors(g::::IsDirected, v::Integer) = - union(out_neighbors(g, v), in_neighbors(g, v)) + union(outneighbors(g, v), inneighbors(g, v)) @traitfn all_neighbors(g::::(!IsDirected), v::Integer) = neighbors(g, v) @@ -183,7 +183,7 @@ Return the number of self loops in `g`. """ num_self_loops(g::AbstractGraph) = nv(g) == 0 ? 0 : sum(v -> has_edge(g, v, v), vertices(g)) -@doc_str """ +""" density(g) Return the density of `g`. Density is defined as the ratio of the number of actual edges to the diff --git a/src/degeneracy.jl b/src/degeneracy.jl index f862f22d6..baf0b03e4 100644 --- a/src/degeneracy.jl +++ b/src/degeneracy.jl @@ -25,7 +25,7 @@ function core_number(g::AbstractGraph{T}) where T curr_degree = 0 for (i, v) in enumerate(vs) if degrees[v] > curr_degree - append!(bin_boundaries, repmat([i], (degrees[v] - curr_degree))) + append!(bin_boundaries, repeat([i], (degrees[v] - curr_degree))) curr_degree = degrees[v] end end @@ -71,7 +71,7 @@ function k_core(g::AbstractGraph, k=-1; corenum=core_number(g)) k = maximum(corenum) # max core end - return find(x -> x >= k, corenum) + return findall(x -> x >= k, corenum) end """ @@ -99,7 +99,7 @@ function k_shell(g::AbstractGraph, k=-1; corenum=core_number(g)) if k == -1 k = maximum(corenum) end - return find(x -> x == k, corenum) + return findall(x -> x == k, corenum) end """ @@ -127,7 +127,7 @@ function k_crust(g, k=-1; corenum=core_number(g)) if k == -1 k = maximum(corenum) - 1 end - return find(x -> x <= k, corenum) + return findall(x -> x <= k, corenum) end """ @@ -154,5 +154,5 @@ function k_corona(g::AbstractGraph, k; corenum=core_number(g)) kcoreg = g[kcore] kcoredeg = degree(kcoreg) - return kcore[findin(kcoredeg, k)] + return kcore[findall(x-> x == k, kcoredeg)] end diff --git a/src/deprecations.jl b/src/deprecations.jl index 7b30ba9eb..e09818451 100644 --- a/src/deprecations.jl +++ b/src/deprecations.jl @@ -1,8 +1,2 @@ -@deprecate in_edges in_neighbors -@deprecate out_edges out_neighbors -@deprecate adjacency_matrix(g::AbstractGraph, dir::Symbol, T::DataType) adjacency_matrix(g, T; dir=dir) -@deprecate adjacency_matrix(g::AbstractGraph, dir::Symbol) adjacency_matrix(g; dir=dir) -@deprecate laplacian_matrix(g::AbstractGraph, dir::Symbol, T::DataType) laplacian_matrix(g, T; dir=dir) -@deprecate laplacian_matrix(g::AbstractGraph, dir::Symbol) laplacian_matrix(g; dir=dir) -@deprecate laplacian_spectrum(g::AbstractGraph, dir::Symbol) laplacian_spectrum(g; dir=dir) - +@deprecate in_neighbors inneighbors +@deprecate out_neighbors outneighbors diff --git a/src/digraph/cycles/hadwick-james.jl b/src/digraph/cycles/hadwick-james.jl index ebcd780c8..60f170942 100644 --- a/src/digraph/cycles/hadwick-james.jl +++ 
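The degeneracy.jl hunks above swap the removed `find`, `findin`, and `repmat` for `findall` and `repeat`. A small sketch with made-up core numbers:

```julia
# find/findin/repmat migration from the degeneracy.jl hunks above; made-up data.
corenum = [1, 2, 2, 3]
k = 2
kcore  = findall(x -> x >= k, corenum)  # was find(x -> x >= k, corenum)
kshell = findall(x -> x == k, corenum)  # findin(v, k) becomes findall(x -> x == k, v)
bins   = repeat([1], 3)                 # was repmat([1], 3); gives [1, 1, 1]
```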
b/src/digraph/cycles/hadwick-james.jl @@ -9,7 +9,7 @@ of Hadwick & James. """ function simplecycles_hadwick_james end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function simplecycles_hadwick_james{T, AG<:AbstractGraph{T}}(g::AG::IsDirected) +@traitfn function simplecycles_hadwick_james(g::AG::IsDirected) where {T, AG<:AbstractGraph{T}} nvg = nv(g) B = Vector{T}[Vector{T}() for i in vertices(g)] blocked = zeros(Bool, nvg) @@ -43,12 +43,12 @@ resetblocked!(blocked) = fill!(blocked, false) Find circuits in `g` recursively starting from v1. """ function circuit_recursive! end -@traitfn function circuit_recursive!{T<:Integer}(g::::IsDirected, v1::T, v2::T, blocked::AbstractVector, B::Vector{Vector{T}}, stack::Vector{T}, cycles::Vector{Vector{T}}) +@traitfn function circuit_recursive!(g::::IsDirected, v1::T, v2::T, blocked::AbstractVector, B::Vector{Vector{T}}, stack::Vector{T}, cycles::Vector{Vector{T}}) where T<:Integer f = false push!(stack, v2) blocked[v2] = true - Av = out_neighbors(g, v2) + Av = outneighbors(g, v2) for w in Av (w < v1) && continue if w == v1 # Found a circuit diff --git a/src/digraph/cycles/johnson.jl b/src/digraph/cycles/johnson.jl index 5b2b1393d..4cfdaebf8 100644 --- a/src/digraph/cycles/johnson.jl +++ b/src/digraph/cycles/johnson.jl @@ -22,7 +22,7 @@ The formula is coming from [Johnson, 1973](Johnson). maxsimplecycles(n::Integer) = sum(x -> ncycles_n_i(n, x), 1:(n - 1)) -@doc_str """ +""" maxsimplecycles(dg::::IsDirected, byscc::Bool = true) Compute the theoretical maximum number of cycles in the directed graph `dg`. @@ -81,7 +81,7 @@ end Constructor of the visitor, using the directed graph information. """ JohnsonVisitor(dg::DiGraph{T}) where T<:Integer = - JohnsonVisitor(Vector{T}(), falses(vertices(dg)), [Set{T}() for i in vertices(dg)]) + JohnsonVisitor(Vector{T}(), falses(nv(dg)), [Set{T}() for i in vertices(dg)]) """ unblock!{T<:Integer}(v::T, blocked::BitArray, B::Vector{Set{T}}) @@ -102,7 +102,7 @@ function unblock!(v::T, blocked::BitArray, B::Vector{Set{T}}) where T<:Integer end end -@doc_str """ +""" circuit{T<:Integer}(v::T, dg::::IsDirected, vis::JohnsonVisitor{T}, allcycles::Vector{Vector{T}}, vmap::Vector{T}, startnode::T = v) @@ -127,12 +127,12 @@ recursive version. Modify the vector of cycles, when needed. - [Johnson](http://epubs.siam.org/doi/abs/10.1137/0204007) """ function circuit end -@traitfn function circuit{T<:Integer}(v::T, dg::::IsDirected, vis::JohnsonVisitor{T}, -allcycles::Vector{Vector{T}}, vmap::Vector{T}, startnode::T = v) +@traitfn function circuit(v::T, dg::::IsDirected, vis::JohnsonVisitor{T}, +allcycles::Vector{Vector{T}}, vmap::Vector{T}, startnode::T = v) where T<:Integer done = false push!(vis.stack, v) vis.blocked[v] = true - for w in out_neighbors(dg, v) + for w in outneighbors(dg, v) if w == startnode push!(allcycles, vmap[vis.stack]) done = true @@ -143,7 +143,7 @@ allcycles::Vector{Vector{T}}, vmap::Vector{T}, startnode::T = v) if done unblock!(v, vis.blocked, vis.blockedmap) else - for w in out_neighbors(dg, v) + for w in outneighbors(dg, v) if !in(vis.blockedmap[w], v) push!(vis.blockedmap[w], v) end @@ -154,7 +154,7 @@ allcycles::Vector{Vector{T}}, vmap::Vector{T}, startnode::T = v) end -@doc_str """ +""" simplecycles(dg::::IsDirected) Compute all cycles of the given directed graph, using @@ -186,7 +186,7 @@ end ########################################################## #### Iterative version, using Tasks, of the previous algorithms. 
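Throughout the cycle-detection hunks above (and the rest of this patch), parametric `@traitfn` signatures are rewritten so that the type parameters move from braces after the function name into a trailing `where` clause. A hedged sketch of that signature style using a hypothetical helper, not a function from the library:

```julia
# Hypothetical @traitfn in the migrated `where`-clause style used above.
using SimpleTraits, LightGraphs

@traitfn function total_outdegree(g::AG::IsDirected) where {T, AG<:AbstractGraph{T}}
    # old spelling: total_outdegree{T, AG<:AbstractGraph{T}}(g::AG::IsDirected)
    return sum(outdegree(g, v) for v in vertices(g))
end
```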
-@doc_str """ +""" circuit_iter{T<:Integer}(v::T, dg::::IsDirected, vis::JohnsonVisitor{T}, vmap::Vector{T}, cycle::Channel, startnode::T = v) @@ -215,12 +215,12 @@ the same as v, otherwise it should be passed. - [Johnson](http://epubs.siam.org/doi/abs/10.1137/0204007) """ function circuit_iter end -@traitfn function circuit_iter{T<:Integer}(v::T, dg::::IsDirected, vis::JohnsonVisitor{T}, -vmap::Vector{T}, cycle::Channel, startnode::T = v) +@traitfn function circuit_iter(v::T, dg::::IsDirected, vis::JohnsonVisitor{T}, +vmap::Vector{T}, cycle::Channel, startnode::T = v) where T<:Integer done = false push!(vis.stack, v) vis.blocked[v] = true - for w in out_neighbors(dg, v) + for w in outneighbors(dg, v) if w == startnode put!(cycle, vmap[vis.stack]) done = true @@ -231,7 +231,7 @@ vmap::Vector{T}, cycle::Channel, startnode::T = v) if done unblock!(v, vis.blocked, vis.blockedmap) else - for w in out_neighbors(dg, v) + for w in outneighbors(dg, v) if !in(vis.blockedmap[w], v) push!(vis.blockedmap[w], v) end @@ -260,14 +260,14 @@ function itercycles end for scc in sccs while length(scc) >= 1 wdg, vmap = induced_subgraph(dg, scc) - shift!(scc) + popfirst!(scc) visitor = JohnsonVisitor(wdg) circuit_iter(1, wdg, visitor, vmap, cycle) end end end -@doc_str """ +""" simplecyclescount(dg::DiGraph, ceiling = 10^6) Count the number of cycles in a directed graph, using @@ -292,7 +292,7 @@ function simplecyclescount end return len end -@doc_str """ +""" simplecycles_iter(dg::DiGraph, ceiling = 10^6) Search all cycles of the given directed graph, using @@ -314,7 +314,7 @@ function simplecycles_iter end @traitfn simplecycles_iter(dg::::IsDirected, ceiling = 10^6) = collect(Iterators.take(Channel(c -> itercycles(dg, c)), ceiling)) -@doc_str """ +""" simplecycleslength(dg::DiGraph, ceiling = 10^6) Search all cycles of the given directed graph, using diff --git a/src/digraph/cycles/karp.jl b/src/digraph/cycles/karp.jl index 855a82e63..8c1e78e61 100644 --- a/src/digraph/cycles/karp.jl +++ b/src/digraph/cycles/karp.jl @@ -16,7 +16,7 @@ function _karp_minimum_cycle_mean( F[1, 1] = 0. for i in 2:n+1 for (j, v) in enumerate(component) - for u in in_neighbors(g, v) + for u in inneighbors(g, v) k = get(v2j, u, 0) if !iszero(k) F[i, j] = min(F[i, j], F[i-1, k] + distmx[u, v]) @@ -60,7 +60,7 @@ function _karp_minimum_cycle_mean( for i in n:-1:1 v = component[walk[i+1]] dmin = Inf - for u in in_neighbors(g, v) + for u in inneighbors(g, v) j = get(v2j, u, 0) if !iszero(j) dcur = F[i, j] + distmx[u, v] diff --git a/src/digraph/transitivity.jl b/src/digraph/transitivity.jl index 1a12641cb..58f1cf2d6 100644 --- a/src/digraph/transitivity.jl +++ b/src/digraph/transitivity.jl @@ -1,4 +1,4 @@ -@doc_str """ +""" transitiveclosure!(g, selflooped=false) Compute the transitive closure of a directed graph, using the Floyd-Warshall @@ -13,16 +13,9 @@ This version of the function modifies the original graph. function transitiveclosure! 
end @traitfn function transitiveclosure!(g::::IsDirected, selflooped=false) for k in vertices(g) - for i in vertices(g) - i == k && continue - for j in vertices(g) - j == k && continue - if (has_edge(g, i, k) && has_edge(g, k, j)) - if (i != j || selflooped) - add_edge!(g, i, j) - end - end - end + for i in inneighbors(g, k), j in outneighbors(g, k) + ((!selflooped && i == j) || i == k || j == k) && continue + add_edge!(g, i, j) end end return g @@ -42,3 +35,80 @@ function transitiveclosure(g::DiGraph, selflooped = false) copyg = copy(g) return transitiveclosure!(copyg, selflooped) end + +""" + transitivereduction(g; selflooped=false) + +Compute the transitive reduction of a directed graph. If the graph contains +cycles, each strongly connected component is replaced by a directed cycle and +the transitive reduction is calculated on the condensation graph connecting the +components. If `selflooped` is true, self loops on strongly connected components +of size one will be preserved. + +### Performance +Time complexity is \\mathcal{O}(|V||E|). +""" +function transitivereducion end +@traitfn function transitivereduction(g::::IsDirected; selflooped::Bool=false) + scc = strongly_connected_components(g) + cg = condensation(g, scc) + + reachable = Vector{Bool}(undef, nv(cg)) + visited = Vector{Bool}(undef, nv(cg)) + stack = Vector{eltype(cg)}(undef, nv(cg)) + resultg = SimpleDiGraph{eltype(g)}(nv(g)) + +# Calculate the transitive reduction of the acyclic condensation graph. + @inbounds( + for u in vertices(cg) + fill!(reachable, false) # vertices reachable from u on a path of length >= 2 + fill!(visited, false) + stacksize = 0 + for v in outneighbors(cg,u) + @simd for w in outneighbors(cg, v) + if !visited[w] + visited[w] = true + stacksize += 1 + stack[stacksize] = w + end + end + end + while stacksize > 0 + v = stack[stacksize] + stacksize -= 1 + reachable[v] = true + @simd for w in outneighbors(cg, v) + if !visited[w] + visited[w] = true + stacksize += 1 + stack[stacksize] = w + end + end + end +# Add the edges from the condensation graph to the resulting graph. + @simd for v in outneighbors(cg,u) + if !reachable[v] + add_edge!(resultg, scc[u][1], scc[v][1]) + end + end + end) + +# Replace each strongly connected component with a directed cycle. + @inbounds( + for component in scc + nvc = length(component) + if nvc == 1 + if selflooped && has_edge(g, component[1], component[1]) + add_edge!(resultg, component[1], component[1]) + end + continue + end + for i in 1:(nvc-1) + add_edge!(resultg, component[i], component[i+1]) + end + add_edge!(resultg, component[nvc], component[1]) + end) + + return resultg +end + diff --git a/src/distance.jl b/src/distance.jl index 1da9a80e8..e53b89e18 100644 --- a/src/distance.jl +++ b/src/distance.jl @@ -5,7 +5,7 @@ An array-like structure that provides distance values of `1` for any `src, dst` combination. """ -struct DefaultDistance<:AbstractMatrix{Int} +struct DefaultDistance <: AbstractMatrix{Int} nv::Int DefaultDistance(nv::Int=typemax(Int)) = new(nv) end @@ -50,38 +50,32 @@ store, and pass the eccentricities if multiple distance measures are desired. An infinite path length is represented by the `typemax` of the distance matrix. 
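For context on the rewritten `transitiveclosure!` and the newly added `transitivereduction` above, a hedged usage sketch on a toy three-vertex path digraph (the graph is illustrative, not taken from the patch):

```julia
# Toy round trip: closure adds the shortcut 1 -> 3, reduction removes it again.
using LightGraphs

g = DiGraph(3)
add_edge!(g, 1, 2); add_edge!(g, 2, 3)

tc = transitiveclosure(g)
@assert has_edge(tc, 1, 3)

tr = transitivereduction(tc)
@assert !has_edge(tr, 1, 3) && has_edge(tr, 1, 2) && has_edge(tr, 2, 3)
```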
""" -function eccentricity( - g::AbstractGraph, +function eccentricity(g::AbstractGraph, v::Integer, - distmx::AbstractMatrix{T} = weights(g) -) where T <: Real + distmx::AbstractMatrix{T}=weights(g)) where T <: Real e = maximum(dijkstra_shortest_paths(g, v, distmx).dists) e == typemax(T) && warn("Infinite path length detected for vertex $v") return e end -eccentricity( - g::AbstractGraph, - vs::AbstractVector = vertices(g), - distmx::AbstractMatrix = weights(g) -) = [eccentricity(g, v, distmx) for v in vs] +eccentricity(g::AbstractGraph, + vs::AbstractVector=vertices(g), + distmx::AbstractMatrix=weights(g)) = [eccentricity(g, v, distmx) for v in vs] eccentricity(g::AbstractGraph, distmx::AbstractMatrix) = eccentricity(g, vertices(g), distmx) -function parallel_eccentricity( - g::AbstractGraph, - vs::AbstractVector = vertices(g), - distmx::AbstractMatrix{T} = weights(g) -) where T <: Real +function parallel_eccentricity(g::AbstractGraph, + vs::AbstractVector=vertices(g), + distmx::AbstractMatrix{T}=weights(g)) where T <: Real vlen = length(vs) - eccs = SharedVector{T}(vlen) - @sync @parallel for i = 1:vlen + eccs = SharedArrays.SharedVector{T}(vlen) + Distributed.@sync Distributed.@distributed for i = 1:vlen eccs[i] = maximum(dijkstra_shortest_paths(g, vs[i], distmx).dists) end - d = sdata(eccs) + d = SharedArrays.sdata(eccs) maximum(d) == typemax(T) && warn("Infinite path length detected") return d end @@ -98,9 +92,9 @@ Given a graph and optional distance matrix, or a vector of precomputed eccentricities, return the maximum eccentricity of the graph. """ diameter(eccentricities::Vector) = maximum(eccentricities) -diameter(g::AbstractGraph, distmx::AbstractMatrix = weights(g)) = +diameter(g::AbstractGraph, distmx::AbstractMatrix=weights(g)) = maximum(eccentricity(g, distmx)) -parallel_diameter(g::AbstractGraph, distmx::AbstractMatrix = weights(g)) = +parallel_diameter(g::AbstractGraph, distmx::AbstractMatrix=weights(g)) = maximum(parallel_eccentricity(g, distmx)) """ @@ -118,10 +112,10 @@ function periphery(eccentricities::Vector) return filter(x -> eccentricities[x] == diam, 1:length(eccentricities)) end -periphery(g::AbstractGraph, distmx::AbstractMatrix = weights(g)) = +periphery(g::AbstractGraph, distmx::AbstractMatrix=weights(g)) = periphery(eccentricity(g, distmx)) -parallel_periphery(g::AbstractGraph, distmx::AbstractMatrix = weights(g)) = +parallel_periphery(g::AbstractGraph, distmx::AbstractMatrix=weights(g)) = periphery(parallel_eccentricity(g, distmx)) """ @@ -134,9 +128,9 @@ Given a graph and optional distance matrix, or a vector of precomputed eccentricities, return the minimum eccentricity of the graph. 
""" radius(eccentricities::Vector) = minimum(eccentricities) -radius(g::AbstractGraph, distmx::AbstractMatrix = weights(g)) = +radius(g::AbstractGraph, distmx::AbstractMatrix=weights(g)) = minimum(eccentricity(g, distmx)) -parallel_radius(g::AbstractGraph, distmx::AbstractMatrix = weights(g)) = +parallel_radius(g::AbstractGraph, distmx::AbstractMatrix=weights(g)) = minimum(parallel_eccentricity(g, distmx)) """ @@ -153,8 +147,8 @@ function center(eccentricities::Vector) return filter(x -> eccentricities[x] == rad, 1:length(eccentricities)) end -center(g::AbstractGraph, distmx::AbstractMatrix = weights(g)) = +center(g::AbstractGraph, distmx::AbstractMatrix=weights(g)) = center(eccentricity(g, distmx)) -parallel_center(g::AbstractGraph, distmx::AbstractMatrix = weights(g)) = +parallel_center(g::AbstractGraph, distmx::AbstractMatrix=weights(g)) = center(parallel_eccentricity(g, distmx)) diff --git a/src/edit_distance.jl b/src/edit_distance.jl index 793245a8e..4bd684185 100644 --- a/src/edit_distance.jl +++ b/src/edit_distance.jl @@ -1,4 +1,4 @@ -@doc_str """ +""" edit_distance(G₁::AbstractGraph, G₂::AbstractGraph) Compute the edit distance between graphs `G₁` and `G₂`. Return the minimum @@ -50,16 +50,16 @@ function edit_distance(G₁::AbstractGraph, G₂::AbstractGraph; h(λ) = heuristic(λ, G₁, G₂) # initialize open set - OPEN = PriorityQueue{Vector{Tuple}, Float64}() + OPEN = DataStructures.PriorityQueue{Vector{Tuple}, Float64}() for v in 1:nv(G₂) - enqueue!(OPEN, [(1, v)], subst_cost(1, v) + h([(1, v)])) + DataStructures.enqueue!(OPEN, [(1, v)], subst_cost(1, v) + h([(1, v)])) end - enqueue!(OPEN, [(1, 0)], delete_cost(1) + h([(1, 0)])) + DataStructures.enqueue!(OPEN, [(1, 0)], delete_cost(1) + h([(1, 0)])) while true # minimum (partial) edit path λ, cost = DataStructures.peek(OPEN) - dequeue!(OPEN) + DataStructures.dequeue!(OPEN) if is_complete_path(λ, G₁, G₂) return cost, λ @@ -70,15 +70,15 @@ function edit_distance(G₁::AbstractGraph, G₂::AbstractGraph; if k < nv(G₁) # there are still vertices to process in G₁? for v in vs λ⁺ = [λ; (k + 1, v)] - enqueue!(OPEN, λ⁺, cost + subst_cost(k + 1, v) + h(λ⁺) - h(λ)) + DataStructures.enqueue!(OPEN, λ⁺, cost + subst_cost(k + 1, v) + h(λ⁺) - h(λ)) end λ⁺ = [λ; (k + 1, 0)] - enqueue!(OPEN, λ⁺, cost + delete_cost(k + 1) + h(λ⁺) - h(λ)) + DataStructures.enqueue!(OPEN, λ⁺, cost + delete_cost(k + 1) + h(λ⁺) - h(λ)) else # add remaining vertices of G₂ to the path λ⁺ = [λ; [(0, v) for v in vs]] total_insert_cost = sum(insert_cost, vs) - enqueue!(OPEN, λ⁺, cost + total_insert_cost + h(λ⁺) - h(λ)) + DataStructures.enqueue!(OPEN, λ⁺, cost + total_insert_cost + h(λ⁺) - h(λ)) end end end @@ -119,7 +119,7 @@ vertex v ∈ G₂. `p=1`: the p value for p-norm calculation. """ function MinkowskiCost(μ₁::AbstractVector, μ₂::AbstractVector; p::Real=1) - (u, v) -> norm(μ₁[u] - μ₂[v], p) + (u, v) -> LinearAlgebra.norm(μ₁[u] - μ₂[v], p) end """ @@ -132,5 +132,5 @@ Return value similar to `MinkowskiCost`, but ensure costs smaller than 2τ. `τ=1`: value specifying half of the upper limit of the Minkowski cost. 
""" function BoundedMinkowskiCost(μ₁::AbstractVector, μ₂::AbstractVector; p::Real=1, τ::Real=1) - (u, v) -> 1 / (1 / (2τ) + exp(-norm(μ₁[u] - μ₂[v], p))) + (u, v) -> 1 / (1 / (2τ) + exp(-LinearAlgebra.norm(μ₁[u] - μ₂[v], p))) end diff --git a/src/flow/boykov_kolmogorov.jl b/src/flow/boykov_kolmogorov.jl deleted file mode 100644 index 6171134bb..000000000 --- a/src/flow/boykov_kolmogorov.jl +++ /dev/null @@ -1,202 +0,0 @@ -""" - boykov_kolmogorov_impl(residual_graph, source, target, capacity_matrix) - -Compute the max-flow/min-cut between `source` and `target` for `residual_graph` -using the Boykov-Kolmogorov algorithm. - -Return the maximum flow in the network, the flow matrix and the partition -`{S,T}` in the form of a vector of 0's, 1's and 2's. - -### References -- BOYKOV, Y.; KOLMOGOROV, V., 2004. An Experimental Comparison of -Min-Cut/Max-Flow Algorithms for Energy Minimization in Vision. - -### Author -- Júlio Hoffimann Mendes (juliohm@stanford.edu) -""" -function boykov_kolmogorov_impl end -# see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function boykov_kolmogorov_impl{T, U, AG<:AbstractGraph{U}}( - residual_graph::AG::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix{T} # edge flow capacities - ) - n = nv(residual_graph) - - flow = 0 - flow_matrix = zeros(T, n, n) - - TREE = zeros(U, n) - TREE[source] = U(1) - TREE[target] = U(2) - - PARENT = zeros(U, n) - - A = [source, target] - O = Vector{U}() - - while true - # growth stage - path = find_path!(residual_graph, source, target, flow_matrix, capacity_matrix, PARENT, TREE, A) - - isempty(path) && break - - # augmentation stage - flow += augment!(path, flow_matrix, capacity_matrix, PARENT, TREE, O) - - # adoption stage - adopt!(residual_graph, source, target, flow_matrix, capacity_matrix, PARENT, TREE, A, O) - end - - return flow, flow_matrix, TREE -end - -# see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function find_path!{T, AG<:AbstractGraph{T}}( - residual_graph::AG::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - flow_matrix::AbstractMatrix, # the current flow matrix - capacity_matrix::AbstractMatrix, # edge flow capacities - PARENT::Vector, # parent table - TREE::Vector, # tree table - A::Vector # active set - ) - - tree_cap(p, q) = TREE[p] == one(T) ? 
capacity_matrix[p, q] - flow_matrix[p, q] : - capacity_matrix[q, p] - flow_matrix[q, p] - while !isempty(A) - p = last(A) - for q in neighbors(residual_graph, p) - if tree_cap(p, q) > 0 - if TREE[q] == zero(T) - TREE[q] = TREE[p] - PARENT[q] = p - unshift!(A, q) - end - if TREE[q] ≠ zero(T) && TREE[q] ≠ TREE[p] - # p -> source - path_to_source = [p] - while PARENT[p] ≠ zero(T) - p = PARENT[p] - push!(path_to_source, p) - end - - # q -> target - path_to_target = [q] - while PARENT[q] ≠ zero(T) - q = PARENT[q] - push!(path_to_target, q) - end - - # source -> target - path = [reverse!(path_to_source); path_to_target] - - if path[1] == source && path[end] == target - return path - elseif path[1] == target && path[end] == source - return reverse!(path) - end - end - end - end - pop!(A) - end - - return Vector{T}() -end - -function augment!( - path::AbstractVector, # path from source to target - flow_matrix::AbstractMatrix, # the current flow matrix - capacity_matrix::AbstractMatrix, # edge flow capacities - PARENT::Vector, # parent table - TREE::Vector, # tree table - O::Vector # orphan set - ) - - T = eltype(path) - # bottleneck capacity - Δ = Inf - for i = 1:(length(path) - 1) - p, q = path[i:(i + 1)] - cap = capacity_matrix[p, q] - flow_matrix[p, q] - cap < Δ && (Δ = cap) - end - - # update residual graph - for i = 1:(length(path) - 1) - p, q = path[i:(i + 1)] - flow_matrix[p, q] += Δ - flow_matrix[q, p] -= Δ - - # create orphans - if flow_matrix[p, q] == capacity_matrix[p, q] - if TREE[p] == TREE[q] == one(T) - PARENT[q] = zero(T) - unshift!(O, q) - end - if TREE[p] == TREE[q] == 2 - PARENT[p] = zero(T) - unshift!(O, p) - end - end - end - - return Δ -end - -@traitfn function adopt!{T, AG<:AbstractGraph{T}}( - residual_graph::AG::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - flow_matrix::AbstractMatrix, # the current flow matrix - capacity_matrix::AbstractMatrix, # edge flow capacities - PARENT::Vector, # parent table - TREE::Vector, # tree table - A::Vector, # active set - O::Vector # orphan set - ) - - tree_cap(p, q) = TREE[p] == 1 ? capacity_matrix[p, q] - flow_matrix[p, q] : - capacity_matrix[q, p] - flow_matrix[q, p] - while !isempty(O) - p = pop!(O) - # try to find parent that is not an orphan - parent_found = false - for q in neighbors(residual_graph, p) - if TREE[q] == TREE[p] && tree_cap(q, p) > 0 - # check if "origin" is either source or target - o = q - while PARENT[o] ≠ 0 - o = PARENT[o] - end - if o == source || o == target - parent_found = true - PARENT[p] = q - break - end - end - end - - if !parent_found - # scan all neighbors and make the orphan a free node - for q in neighbors(residual_graph, p) - if TREE[q] == TREE[p] - if tree_cap(q, p) > 0 - unshift!(A, q) - end - if PARENT[q] == p - PARENT[q] = zero(T) - unshift!(O, q) - end - end - end - - TREE[p] = zero(T) - B = setdiff(A, p) - resize!(A, length(B))[:] = B - end - end -end diff --git a/src/flow/dinic.jl b/src/flow/dinic.jl deleted file mode 100644 index bc6f3aa9f..000000000 --- a/src/flow/dinic.jl +++ /dev/null @@ -1,118 +0,0 @@ -""" - function dinic_impl(residual_graph, source, target, capacity_matrix) - -Compute the maximum flow between the `source` and `target` for `residual_graph` -with edge flow capacities in `capacity_matrix` using -[Dinic\'s Algorithm](https://en.wikipedia.org/wiki/Dinic%27s_algorithm). -Return the value of the maximum flow as well as the final flow matrix. 
-""" -function dinic_impl end -@traitfn function dinic_impl{T}( - residual_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix{T} # edge flow capacities - ) - n = nv(residual_graph) # number of vertexes - flow_matrix = zeros(T, n, n) # initialize flow matrix - P = zeros(Int, n) # Sharable parent vector - - flow = 0 - - while true - augment = blocking_flow!(residual_graph, source, target, capacity_matrix, flow_matrix, P) - augment == 0 && break - flow += augment - end - return flow, flow_matrix -end - - - - -""" - blocking_flow!(residual_graph, source, target, capacity_matrix, flow-matrix, P) - -Like `blocking_flow`, but requires a preallocated parent vector `P`. -""" -function blocking_flow! end -@traitfn function blocking_flow!{T}( - residual_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix{T}, # edge flow capacities - flow_matrix::AbstractMatrix, # the current flow matrix - P::AbstractVector{Int} # Parent vector to store Level Graph - ) - n = nv(residual_graph) # number of vertexes - fill!(P, -1) - P[source] = -2 - - Q = [source] - sizehint!(Q, n) - - while length(Q) > 0 # Construct the Level Graph using BFS - u = pop!(Q) - for v in out_neighbors(residual_graph, u) - if P[v] == -1 && capacity_matrix[u, v] > flow_matrix[u, v] - P[v] = u - unshift!(Q, v) - end - end - end - - P[target] == -1 && return 0 # BFS couldn't reach the target - - total_flow = 0 - - for bv in in_neighbors(residual_graph, target) # Trace all possible routes to source - flow = typemax(T) - v = target - u = bv - while v != source - if u == -1 # Vertex unreachable from source - flow = 0 - break - else - flow = min(flow, capacity_matrix[u, v] - flow_matrix[u, v]) - v = u - u = P[u] - end - end - - flow == 0 && continue # Flow cannot be augmented along path - - v = target - u = bv - while v != source # Augment flow along path - flow_matrix[u, v] += flow - flow_matrix[v, u] -= flow - v = u - u = P[u] - end - - total_flow += flow - end - return total_flow -end - -""" - blocking_flow(residual_graph, source, target, capacity_matrix, flow-matrix) - -Use BFS to identify a blocking flow in the `residual_graph` with current flow -matrix `flow_matrix`and then backtrack from `target` to `source`, -augmenting flow along all possible paths. -""" -blocking_flow( - residual_graph::AbstractGraph, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix, # edge flow capacities - flow_matrix::AbstractMatrix, # the current flow matrix - ) = blocking_flow!( - residual_graph, - source, - target, - capacity_matrix, - flow_matrix, - zeros(Int, nv(residual_graph))) diff --git a/src/flow/edmonds_karp.jl b/src/flow/edmonds_karp.jl deleted file mode 100644 index 91f3ad89e..000000000 --- a/src/flow/edmonds_karp.jl +++ /dev/null @@ -1,168 +0,0 @@ -""" - edmonds_karp_impl(residual_graph, source, target, capacity_matrix) - -Compute the maximum flow in flow graph `residual_graph` between `source` and -`target` and capacities defined in `capacity_matrix` using the -[Edmonds-Karp algorithm](https://en.wikipedia.org/wiki/Edmondss%E2%80%93Karp_algorithm). -Return the value of the maximum flow as well as the final flow matrix. 
-""" -function edmonds_karp_impl end -@traitfn function edmonds_karp_impl{T}( - residual_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix{T} # edge flow capacities - ) - n = nv(residual_graph) # number of vertexes - flow = 0 - flow_matrix = zeros(T, n, n) # initialize flow matrix - P = zeros(Int, n) - S = zeros(Int, n) - while true - fill!(P, -1) - fill!(S, -1) - v, P, S, flag = fetch_path!(residual_graph, source, target, flow_matrix, capacity_matrix, P, S) - - if flag != 0 # no more valid paths - break - else - path = [Int(v)] # initialize path - sizehint!(path, n) - - u = v - while u != source # trace path from v to source - u = P[u] - push!(path, u) - end - reverse!(path) - - u = v # trace path from v to target - while u != target - u = S[u] - push!(path, Int(u)) - end - # augment flow along path - flow += augment_path!(path, flow_matrix, capacity_matrix) - end - end - - return flow, flow_matrix -end - -""" - augment_path!(path, flow_matrix, capacity_matrix) - -Calculate the amount by which flow can be augmented in the given path. -Augment the flow and returns the augment value. -""" -function augment_path!( - path::Vector{Int}, # input path - flow_matrix::AbstractMatrix{T}, # the current flow matrix - capacity_matrix::AbstractMatrix # edge flow capacities - ) where T - augment = typemax(T) # initialize augment - for i in 1:(length(path) - 1) # calculate min capacity along path - u = path[i] - v = path[i + 1] - augment = min(augment, capacity_matrix[u, v] - flow_matrix[u, v]) - end - - for i in 1:(length(path) - 1) # augment flow along path - u = path[i] - v = path[i + 1] - flow_matrix[u, v] += augment - flow_matrix[v, u] -= augment - end - - return augment -end - -""" - fetch_path!(residual_graph, source, target, flow_matrix, capacity_matrix, P, S) - -Like `fetch_path`, but requires preallocated parent vector `P` and successor -vector `S`. -""" -function fetch_path! end -@traitfn function fetch_path!( - residual_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - flow_matrix::AbstractMatrix, # the current flow matrix - capacity_matrix::AbstractMatrix, # edge flow capacities - P::Vector{Int}, # parent table of path init to -1s - S::Vector{Int} # successor table of path init to -1s - ) - n = nv(residual_graph) - - P[source] = -2 - S[target] = -2 - - Q_f = [source] # forward queue - sizehint!(Q_f, n) - - Q_r = [target] # reverse queue - sizehint!(Q_r, n) - - while true - if length(Q_f) <= length(Q_r) - u = pop!(Q_f) - for v in out_neighbors(residual_graph, u) - if capacity_matrix[u, v] - flow_matrix[u, v] > 0 && P[v] == -1 - P[v] = u - if S[v] == -1 - unshift!(Q_f, v) - else - return v, P, S, 0 # 0 indicates success - end - end - end - - length(Q_f) == 0 && return 0, P, S, 1 # No paths to target - else - v = pop!(Q_r) - for u in in_neighbors(residual_graph, v) - if capacity_matrix[u, v] - flow_matrix[u, v] > 0 && S[u] == -1 - S[u] = v - P[u] != -1 && return u, P, S, 0 # 0 indicates success - - unshift!(Q_r, u) - end - - end - - length(Q_r) == 0 && return 0, P, S, 2 # No paths to source - end - end -end - - -""" - fetch_path(residual_graph, source, target, flow_matrix, capacity_matrix) - - -Use bidirectional BFS to look for augmentable paths from `source` to `target` in -`residual_graph`. 
Return the vertex where the two BFS searches intersect, -the parent table of the path, the successor table of the path found, and a -flag indicating success (0 => success; 1 => no path to target, 2 => no path -to source). -""" -function fetch_path end -@traitfn function fetch_path( - residual_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - flow_matrix::AbstractMatrix, # the current flow matrix - capacity_matrix::AbstractMatrix # edge flow capacities - ) - n = nv(residual_graph) - P = fill(-1, n) - S = fill(-1, n) - return fetch_path!(residual_graph, - source, - target, - flow_matrix, - capacity_matrix, - P, - S) -end diff --git a/src/flow/ext_multiroute_flow.jl b/src/flow/ext_multiroute_flow.jl deleted file mode 100644 index b1d921560..000000000 --- a/src/flow/ext_multiroute_flow.jl +++ /dev/null @@ -1,252 +0,0 @@ -""" - emrf(flow_graph, source, target, capacity_matrix, flow_algorithm, routes=0) - -Compute the maximum multiroute flow (for any number of `route`s) -between `source` and `target` in `flow_graph` via flow algorithm `flow_algorithm`. - -If a number of routes is given, return the value of the multiroute flow as -well as the final flow matrix, along with a multiroute cut if the -Boykov-Kolmogorov max-flow algorithm is used as a subroutine. -Otherwise, return the vector of breaking points of the parametric -multiroute flow function. - -### References -- [Extended Multiroute Flow algorithm](http://dx.doi.org/10.1016/j.disopt.2016.05.002) -""" -# EMRF (Extended Multiroute Flow) algorithms -function emrf( - flow_graph::AbstractGraph, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix, # edge flow capacities - flow_algorithm::AbstractFlowAlgorithm, # keyword argument for algorithm - routes::Real = 0 - ) - breakingpoints = breakingPoints(flow_graph, source, target, capacity_matrix) - if routes > 0 - x, f = intersection(breakingpoints, routes) - return maximum_flow(flow_graph, source, target, capacity_matrix, - algorithm = flow_algorithm, restriction = x) - end - return breakingpoints -end - -@doc_str """ - auxiliaryPoints(flow_graph, source, target, capacity_matrix) - -Output a set of (point, slope) that compose the restricted max-flow function -of `flow_graph` from `source to `target` using capacities in `capacity_matrix`. - -### Performance -One point by possible slope is enough (hence ``\\mathcal{O}(λ×maximum_flow)`` complexity). 
-""" -function auxiliaryPoints end -@traitfn function auxiliaryPoints( - flow_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix # edge flow capacities - ) - # Problem descriptors - λ = maximum_flow(flow_graph, source, target)[1] # Connectivity - n = nv(flow_graph) # number of vertices - r1, r2 = minmaxCapacity(capacity_matrix) # restriction left (1) and right (2) - auxpoints = fill((0., 0.), λ + 1) - - # Initialisation of left side (1) - f1, F1, cut1 = maximum_flow(flow_graph, source, target, capacity_matrix, - algorithm = BoykovKolmogorovAlgorithm(), restriction = r1) - s1 = slope(flow_graph, capacity_matrix, cut1, r1) # left slope - auxpoints[λ + 1 - s1] = (r1, f1) # Add left initial auxiliary point - - # Initialisation of right side (2) - f2, F2, cut2 = maximum_flow(flow_graph, source, target, capacity_matrix, - algorithm = BoykovKolmogorovAlgorithm(), restriction = r2) - s2 = slope(flow_graph, capacity_matrix, cut2, r2) # right slope - auxpoints[λ + 1 - s2] = (r2, f2) # Add right initial auxiliary point - - # Loop if the slopes are distinct by at least 2 - if s1 > s2 + 1 - queue = [((f1, s1, r1), (f2, s2, r2))] - - while !isempty(queue) - # Computes an intersection (middle) with a new associated slope - (f1, s1, r1), (f2, s2, r2) = pop!(queue) - r, expectedflow = intersection(r1, f1, s1, r2, f2, s2) - f, F, cut = maximum_flow(flow_graph, source, target, capacity_matrix, - algorithm = BoykovKolmogorovAlgorithm(), restriction = r) - s = slope(flow_graph, capacity_matrix, max.(cut, 1), r) # current slope - auxpoints[λ + 1 - s] = (r, f) - # If the flow at the intersection (middle) is as expected - if expectedflow ≉ f # approximatively not equal (enforced by floating precision) - # if the slope difference between (middle) and left is at least 2 - # push (left),(middle) - if s1 > s + 1 && !approximately_equal((r2, f2), (r, f)) - q = (f1, s1, r1), (f, s, r) - push!(queue, q) - end - # if the slope difference between (middle) and right is at least 2 - # push (middle),(right) - if s > s2 + 1 && !approximately_equal((r1, f1), (r, f)) - q = (f, s, r), (f2, s2, r2) - push!(queue, q) - end - end - end - end - return auxpoints -end - -""" - breakingPoints(flow_graph::::IsDirected, source, target, capacity_matrix) - -Calculates the breaking of the restricted max-flow from a set of auxiliary points -for `flow_graph` from `source to `target` using capacities in `capacity_matrix`. -""" -function breakingPoints end -@traitfn function breakingPoints{T}( - flow_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix{T} # edge flow capacities - ) - auxpoints = auxiliaryPoints(flow_graph, source, target, capacity_matrix) - λ = length(auxpoints) - 1 - left_index = 1 - breakingpoints = Vector{Tuple{T,T,Int}}() - - for (id, point) in enumerate(auxpoints) - if id == 1 - push!(breakingpoints, (0., 0., λ)) - else - pleft = breakingpoints[left_index] - if point[1] != 0 - x, y = intersection(pleft[1], pleft[2], pleft[3], - point[1], point[2], λ + 1 - id) - push!(breakingpoints, (x, y, λ + 1 - id)) - left_index += 1 - end - end - end - return breakingpoints -end - -""" - minmaxCapacity(capacity_matrix) - -Return the nonzero min and max function of `capacity_matrix`. - -Note: this is more efficient than maximum() / minimum() / extrema() -since we have to ignore zero values. 
-""" - -# Function to get the nonzero min and max function of a Matrix -# note: this is more efficient than maximum() / minimum() / extrema() -# since we have to ignore zero values. -function minmaxCapacity( - capacity_matrix::AbstractMatrix{T} # edge flow capacities - ) where T - cmin, cmax = typemax(T), typemin(T) - for c in capacity_matrix - if c > zero(T) - cmin = min(cmin, c) - end - cmax = max(cmax, c) - end - return cmin, cmax -end - -""" - slope(flow_graph, capacity_matrix, cut, restriction) - -Return the slope of `flow_graph` using capacities in `capacity_matrix` and -a cut vector `cut`. The slope is initialized at 0 and is incremented for -each edge whose capacity does not exceed `restriction`. -""" -function slope end -# Function to get the slope of the restricted flow -@traitfn function slope( - flow_graph::::IsDirected, # the input graph - capacity_matrix::AbstractMatrix, # edge flow capacities - cut::Vector, # cut information for vertices - restriction::Number # value of the restriction - ) - slope = 0 - for e in edges(flow_graph) - ## Chain comparison to wether an edge cross the cut from the source side of - # the cut to the target side of the cut. Then the edge is selected iff the - # capacity of the edge is larger then the restriction argument. - # cut[dst(e)] == 2 > cut[src(e)] is equivalent to - # cut[dst(e)] == 2 && 2 > cut[src(e)] - # Description of chain comparisons can be found at https://goo.gl/IJpCqe - if cut[dst(e)] == 2 > cut[src(e)] && - capacity_matrix[src(e), dst(e)] > restriction - slope += 1 - end - end - return slope -end - -""" - intersection(x1, y1, a1, x2, y2, a2) - -Return the intersection of two lines defined by `x` and `y` with slopes `a`. -2) A set of segments and a linear function of slope k passing by the origin. -Requires argument: -1) - x1, y1, a1, x2, y2, a2::T<:AbstractFloat # Coordinates/slopes -2) - points::Vector{Tuple{T, T, Int}} # vector of points with T<:AbstractFloat -- k::R<:Real # number of routes (slope of the line) -""" -function intersection( - x1::T, # x coordinate of point 1 - y1::T, # y coordinate of point 1 - a1::Integer, # slope passing by point 1 - x2::T, # x coordinate of point 2 - y2::T, # y coordinate of point 2 - a2::R # slope passing by point 2 - ) where T<:AbstractFloat where R<:Real - - (a1 == a2) && return -1., -1. # result will be ignored in other intersection method - b1 = y1 - a1 * x1 - b2 = y2 - a2 * x2 - x = (b2 - b1) / (a1 - a2) - y = a1 * x + b1 - return x, y -end - - -""" - intersection(points, k) - -Return the intersection of a set of line segments and a line of slope `k` -passing by the origin. Segments are defined as a triple (x, y, slope). -""" -function intersection( - points::Vector{Tuple{T,T,I}}, # vector of breaking points - k::R # number of routes (slope of the line) - ) where T<:AbstractFloat where I<:Integer where R<:Real - λ = points[1][1] # Connectivity - - # Loop over the segments (pair of breaking points) - for (id, p) in enumerate(points[1:(end - 1)]) - if id == 1 - k ≈ λ && return points[2] - else - x, y = intersection(p[1], p[2], p[3], 0., 0., k) - (p[1] ≤ x ≤ points[id + 1][1]) && return x, y - end - end - p = points[end] - return intersection(p[1], p[2], p[3], 0., 0., k) -end - -""" - approximately_equal(a, b) - -Return true if each element in the tuple is approximately equal to its counterpart. - -### Implementation Notes: -This is a separate function because we don't want to hijack isapprox for tuples. 
-""" -approximately_equal(a::Tuple{T,T}, b::Tuple{T,T}) where T <: AbstractFloat = - a[1] ≈ b[1] && a[2] ≈ b[2] diff --git a/src/flow/kishimoto.jl b/src/flow/kishimoto.jl deleted file mode 100644 index 2edcd2ef4..000000000 --- a/src/flow/kishimoto.jl +++ /dev/null @@ -1,70 +0,0 @@ -# Method when using Boykov-Kolmogorov as a subroutine -# Kishimoto algorithm - -@traitfn function kishimoto( - flow_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix, # edge flow capacities - flow_algorithm::BoykovKolmogorovAlgorithm, # keyword argument for algorithm - routes::Int # keyword argument for routes - ) - # Initialisation - flow, F, labels = maximum_flow(flow_graph, source, target, - capacity_matrix, algorithm = flow_algorithm) - restriction = flow / routes - flow, F, labels = maximum_flow(flow_graph, source, target, capacity_matrix, - algorithm = flow_algorithm, restriction = restriction) - - # Loop condition : approximatively not equal is enforced by floating precision - i = 1 - while flow < routes * restriction && flow ≉ routes * restriction - restriction = (flow - i * restriction) / (routes - i) - i += 1 - flow, F, labels = maximum_flow(flow_graph, source, target, capacity_matrix, - algorithm = flow_algorithm, restriction = restriction) - end - - # End - return flow, F, labels -end - - -""" - kishimoto(flow_graph, source, target, capacity_matrix, flow_algorithm, routes) - -Compute the maximum multiroute flow (for an integer number of `route`s) -between `source` and `target` in `flow_graph` with capacities in `capacity_matrix` -using the [Kishimoto algorithm](http://dx.doi.org/10.1109/ICCS.1992.255031). -Return the value of the multiroute flow as well as the final flow matrix, -along with a multiroute cut if Boykov-Kolmogorov is used as a subroutine. -""" -function kishimoto end -@traitfn function kishimoto( - flow_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix, # edge flow capacities - flow_algorithm::AbstractFlowAlgorithm, # keyword argument for algorithm - routes::Int # keyword argument for routes - ) - # Initialisation - flow, F = maximum_flow(flow_graph, source, target, - capacity_matrix, algorithm = flow_algorithm) - restriction = flow / routes - - flow, F = maximum_flow(flow_graph, source, target, capacity_matrix, - algorithm = flow_algorithm, restriction = restriction) - - # Loop condition : approximatively not equal is enforced by floating precision - i = 1 - while flow < routes * restriction && flow ≉ routes * restriction - restriction = (flow - i * restriction) / (routes - i) - i += 1 - flow, F = maximum_flow(flow_graph, source, target, capacity_matrix, - algorithm = flow_algorithm, restriction = restriction) - end - - # End - return flow, F -end diff --git a/src/flow/maximum_flow.jl b/src/flow/maximum_flow.jl deleted file mode 100644 index 4234befeb..000000000 --- a/src/flow/maximum_flow.jl +++ /dev/null @@ -1,180 +0,0 @@ -""" - AbstractFlowAlgorithm - -Abstract type that allows users to pass in their preferred algorithm -""" -abstract type AbstractFlowAlgorithm end - -""" - EdmondsKarpAlgorithm <: AbstractFlowAlgorithm - -Forces the maximum_flow function to use the Edmonds–Karp algorithm. -""" -struct EdmondsKarpAlgorithm <: AbstractFlowAlgorithm end - -""" - DinicAlgorithm <: AbstractFlowAlgorithm - -Forces the maximum_flow function to use Dinic's algorithm. 
-""" -struct DinicAlgorithm <: AbstractFlowAlgorithm end - -""" - BoykovKolmogorovAlgorithm <: AbstractFlowAlgorithm - -Forces the maximum_flow function to use the Boykov-Kolmogorov algorithm. -""" -struct BoykovKolmogorovAlgorithm <: AbstractFlowAlgorithm end - -""" -Forces the maximum_flow function to use the Push-Relabel algorithm. -""" -struct PushRelabelAlgorithm <: AbstractFlowAlgorithm end - -""" - DefaultCapacity{T} - -Structure that returns `1` if a forward edge exists in `flow_graph`, and `0` otherwise. -""" -struct DefaultCapacity{T<:Integer} <: AbstractMatrix{T} - flow_graph::DiGraph - nv::T -end - -@traitfn DefaultCapacity(flow_graph::::IsDirected) = - DefaultCapacity(DiGraph(flow_graph), nv(flow_graph)) - -getindex(d::DefaultCapacity{T}, s::Integer, t::Integer) where T = if has_edge(d.flow_graph, s, t) one(T) else zero(T) end -# isassigned{T<:Integer}(d::DefaultCapacity{T}, u::T, v::T) = (u in 1:d.nv) && (v in 1:d.nv) -size(d::DefaultCapacity) = (Int(d.nv), Int(d.nv)) -transpose(d::DefaultCapacity) = DefaultCapacity(reverse(d.flow_graph)) -ctranspose(d::DefaultCapacity) = DefaultCapacity(reverse(d.flow_graph)) - -""" - residual(flow_graph) - -Return a directed residual graph for a directed `flow_graph`. - -The residual graph comprises the same node list as the orginal flow graph, but -ensures that for each edge (u,v), (v,u) also exists in the graph. This allows -flow in the reverse direction. - -If only the forward edge exists, a reverse edge is created with capacity 0. -If both forward and reverse edges exist, their capacities are left unchanged. -Since the capacities in [`LightGraphs.DefaultDistance`](@ref) cannot be changed, an array of ones -is created. -""" -function residual end -@traitfn residual(flow_graph::::IsDirected) = SimpleDiGraph(Graph(flow_graph)) - -# Method for Edmonds–Karp algorithm - -@traitfn function maximum_flow( - flow_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix, # edge flow capacities - algorithm::EdmondsKarpAlgorithm # keyword argument for algorithm - ) - residual_graph = residual(flow_graph) - return edmonds_karp_impl(residual_graph, source, target, capacity_matrix) -end - -# Method for Dinic's algorithm - -@traitfn function maximum_flow( - flow_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix, # edge flow capacities - algorithm::DinicAlgorithm # keyword argument for algorithm - ) - residual_graph = residual(flow_graph) - return dinic_impl(residual_graph, source, target, capacity_matrix) -end - -# Method for Boykov-Kolmogorov algorithm - -@traitfn function maximum_flow( - flow_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix, # edge flow capacities - algorithm::BoykovKolmogorovAlgorithm # keyword argument for algorithm - ) - residual_graph = residual(flow_graph) - return boykov_kolmogorov_impl(residual_graph, source, target, capacity_matrix) -end - -# Method for Push-relabel algorithm - -@traitfn function maximum_flow( - flow_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix, # edge flow capacities - algorithm::PushRelabelAlgorithm # keyword argument for algorithm - ) - residual_graph = residual(flow_graph) - return push_relabel(residual_graph, 
source, target, capacity_matrix) -end - -""" - maximum_flow(flow_graph, source, target[, capacity_matrix][, algorithm][, restriction]) - -Generic maximum_flow function for `flow_graph` from `source` to `target` with -capacities in `capacity_matrix`. -Uses flow algorithm `algorithm` and cutoff restriction `restriction`. - -- If `capacity_matrix` is not specified, `DefaultCapacity(flow_graph)` will be used. -- If `algorithm` is not specified, it will default to [`PushRelabelAlgorithm`](@ref). -- If `restriction` is not specified, it will default to `0`. - -Return a tuple of (maximum flow, flow matrix). For the Boykov-Kolmogorov -algorithm, the associated mincut is returned as a third output. - -### Usage Example: - -```jldoctest -julia> flow_graph = SimpleDiGraph(8) # Create a flow-graph -julia> flow_edges = [ -(1,2,10),(1,3,5),(1,4,15),(2,3,4),(2,5,9), -(2,6,15),(3,4,4),(3,6,8),(4,7,16),(5,6,15), -(5,8,10),(6,7,15),(6,8,10),(7,3,6),(7,8,10) -] - -julia> capacity_matrix = zeros(Int, 8, 8) # Create a capacity matrix - -julia> for e in flow_edges - u, v, f = e - add_edge!(flow_graph, u, v) - capacity_matrix[u,v] = f -end - -julia> f, F = maximum_flow(flow_graph, 1, 8) # Run default maximum_flow (push-relabel) without the capacity_matrix - -julia> f, F = maximum_flow(flow_graph, 1, 8, capacity_matrix) # Run default maximum_flow with the capacity_matrix - -julia> f, F = maximum_flow(flow_graph, 1, 8, capacity_matrix, algorithm=EdmondsKarpAlgorithm()) # Run Edmonds-Karp algorithm - -julia> f, F = maximum_flow(flow_graph, 1, 8, capacity_matrix, algorithm=DinicAlgorithm()) # Run Dinic's algorithm - -julia> f, F, labels = maximum_flow(flow_graph, 1, 8, capacity_matrix, algorithm=BoykovKolmogorovAlgorithm()) # Run Boykov-Kolmogorov algorithm - -``` -""" -function maximum_flow( - flow_graph::AbstractGraph, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix = # edge flow capacities - DefaultCapacity(flow_graph); - algorithm::AbstractFlowAlgorithm = # keyword argument for algorithm - PushRelabelAlgorithm(), - restriction::Real = 0 # keyword argument for restriction max-flow - ) - if restriction > 0 - return maximum_flow(flow_graph, source, target, min.(restriction, capacity_matrix), algorithm) - end - return maximum_flow(flow_graph, source, target, capacity_matrix, algorithm) -end diff --git a/src/flow/multiroute_flow.jl b/src/flow/multiroute_flow.jl deleted file mode 100644 index 559cd30d9..000000000 --- a/src/flow/multiroute_flow.jl +++ /dev/null @@ -1,226 +0,0 @@ -""" - AbstractMultirouteFlowAlgorithm - -Abstract type that allows users to pass in their preferred algorithm. -""" -abstract type AbstractMultirouteFlowAlgorithm end - -""" - KishimotoAlgorithm - -Used to specify the Kishimoto algorithm. -""" -struct KishimotoAlgorithm <: AbstractMultirouteFlowAlgorithm end - -""" - ExtendedMultirouteFlowAlgorithm - -Used to specify the Extended Multiroute Flow algorithm. 
-""" -struct ExtendedMultirouteFlowAlgorithm <: AbstractMultirouteFlowAlgorithm end - -# Methods when the number of routes is more than the connectivity -# 1) When using Boykov-Kolmogorov as a flow subroutine -# 2) Other flow algorithm -function empty_flow( - capacity_matrix::AbstractMatrix{T}, # edge flow capacities - flow_algorithm::BoykovKolmogorovAlgorithm # keyword argument for algorithm - ) where T<:Real - n = size(capacity_matrix, 1) - return zero(T), zeros(T, n, n), zeros(T, n) -end -# 2) Other flow algorithm -function empty_flow( - capacity_matrix::AbstractMatrix{T}, # edge flow capacities - flow_algorithm::AbstractFlowAlgorithm # keyword argument for algorithm - ) where T<:Real - n = size(capacity_matrix, 1) - return zero(T), zeros(T, n, n) -end - -# Method for Kishimoto algorithm -@traitfn function multiroute_flow( - flow_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix, # edge flow capacities - flow_algorithm::AbstractFlowAlgorithm, # keyword argument for algorithm - mrf_algorithm::KishimotoAlgorithm, # keyword argument for algorithm - routes::Int # keyword argument for routes - ) - return kishimoto(flow_graph, source, target, capacity_matrix, flow_algorithm, routes) -end - -## Methods for Extended Multiroute Flow Algorithm -#1 When the breaking points are not already known -@traitfn function multiroute_flow( - flow_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix, # edge flow capacities - flow_algorithm::AbstractFlowAlgorithm, # keyword argument for algorithm - mrf_algorithm::ExtendedMultirouteFlowAlgorithm, # keyword argument for algorithm - routes::Real # keyword argument for routes - ) - return emrf(flow_graph, source, target, capacity_matrix, flow_algorithm, routes) -end -#2 When the breaking points are already known -#2-a Output: flow value (paired with the associated restriction) -multiroute_flow( - breakingpoints::Vector{Tuple{T,T,Int}}, # vector of breaking points - routes::R # keyword argument for routes - ) where T<:Real where R<:Real = - intersection(breakingpoints, routes) - -#2-b Output: flow value, flows(, labels) -function multiroute_flow( - breakingpoints::AbstractVector{Tuple{T1,T1,Int}}, # vector of breaking points - routes::R, # keyword argument for routes - flow_graph::AbstractGraph, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix{T2} = # edge flow capacities - DefaultCapacity(flow_graph); - flow_algorithm::AbstractFlowAlgorithm = # keyword argument for algorithm - PushRelabelAlgorithm() - ) where T2 where T1<:Real where R<:Real - x, f = intersection(breakingpoints, routes) - # For other cases, capacities need to be Floats - if !(T2<:AbstractFloat) - capacity_matrix = convert(AbstractMatrix{Float64}, capacity_matrix) - end - - return maximum_flow(flow_graph, source, target, capacity_matrix, - algorithm = flow_algorithm, restriction = x) -end - -### TODO: CLEAN UP THIS FUNCTION AND DOCUMENTATION. THERE SHOULD BE NO NEED TO -### HAVE A TYPE-UNSTABLE FUNCTION HERE. (sbromberger 2017-03-26) -""" - multiroute_flow(flow_graph, source, target[, DefaultCapacity][, flow_algorithm][, mrf_algorithm][, routes]) - -The generic multiroute_flow function. 
- -The output will vary depending on the input: - -- When the number of `route`s is `0`, return the set of breaking points of -the multiroute flow. -- When the number of `route`s is `1`, return a flow with a set of 1-disjoint paths -(this is the classical max-flow implementation). -- When the input is limited to a set of breaking points and a route value `k`, -return only the k-route flow. -- Otherwise, a tuple with 1) the maximum flow and 2) the flow matrix. When the -max-flow subroutine is the Boykov-Kolmogorov algorithm, the associated mincut is -returned as a third output. - -When the input is a network, it requires the following arguments: - -- `flow_graph`: the input graph -- `source`: the source vertex -- `target`: the target vertex -- `capacity_matrix`: matrix of edge flow capacities -- `flow_algorithm`: keyword argument for flow algorithm -- `mrf_algorithm`: keyword argument for multiroute flow algorithm -- `routes`: keyword argument for the number of routes - -When the input is only the set of (breaking) points and the number of route, -it requires the following arguments: - -- `breakingpoints`: vector of breaking points -- `routes`: number of routes - -When the input is the set of (breaking) points, the number of routes, -and the network descriptors, it requires the following arguments: - -- `breakingpoints`: vector of breaking points -- `routes`: number of routes -- `flow_graph`: the input graph -- `source`: the source vertex -- `target`: the target vertex -- `capacity_matrix`: matrix of edge flow capacities -- `flow_algorithm`: keyword argument for flow algorithm - -The function defaults to the Push-relabel (classical flow) and Kishimoto -(multiroute) algorithms. Alternatively, the algorithms to be used can also -be specified through keyword arguments. A default capacity of `1` is assumed -for each link if no capacity matrix is provided. 
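As a condensed restatement of the dispatch rules listed above (and of the usage example that follows), assuming the same `flow_graph` and `capacity_matrix` as in the `maximum_flow` docstring and a build that still ships this code:

```julia
f1, F1 = multiroute_flow(flow_graph, 1, 8, capacity_matrix, routes=1)    # 1 route: plain max-flow
f2, F2 = multiroute_flow(flow_graph, 1, 8, capacity_matrix, routes=2)    # integer routes: Kishimoto by default
f3, F3 = multiroute_flow(flow_graph, 1, 8, capacity_matrix, routes=1.5)  # fractional routes: Extended MRF
points = multiroute_flow(flow_graph, 1, 8, capacity_matrix)              # routes omitted (0): all breaking points
```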
- -The `mrf_algorithm` keyword is inforced to Extended Multiroute Flow -in the following cases: - -- The number of routes is non-integer -- The number of routes is 0 or non-specified - -### Usage Example : -(please consult the [`maximum_flow`](@ref) section for options about flow_algorithm -and capacity_matrix) - -```jldoctest -julia> flow_graph = SimpleDiGraph(8) # Create a flow graph - -julia> flow_edges = [ -(1, 2, 10), (1, 3, 5), (1, 4, 15), (2, 3, 4), (2, 5, 9), -(2, 6, 15), (3, 4, 4), (3, 6, 8), (4, 7, 16), (5, 6, 15), -(5, 8, 10), (6, 7, 15), (6, 8, 10), (7, 3, 6), (7, 8, 10) -] - -julia> capacity_matrix = zeros(Int, 8, 8) # Create a capacity matrix - -julia> for e in flow_edges - u, v, f = e - add_edge!(flow_graph, u, v) - capacity_matrix[u, v] = f -end - -julia> f, F = multiroute_flow(flow_graph, 1, 8, capacity_matrix, routes = 2) # Run default multiroute_flow with an integer number of routes = 2 - -julia> f, F = multiroute_flow(flow_graph, 1, 8, capacity_matrix, routes = 1.5) # Run default multiroute_flow with a noninteger number of routes = 1.5 - -julia> points = multiroute_flow(flow_graph, 1, 8, capacity_matrix) # Run default multiroute_flow for all the breaking points values - -julia> f, F = multiroute_flow(points, 1.5) # Then run multiroute flow algorithm for any positive number of routes - -julia> f = multiroute_flow(points, 1.5, valueonly = true) - -julia> f, F, labels = multiroute_flow(flow_graph, 1, 8, capacity_matrix, algorithm = BoykovKolmogorovAlgorithm(), routes = 2) # Run multiroute flow algorithm using Boykov-Kolmogorov algorithm as maximum_flow routine - -``` -""" -function multiroute_flow( - flow_graph::AbstractGraph, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix{T} = # edge flow capacities - DefaultCapacity(flow_graph); - flow_algorithm::AbstractFlowAlgorithm = # keyword argument for algorithm - PushRelabelAlgorithm(), - mrf_algorithm::AbstractMultirouteFlowAlgorithm = # keyword argument for algorithm - KishimotoAlgorithm(), - routes::R = 0 # keyword argument for number of routes (0 = all values) - ) where T where R <: Real - - # a flow with a set of 1-disjoint paths is a classical max-flow - (routes == 1) && - return maximum_flow(flow_graph, source, target, capacity_matrix, flow_algorithm) - - # routes > λ (connectivity) → f = 0 - λ = maximum_flow(flow_graph, source, target, DefaultCapacity(flow_graph), - algorithm = flow_algorithm)[1] - (routes > λ) && return empty_flow(capacity_matrix, flow_algorithm) - - # For other cases, capacities need to be Floats - if !(T<:AbstractFloat) - capacity_matrix = convert(AbstractMatrix{Float64}, capacity_matrix) - end - - # Ask for all possible values (breaking points) - (routes == 0) && - return emrf(flow_graph, source, target, capacity_matrix, flow_algorithm) - # The number of routes is a float → EMRF - (R <: AbstractFloat) && - return emrf(flow_graph, source, target, capacity_matrix, flow_algorithm, routes) - - # Other calls - return multiroute_flow(flow_graph, source, target, capacity_matrix, - flow_algorithm, mrf_algorithm, routes) -end diff --git a/src/flow/push_relabel.jl b/src/flow/push_relabel.jl deleted file mode 100644 index 303d5a30a..000000000 --- a/src/flow/push_relabel.jl +++ /dev/null @@ -1,205 +0,0 @@ -@doc_str """ - push_relabel(residual_graph, source, target, capacity_matrix) - -Return the maximum flow of `residual_graph` from `source` to `target` using the -FIFO push relabel algorithm with gap heuristic. 
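For orientation, every `maximum_flow` method above reduces to the same two-step pattern: build the residual graph, then hand it to the chosen implementation. The sketch below spells that out for the push-relabel routine, reusing the `flow_graph` and `capacity_matrix` from the `maximum_flow` docstring; qualify the calls with `LightGraphs.` if these internals are not exported in the build you are reading.

```julia
r = residual(flow_graph)                       # same vertices, plus reverse edges so flow can be pushed back
f, F = push_relabel(r, 1, 8, capacity_matrix)  # what maximum_flow(..., algorithm=PushRelabelAlgorithm()) does
```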
- -### Performance -Takes approximately ``\\mathcal{O}(|V|^{3})`` time. -""" -function push_relabel end -@traitfn function push_relabel{T}( - residual_graph::::IsDirected, # the input graph - source::Integer, # the source vertex - target::Integer, # the target vertex - capacity_matrix::AbstractMatrix{T} # edge flow capacities - ) - - n = nv(residual_graph) - flow_matrix = zeros(T, n, n) - - height = zeros(Int, n) - height[source] = n - - count = zeros(Int, 2 * n + 1) - count[0 + 1] = n - 1 - count[n + 1] = 1 - - excess = zeros(T, n) - excess[source] = typemax(T) - - active = falses(n) - active[source] = true - active[target] = true - - Q = Array{Int,1}() - sizehint!(Q, n) - - - for v in out_neighbors(residual_graph, source) - push_flow!(residual_graph, source, v, capacity_matrix, flow_matrix, excess, height, active, Q) - end - - while length(Q) > 0 - v = pop!(Q) - active[v] = false - discharge!(residual_graph, v, capacity_matrix, flow_matrix, excess, height, active, count, Q) - end - - return sum([flow_matrix[v, target] for v in in_neighbors(residual_graph, target)]), flow_matrix -end - -""" - enqueue_vertex!(Q, v, active, excess) - -Push inactive node `v` into queue `Q` and activates it. Requires preallocated -`active` and `excess` vectors. -""" - -function enqueue_vertex!( - Q::AbstractVector, - v::Integer, # input vertex - active::AbstractVector{Bool}, - excess::AbstractVector - ) - if !active[v] && excess[v] > 0 - active[v] = true - unshift!(Q, v) - end - return nothing -end - -""" - push_flow!(residual_graph, u, v, capacity_matrix, flow_matrix, excess, height, active, Q) - -Using `residual_graph` with capacities in `capacity_matrix`, push as much flow -as possible through the given edge(`u`, `v`). Requires preallocated `flow_matrix` -matrix, and `excess`, `height, `active`, and `Q` vectors. -""" -function push_flow! end -@traitfn function push_flow!( - residual_graph::::IsDirected, # the input graph - u::Integer, # input from-vertex - v::Integer, # input to-vetex - capacity_matrix::AbstractMatrix, - flow_matrix::AbstractMatrix, - excess::AbstractVector, - height::AbstractVector{Int}, - active::AbstractVector{Bool}, - Q::AbstractVector - ) - flow = min(excess[u], capacity_matrix[u, v] - flow_matrix[u, v]) - - flow == 0 && return nothing - height[u] <= height[v] && return nothing - - flow_matrix[u, v] += flow - flow_matrix[v, u] -= flow - - excess[u] -= flow - excess[v] += flow - - enqueue_vertex!(Q, v, active, excess) - nothing -end - -""" - gap!(residual_graph, h, excess, height, active, count, Q) - -Implement the push-relabel gap heuristic. Relabel all vertices above a cutoff height. -Reduce the number of relabels required. - -Requires arguments: - -- residual_graph::DiGraph # the input graph -- h::Int # cutoff height -- excess::AbstractVector -- height::AbstractVector{Int} -- active::AbstractVector{Bool} -- count::AbstractVector{Int} -- Q::AbstractVector -""" -function gap! 
end -@traitfn function gap!( - residual_graph::::IsDirected, # the input graph - h::Int, # cutoff height - excess::AbstractVector, - height::AbstractVector{Int}, - active::AbstractVector{Bool}, - count::AbstractVector{Int}, - Q::AbstractVector # FIFO queue - ) - n = nv(residual_graph) - for v in vertices(residual_graph) - height[v] < h && continue - count[height[v] + 1] -= 1 - height[v] = max(height[v], n + 1) - count[height[v] + 1] += 1 - enqueue_vertex!(Q, v, active, excess) - end - nothing -end - -""" - relabel!(residual_graph, v, capacity_matrix, flow_matrix, excess, height, active, count, Q) - -Relabel a node `v` with respect to its neighbors to produce an admissable edge. -""" -function relabel! end -@traitfn function relabel!( - residual_graph::::IsDirected, # the input graph - v::Integer, # input vertex to be relabeled - capacity_matrix::AbstractMatrix, - flow_matrix::AbstractMatrix, - excess::AbstractVector, - height::AbstractVector{Int}, - active::AbstractVector{Bool}, - count::AbstractVector{Int}, - Q::AbstractVector - ) - n = nv(residual_graph) - count[height[v] + 1] -= 1 - height[v] = 2 * n - for to in out_neighbors(residual_graph, v) - if capacity_matrix[v, to] > flow_matrix[v, to] - height[v] = min(height[v], height[to] + 1) - end - end - count[height[v] + 1] += 1 - enqueue_vertex!(Q, v, active, excess) - nothing -end - - -""" - discharge!(residual_graph, v, capacity_matrix, flow_matrix, excess, height, active, count, Q) - -Drain the excess flow out of node `v`. Run the gap heuristic or relabel the -vertex if the excess remains non-zero. -""" -function discharge! end -@traitfn function discharge!( - residual_graph::::IsDirected, # the input graph - v::Integer, # vertex to be discharged - capacity_matrix::AbstractMatrix, - flow_matrix::AbstractMatrix, - excess::AbstractVector, - height::AbstractVector{Int}, - active::AbstractVector{Bool}, - count::AbstractVector{Int}, - Q::AbstractVector # FIFO queue - ) - for to in out_neighbors(residual_graph, v) - excess[v] == 0 && break - push_flow!(residual_graph, v, to, capacity_matrix, flow_matrix, excess, height, active, Q) - end - - if excess[v] > 0 - if count[height[v] + 1] == 1 - gap!(residual_graph, height[v], excess, height, active, count, Q) - else - relabel!(residual_graph, v, capacity_matrix, flow_matrix, excess, height, active, count, Q) - end - end - nothing -end diff --git a/src/generators/euclideangraphs.jl b/src/generators/euclideangraphs.jl index c382b9cd4..36f8ef856 100644 --- a/src/generators/euclideangraphs.jl +++ b/src/generators/euclideangraphs.jl @@ -1,4 +1,4 @@ -@doc_str """ +""" euclidean_graph(N, d; seed=-1, L=1., p=2., cutoff=-1., bc=:open) Generate `N` uniformly distributed points in the box ``[0,L]^{d}`` @@ -8,7 +8,7 @@ a matrix with the points' positions. function euclidean_graph(N::Int, d::Int; L=1., seed = -1, kws...) 
rng = LightGraphs.getRNG(seed) - points = scale!(rand(rng, d, N), L) + points = LinearAlgebra.rmul!(rand(rng, d, N), L) return (euclidean_graph(points; L=L, kws...)..., points) end @@ -45,11 +45,11 @@ function euclidean_graph(points::Matrix; Δ = points[:, i] - points[:, j] elseif bc == :periodic Δ = abs.(points[:, i] - points[:, j]) - Δ = min.(L - Δ, Δ) + Δ = min.(L .- Δ, Δ) else throw(ArgumentError("$bc is not a valid boundary condition")) end - dist = norm(Δ, p) + dist = LinearAlgebra.norm(Δ, p) if dist < cutoff e = Edge(i, j) add_edge!(g, e) diff --git a/src/generators/randgraphs.jl b/src/generators/randgraphs.jl index 6b6a3488a..ac05d80c2 100644 --- a/src/generators/randgraphs.jl +++ b/src/generators/randgraphs.jl @@ -90,6 +90,57 @@ function erdos_renyi(n::Integer, ne::Integer; is_directed=false, seed::Integer=- return is_directed ? SimpleDiGraph(n, ne, seed=seed) : SimpleGraph(n, ne, seed=seed) end +""" + expected_degree_graph(ω) + +Given a vector of expected degrees `ω` indexed by vertex, create a random undirected graph in which vertices `i` and `j` are +connected with probability `ω[i]*ω[j]/sum(ω)`. + +### Optional Arguments +- `seed=-1`: set the RNG seed. + +### Implementation Notes +The algorithm should work well for `maximum(ω) << sum(ω)`. As `maximum(ω)` approaches `sum(ω)`, some deviations +from the expected values are likely. + +### References +- Connected Components in Random Graphs with Given Expected Degree Sequences, Linyuan Lu and Fan Chung. [https://link.springer.com/article/10.1007%2FPL00012580](https://link.springer.com/article/10.1007%2FPL00012580) +- Efficient Generation of Networks with Given Expected Degrees, Joel C. Miller and Aric Hagberg. [https://doi.org/10.1007/978-3-642-21286-4_10](https://doi.org/10.1007/978-3-642-21286-4_10) +""" +function expected_degree_graph(ω::Vector{T}; seed::Int=-1) where T<:Real + g = Graph(length(ω)) + expected_degree_graph!(g, ω, seed=seed) +end + +function expected_degree_graph!(g::Graph, ω::Vector{T}; seed::Int=-1) where T<:Real + n = length(ω) + @assert all(zero(T) .<= ω .<= n-one(T)) "Elements of ω needs to be at least 0 and at most n-1" + + π = sortperm(ω, rev=true) + rng = getRNG(seed) + + S = sum(ω) + + for u=1:(n-1) + v = u+1 + p = min(ω[π[u]]*ω[π[v]]/S, one(T)) + while v <= n && p > zero(p) + if p != one(T) + v += floor(Int, log(rand(rng))/log(one(T)-p)) + end + if v <= n + q = min(ω[π[u]]*ω[π[v]]/S, one(T)) + if rand(rng) < q/p + add_edge!(g, π[u], π[v]) + end + p = q + v += 1 + end + end + end + return g +end + """ watts_strogatz(n, k, β) @@ -144,9 +195,9 @@ function _suitable(edges::Set{Edge}, potential_edges::Dict{T,T}) where T<:Intege return false end -_try_creation(n::Integer, k::Integer, rng::AbstractRNG) = _try_creation(n, fill(k, n), rng) +_try_creation(n::Integer, k::Integer, rng::Random.AbstractRNG) = _try_creation(n, fill(k, n), rng) -function _try_creation(n::T, k::Vector{T}, rng::AbstractRNG) where T<:Integer +function _try_creation(n::T, k::Vector{T}, rng::Random.AbstractRNG) where T<:Integer edges = Set{Edge}() m = 0 stubs = zeros(T, sum(k)) @@ -160,7 +211,7 @@ function _try_creation(n::T, k::Vector{T}, rng::AbstractRNG) where T<:Integer while !isempty(stubs) potential_edges = Dict{T,T}() - shuffle!(rng, stubs) + Random.shuffle!(rng, stubs) for i in 1:2:length(stubs) s1, s2 = stubs[i:(i + 1)] if (s1 > s2) @@ -247,7 +298,7 @@ function barabasi_albert!(g::AbstractGraph, n::Integer, k::Integer; seed::Int=-1 n0 == n && return g # seed random number generator - seed > 0 && srand(seed) + seed > 0 && 
Random.srand(seed) # add missing vertices sizehint!(g.fadjlist, n) @@ -266,7 +317,7 @@ function barabasi_albert!(g::AbstractGraph, n::Integer, k::Integer; seed::Int=-1 end # vector of weighted vertices (each node is repeated once for each adjacent edge) - weightedVs = Vector{Int}(2 * (n - n0) * k + 2 * ne(g)) + weightedVs = Vector{Int}(undef, 2 * (n - n0) * k + 2 * ne(g)) # initialize vector of weighted vertices offset = 0 @@ -279,7 +330,7 @@ function barabasi_albert!(g::AbstractGraph, n::Integer, k::Integer; seed::Int=-1 picked = fill(false, n) # vector of targets - targets = Vector{Int}(k) + targets = Vector{Int}(undef, k) for source in (n0 + 1):n # choose k targets from the existing vertices @@ -306,7 +357,7 @@ function barabasi_albert!(g::AbstractGraph, n::Integer, k::Integer; seed::Int=-1 end -@doc_str """ +""" static_fitness_model(m, fitness) Generate a random graph with ``|fitness|`` vertices and `m` edges, @@ -342,7 +393,7 @@ function static_fitness_model(m::Integer, fitness::Vector{T}; seed::Int=-1) wher return g end -@doc_str """ +""" static_fitness_model(m, fitness_out, fitness_in) Generate a random graph with ``|fitness\\_out + fitness\\_in|`` vertices and `m` edges, @@ -398,7 +449,7 @@ function _create_static_fitness_graph!(g::AbstractGraph, m::Integer, cum_fitness end end -@doc_str """ +""" static_scale_free(n, m, α) Generate a random graph with `n` vertices, `m` edges and expected power-law @@ -424,7 +475,7 @@ function static_scale_free(n::Integer, m::Integer, α::Real; seed::Int=-1, finit static_fitness_model(m, fitness, seed=seed) end -@doc_str """ +""" static_scale_free(n, m, α_out, α_in) Generate a random graph with `n` vertices, `m` edges and expected power-law @@ -452,7 +503,7 @@ function static_scale_free(n::Integer, m::Integer, α_out::Real, α_in::Float64; fitness_out = _construct_fitness(n, α_out, finite_size_correction) fitness_in = _construct_fitness(n, α_in, finite_size_correction) # eliminate correlation - shuffle!(fitness_in) + Random.shuffle!(fitness_in) static_fitness_model(m, fitness_out, fitness_in, seed=seed) end @@ -472,7 +523,7 @@ function _construct_fitness(n::Integer, α::Real, finite_size_correction::Bool) return fitness end -@doc_str """ +""" random_regular_graph(n, k) Create a random undirected @@ -514,7 +565,7 @@ function random_regular_graph(n::Integer, k::Integer; seed::Int=-1) return g end -@doc_str """ +""" random_configuration_model(n, ks) Create a random undirected graph according to the [configuration model] @@ -554,7 +605,7 @@ function random_configuration_model(n::Integer, k::Array{T}; seed::Int=-1, check return g end -@doc_str """ +""" random_regular_digraph(n, k) Create a random directed [regular graph](https://en.wikipedia.org/wiki/Regular_graph) @@ -582,23 +633,23 @@ function random_regular_digraph(n::Integer, k::Integer; dir::Symbol=:out, seed:: rng = getRNG(seed) cs = collect(2:n) i = 1 - I = Vector{Int}(n * k) - J = Vector{Int}(n * k) + I = Vector{Int}(undef, n * k) + J = Vector{Int}(undef, n * k) V = fill(true, n * k) for r in 1:n l = ((r - 1) * k + 1):(r * k) - I[l] = r + I[l] .= r J[l] = sample!(rng, cs, k, exclude = r) end if dir == :out - return SimpleDiGraph(sparse(I, J, V, n, n)) + return SimpleDiGraph(SparseArrays.sparse(I, J, V, n, n)) else - return SimpleDiGraph(sparse(I, J, V, n, n)') + return SimpleDiGraph(SparseArrays.sparse(I, J, V, n, n)') end end -@doc_str """ +""" random_tournament_digraph(n) Create a random directed [tournament graph] @@ -620,7 +671,7 @@ function random_tournament_digraph(n::Integer; seed::Int=-1) 
return g end -@doc_str """ +""" stochastic_block_model(c, n) Return a Graph generated according to the Stochastic Block Model (SBM). @@ -671,7 +722,7 @@ function stochastic_block_model(c::Matrix{T}, n::Vector{U}; seed::Int = -1) wher return g end -@doc_str """ +""" stochastic_block_model(cint, cext, n) Return a Graph generated according to the Stochastic Block Model (SBM), sampling @@ -704,7 +755,7 @@ mutable struct StochasticBlockModel{T<:Integer,P<:Real} n::T nodemap::Array{T} affinities::Matrix{P} - rng::MersenneTwister + rng::Random.MersenneTwister end ==(sbm::StochasticBlockModel, other::StochasticBlockModel) = @@ -738,7 +789,7 @@ and external probabilities `externalp`. function sbmaffinity(internalp::Vector{T}, externalp::Real, sizes::Vector{U}) where T<:Real where U<:Integer numblocks = length(sizes) numblocks == length(internalp) || throw(ArgumentError("Inconsistent input dimensions: internalp, sizes")) - B = diagm(internalp) + externalp * (ones(numblocks, numblocks) - I) + B = LinearAlgebra.diagm(0=>internalp) + externalp * (ones(numblocks, numblocks) - LinearAlgebra.I) return B end @@ -759,10 +810,10 @@ function StochasticBlockModel(internalp::Vector{T}, externalp::Real, end -const biclique = ones(2, 2) - eye(2) +const biclique = ones(2, 2) - Matrix{Float64}(LinearAlgebra.I, 2, 2) #TODO: this documentation needs work. sbromberger 20170326 -@doc_str """ +""" nearbipartiteaffinity(sizes, between, intra) Construct the affinity matrix for a near bipartite SBM. @@ -775,12 +826,12 @@ The blocks are connected with probability `between`. """ function nearbipartiteaffinity(sizes::Vector{T}, between::Real, intra::Real) where T<:Integer numblocks = div(length(sizes), 2) - return kron(between * eye(numblocks), biclique) + eye(2numblocks) * intra + return kron(between * Matrix{Float64}(LinearAlgebra.I, numblocks, numblocks), biclique) + Matrix{Float64}(LinearAlgebra.I, 2*numblocks, 2*numblocks) * intra end #Return a generator for edges from a stochastic block model near-bipartite graph. nearbipartiteaffinity(sizes::Vector{T}, between::Real, inter::Real, noise::Real) where T<:Integer = - nearbipartiteaffinity(sizes, between, inter) + noise + nearbipartiteaffinity(sizes, between, inter) .+ noise nearbipartiteSBM(sizes, between, inter, noise; seed::Int = -1) = StochasticBlockModel(sizes, nearbipartiteaffinity(sizes, between, inter, noise), seed=seed) @@ -790,7 +841,7 @@ nearbipartiteSBM(sizes, between, inter, noise; seed::Int = -1) = Generate a stream of random pairs in `1:n` using random number generator `RNG`. 
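The `sbmaffinity` change above is easy to misread, so here is a tiny worked instance (block sizes and probabilities are made up for illustration) of the matrix it builds: the internal probabilities sit on the diagonal and the external probability fills every off-diagonal entry.

```julia
using LinearAlgebra

internalp = [0.8, 0.6]
externalp = 0.1
B = diagm(0 => internalp) + externalp * (ones(2, 2) - I)
# B == [0.8 0.1; 0.1 0.6]
```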
""" -function random_pair(rng::AbstractRNG, n::Integer) +function random_pair(rng::Random.AbstractRNG, n::Integer) f(ch) = begin while true put!(ch, Edge(rand(rng, 1:n), rand(rng, 1:n))) @@ -846,7 +897,7 @@ function blockcounts(sbm::StochasticBlockModel, A::AbstractMatrix) I = collect(1:sbm.n) J = [sbm.nodemap[i] for i in 1:sbm.n] V = ones(sbm.n) - Q = sparse(I, J, V) + Q = SparseArrays.sparse(I, J, V) # Q = Q / Q'Q # @show Q'Q# < 1e-6 return (Q'A) * Q @@ -887,10 +938,10 @@ function kronecker(SCALE, edgefactor, A=0.57, B=0.19, C=0.19) ij .+= 2^(ib - 1) .* (hcat(ii_bit, jj_bit)) end - p = randperm(N) + p = Random.randperm(N) ij = p[ij] - p = randperm(M) + p = Random.randperm(M) ij = ij[p, :] g = SimpleDiGraph(N) diff --git a/src/generators/smallgraphs.jl b/src/generators/smallgraphs.jl index f343a0f27..31f922e04 100644 --- a/src/generators/smallgraphs.jl +++ b/src/generators/smallgraphs.jl @@ -18,7 +18,7 @@ function _make_simple_directed_graph(n::T, edgelist::Vector{Tuple{T,T}}) where T return g end -doc""" +""" smallgraph(s) smallgraph(s) @@ -84,7 +84,7 @@ end function smallgraph(s::AbstractString) ls = lowercase(s) if endswith(ls, "graph") - ls = replace(ls, "graph", "") + ls = replace(ls, "graph" => "") end return smallgraph(Symbol(ls)) @@ -194,7 +194,7 @@ function FruchtGraph() (9, 10), (9, 12), (11, 12) ] - return _make_simple_undirected_graph(20, e) + return _make_simple_undirected_graph(12, e) end diff --git a/src/generators/staticgraphs.jl b/src/generators/staticgraphs.jl index f8cc9c610..8ffa45629 100644 --- a/src/generators/staticgraphs.jl +++ b/src/generators/staticgraphs.jl @@ -18,7 +18,7 @@ function CompleteGraph(n::Integer) end -@doc_str """ +""" CompleteBipartiteGraph(n1, n2) Create an undirected [complete bipartite graph](https://en.wikipedia.org/wiki/Complete_bipartite_graph) @@ -169,7 +169,7 @@ function WheelDiGraph(n::Integer) return g end -@doc_str """ +""" Grid(dims; periodic=false) Create a ``|dims|``-dimensional cubic lattice, with length `dims[i]` @@ -200,7 +200,6 @@ end Create a [binary tree](https://en.wikipedia.org/wiki/Binary_tree) of depth `k`. """ - function BinaryTree(k::Integer) g = SimpleGraph(Int(2^k - 1)) for i in 0:(k - 2) @@ -223,7 +222,7 @@ Create a double complete binary tree with `k` levels. 
function DoubleBinaryTree(k::Integer) gl = BinaryTree(k) gr = BinaryTree(k) - g = blkdiag(gl, gr) + g = SparseArrays.blockdiag(gl, gr) add_edge!(g, 1, nv(gl) + 1) return g end @@ -242,7 +241,7 @@ function RoachGraph(k::Integer) nopole = SimpleGraph(2) antannae = crosspath(k, nopole) body = crosspath(k, dipole) - roach = blkdiag(antannae, body) + roach = SparseArrays.blockdiag(antannae, body) add_edge!(roach, nv(antannae) - 1, nv(antannae) + 1) add_edge!(roach, nv(antannae), nv(antannae) + 2) return roach diff --git a/src/graphcut/normalized_cut.jl b/src/graphcut/normalized_cut.jl index e44eb1cda..cee7b260c 100644 --- a/src/graphcut/normalized_cut.jl +++ b/src/graphcut/normalized_cut.jl @@ -1,8 +1,8 @@ #computes normalized cut cost for partition `cut` function _normalized_cut_cost(cut, W::AbstractMatrix, D) cut_cost = 0 - for j in indices(W, 2) - for i in indices(W, 1) + for j in axes(W, 2) + for i in axes(W, 1) if cut[i] != cut[j] cut_cost += W[i, j] end @@ -12,13 +12,13 @@ function _normalized_cut_cost(cut, W::AbstractMatrix, D) return cut_cost/sum(D*cut) + cut_cost/sum(D*(.~cut)) end -function _normalized_cut_cost(cut, W::SparseMatrixCSC, D) +function _normalized_cut_cost(cut, W::SparseArrays.SparseMatrixCSC, D) cut_cost = 0 - rows = rowvals(W) - vals = nonzeros(W) + rows = SparseArrays.rowvals(W) + vals = SparseArrays.nonzeros(W) n = size(W, 2) for i = 1:n - for j in nzrange(W, i) + for j in SparseArrays.nzrange(W, i) row = rows[j] if cut[i] != cut[row] cut_cost += vals[j]/2 @@ -32,9 +32,9 @@ function _partition_weightmx(cut, W::AbstractMatrix) nv = length(cut) nv2 = sum(cut) nv1 = nv - nv2 - newvid = Vector{Int}(nv) - vmap1 = Vector{Int}(nv1) - vmap2 = Vector{Int}(nv2) + newvid = Vector{Int}(undef, nv) + vmap1 = Vector{Int}(undef, nv1) + vmap2 = Vector{Int}(undef, nv2) j1 = 1 j2 = 1 for i in eachindex(cut) @@ -52,8 +52,8 @@ function _partition_weightmx(cut, W::AbstractMatrix) W1 = similar(W, (nv1, nv1)) W2 = similar(W, (nv2, nv2)) - for j in indices(W, 2) - for i in indices(W, 1) + for j in axes(W, 2) + for i in axes(W, 1) if cut[i] == cut[j] == false W1[newvid[i], newvid[j]] = W[i, j] elseif cut[i] == cut[j] == true @@ -65,13 +65,13 @@ function _partition_weightmx(cut, W::AbstractMatrix) return (W1, W2, vmap1, vmap2) end -function _partition_weightmx(cut, W::SparseMatrixCSC) +function _partition_weightmx(cut, W::SparseArrays.SparseMatrixCSC) nv = length(cut) nv2 = sum(cut) nv1 = nv - nv2 - newvid = Vector{Int}(nv) - vmap1 = Vector{Int}(nv1) - vmap2 = Vector{Int}(nv2) + newvid = Vector{Int}(undef, nv) + vmap1 = Vector{Int}(undef, nv1) + vmap2 = Vector{Int}(undef, nv2) j1 = 1 j2 = 1 for i in eachindex(cut) @@ -86,13 +86,13 @@ function _partition_weightmx(cut, W::SparseMatrixCSC) end end - rows = rowvals(W) - vals = nonzeros(W) + rows = SparseArrays.rowvals(W) + vals = SparseArrays.nonzeros(W) I1 = Vector{Int}(); I2 = Vector{Int}() J1 = Vector{Int}(); J2 = Vector{Int}() V1 = Vector{Float64}(); V2 = Vector{Float64}() for i = 1:nv - for j in nzrange(W, i) + for j in SparseArrays.nzrange(W, i) row = rows[j] if cut[i] == cut[row] == false push!(I1, newvid[i]) @@ -105,32 +105,32 @@ function _partition_weightmx(cut, W::SparseMatrixCSC) end end end - W1 = sparse(I1, J1, V1) - W2 = sparse(I2, J2, V2) + W1 = SparseArrays.sparse(I1, J1, V1) + W2 = SparseArrays.sparse(I2, J2, V2) return (W1, W2, vmap1, vmap2) end function _recursive_normalized_cut(W, thres=thres, num_cuts=num_cuts) m, n = size(W) - D = Diagonal(vec(sum(W, 2))) + D = LinearAlgebra.Diagonal(vec(sum(W, dims=2))) m == 1 && return [1] 
#get eigenvector corresponding to second smallest eigenvalue - # v = eigs(D-W, D, nev=2, which=:SR)[2][:,2] + # v = IterativeEigensolvers.eigs(D-W, D, nev=2, which=:SR)[2][:,2] # At least some versions of ARPACK have a bug, this is a workaround invDroot = sqrt.(inv(D)) # equal to Cholesky factorization for diagonal D if n > 10 - ret = eigs(invDroot'*(D-W)*invDroot, nev=2, which=:SR)[2][:,2] + ret = IterativeEigensolvers.eigs(invDroot'*(D-W)*invDroot, nev=2, which=:SR)[2][:,2] else - ret = eigfact(full(invDroot'*(D-W)*invDroot))[:vectors][:,2] + ret = LinearAlgebra.eigen(Matrix(invDroot'*(D-W)*invDroot)).vectors[:,2] end v = invDroot*ret #perform n-cuts with different partitions of v and find best one min_cost = Inf best_thres = -1 - for t in linspace(minimum(v), maximum(v), num_cuts) + for t in range(minimum(v), stop=maximum(v), length=num_cuts) cut = v.>t cost = _normalized_cut_cost(cut, W, D) if cost < min_cost @@ -146,7 +146,7 @@ function _recursive_normalized_cut(W, thres=thres, num_cuts=num_cuts) labels1 = _recursive_normalized_cut(W1, thres, num_cuts) labels2 = _recursive_normalized_cut(W2, thres, num_cuts) - labels = Vector{Int}(m) + labels = Vector{Int}(undef, m) offset = maximum(labels1) for i in eachindex(labels1) diff --git a/src/interface.jl b/src/interface.jl index 14c689ff2..359477996 100644 --- a/src/interface.jl +++ b/src/interface.jl @@ -124,37 +124,6 @@ Return true if the graph is a directed graph; false otherwise. """ is_directed(g) = _NI("is_directed") is_directed(::Type{T}) where T = _NI("is_directed") -""" - add_vertex!(g) - -Add a new vertex to the graph `g`. -Return true if the vertex was added successfully, false otherwise. -""" -add_vertex!(x) = _NI("add_vertex!") - -""" - add_edge!(g, e) - -Add a new edge `e` to `g`. Return false if add fails -(e.g., if vertices are not in the graph, or edge already exists), true otherwise. -""" -add_edge!(x, e) = _NI("add_edge!") - -""" - rem_vertex!(g, v) - -Remove the vertex `v` from graph `g`. Return false if removal fails -(e.g., if vertex is not in the graph), true otherwise. -""" -rem_vertex!(x, v) = _NI("rem_vertex!") - -""" - rem_edge!(g, e) - -Remove the edge `e` from `g`. Return false if edge removal fails -(e.g., if edge does not exist), true otherwise. -""" -rem_edge!(x, e) = _NI("rem_edge!") """ has_vertex(g, v) @@ -174,24 +143,24 @@ calls to `has_edge`, c.f. [`edges`](@ref). has_edge(x, e) = _NI("has_edge") """ - in_neighbors(g, v) + inneighbors(g, v) Return a list of all neighbors connected to vertex `v` by an incoming edge. ### Implementation Notes Returns a reference, not a copy. Do not modify result. """ -in_neighbors(x, v) = _NI("in_neighbors") +inneighbors(x, v) = _NI("inneighbors") """ - out_neighbors(g, v) + outneighbors(g, v) Return a list of all neighbors connected to vertex `v` by an outgoing edge. # Implementation Notes Returns a reference, not a copy. Do not modify result. 
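Since the rename from `in_neighbors`/`out_neighbors` is the substance of this interface change, here is a minimal sketch of the new accessor names on a concrete `SimpleDiGraph`, including the copy-before-mutating caveat from the docstring above.

```julia
using LightGraphs

g = SimpleDiGraph(3)
add_edge!(g, 1, 2); add_edge!(g, 3, 2)

inneighbors(g, 2)                 # [1, 3]
outneighbors(g, 1)                # [2]
nbrs = copy(inneighbors(g, 2))    # take a copy before modifying, per the docstring
```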
""" -out_neighbors(x, v) = _NI("out_neighbors") +outneighbors(x, v) = _NI("outneighbors") """ zero(g) diff --git a/src/linalg/LinAlg.jl b/src/linalg/LinAlg.jl index d38e4a3c1..987d11ed1 100644 --- a/src/linalg/LinAlg.jl +++ b/src/linalg/LinAlg.jl @@ -1,13 +1,15 @@ module LinAlg using SimpleTraits +import SparseArrays +import LinearAlgebra +import IterativeEigensolvers using ..LightGraphs -import LightGraphs: IsDirected, adjacency_matrix, laplacian_matrix, laplacian_spectrum, AbstractGraph, in_neighbors, -out_neighbors, all_neighbors, is_directed, nv, ne, has_edge, vertices - -import Base: convert, sparse, size, diag, eltype, ndims, ==, *, .*, issymmetric, A_mul_B!, length, Diagonal +import LightGraphs: IsDirected, AbstractGraph, inneighbors, +outneighbors, all_neighbors, is_directed, nv, ne, has_edge, vertices +import Base: convert, size, eltype, ndims, ==, *, .*, length export convert, SparseMatrix, diff --git a/src/linalg/graphmatrices.jl b/src/linalg/graphmatrices.jl index 56f47b7c5..4f9f071aa 100644 --- a/src/linalg/graphmatrices.jl +++ b/src/linalg/graphmatrices.jl @@ -1,7 +1,7 @@ -const SparseMatrix{T} = SparseMatrixCSC{T,Int64} +const SparseMatrix{T} = SparseArrays.SparseMatrixCSC{T,Int64} """ - GraphMatrix{T} + GraphMatrix{T} An abstract type to allow opertions on any type of graph matrix """ @@ -9,7 +9,7 @@ abstract type GraphMatrix{T} end """ - Adjacency{T} + Adjacency{T} The core Adjacency matrix structure. Keeps the vertex degrees around. Subtypes are used to represent the different normalizations of the adjacency matrix. @@ -25,81 +25,81 @@ abstract type Adjacency{T} <: GraphMatrix{T} end abstract type Laplacian{T} <: GraphMatrix{T} end """ - CombinatorialAdjacency{T,S,V} + CombinatorialAdjacency{T,S,V} The standard adjacency matrix. """ struct CombinatorialAdjacency{T,S,V} <: Adjacency{T} - A::S - D::V + A::S + D::V end function CombinatorialAdjacency(A::SparseMatrix{T}) where T - D = vec(sum(A, 1)) - return CombinatorialAdjacency{T,SparseMatrix{T},typeof(D)}(A, D) + D = vec(sum(A, dims=1)) + return CombinatorialAdjacency{T,SparseMatrix{T},typeof(D)}(A, D) end -@doc_str """ - NormalizedAdjacency{T} +""" + NormalizedAdjacency{T} The normalized adjacency matrix is ``\\hat{A} = D^{-1/2} A D^{-1/2}``. If A is symmetric, then the normalized adjacency is also symmetric with real eigenvalues bounded by [-1, 1]. """ struct NormalizedAdjacency{T} <: Adjacency{T} - A::CombinatorialAdjacency{T} - scalefactor::Vector{T} + A::CombinatorialAdjacency{T} + scalefactor::Vector{T} end function NormalizedAdjacency(adjmat::CombinatorialAdjacency) - sf = adjmat.D.^(-1 / 2) - return NormalizedAdjacency(adjmat, sf) + sf = adjmat.D.^(-1 / 2) + return NormalizedAdjacency(adjmat, sf) end """ - StochasticAdjacency{T} + StochasticAdjacency{T} A transition matrix for the random walk. """ struct StochasticAdjacency{T} <: Adjacency{T} - A::CombinatorialAdjacency{T} - scalefactor::Vector{T} + A::CombinatorialAdjacency{T} + scalefactor::Vector{T} end function StochasticAdjacency(adjmat::CombinatorialAdjacency) - sf = adjmat.D.^(-1) - return StochasticAdjacency(adjmat, sf) + sf = adjmat.D.^(-1) + return StochasticAdjacency(adjmat, sf) end """ - AveragingAdjacency{T} + AveragingAdjacency{T} The matrix whose action is to average over each neighborhood. 
""" struct AveragingAdjacency{T} <: Adjacency{T} - A::CombinatorialAdjacency{T} - scalefactor::Vector{T} + A::CombinatorialAdjacency{T} + scalefactor::Vector{T} end function AveragingAdjacency(adjmat::CombinatorialAdjacency) - sf = adjmat.D.^(-1) - return AveragingAdjacency(adjmat, sf) + sf = adjmat.D.^(-1) + return AveragingAdjacency(adjmat, sf) end -perron(adjmat::NormalizedAdjacency) = sqrt.(adjmat.A.D) / norm(sqrt.(adjmat.A.D)) +perron(adjmat::NormalizedAdjacency) = sqrt.(adjmat.A.D) / LinearAlgebra.norm(sqrt.(adjmat.A.D)) struct PunchedAdjacency{T} <: Adjacency{T} - A::NormalizedAdjacency{T} - perron::Vector{T} + A::NormalizedAdjacency{T} + perron::Vector{T} end function PunchedAdjacency(adjmat::CombinatorialAdjacency) - perron = sqrt.(adjmat.D) / norm(sqrt.(adjmat.D)) - return PunchedAdjacency(NormalizedAdjacency(adjmat), perron) + perron = sqrt.(adjmat.D) / LinearAlgebra.norm(sqrt.(adjmat.D)) + return PunchedAdjacency(NormalizedAdjacency(adjmat), perron) end perron(m::PunchedAdjacency) = m.perron """ - Noop + Noop A type that represents no action. @@ -109,9 +109,9 @@ different scaled GraphMatrix types. """ struct Noop end -Base.broadcast(::typeof(*), ::Noop, x) = x +Broadcast.broadcasted(::typeof(*), ::Noop, x) = x -Diagonal(::Noop) = Noop() +LinearAlgebra.Diagonal(::Noop) = Noop() ==(g::GraphMatrix, h::GraphMatrix) = typeof(g) == typeof(h) && (g.A == h.A) @@ -130,56 +130,56 @@ prescalefactor(adjmat::StochasticAdjacency) = adjmat.scalefactor struct CombinatorialLaplacian{T} <: Laplacian{T} - A::CombinatorialAdjacency{T} + A::CombinatorialAdjacency{T} end -@doc_str """ - NormalizedLaplacian{T} +""" + NormalizedLaplacian{T} The normalized Laplacian is ``\\hat{L} = I - D^{-1/2} A D^{-1/2}``. If A is symmetric, then the normalized Laplacian is also symmetric with positive eigenvalues bounded by 2. """ struct NormalizedLaplacian{T} <: Laplacian{T} - A::NormalizedAdjacency{T} + A::NormalizedAdjacency{T} end """ - StochasticLaplacian{T} + StochasticLaplacian{T} Laplacian version of the StochasticAdjacency matrix. """ struct StochasticLaplacian{T} <: Laplacian{T} - A::StochasticAdjacency{T} + A::StochasticAdjacency{T} end """ - AveragingLaplacian{T} + AveragingLaplacian{T} Laplacian version of the AveragingAdjacency matrix. """ struct AveragingLaplacian{T} <: Laplacian{T} - A::AveragingAdjacency{T} + A::AveragingAdjacency{T} end -arrayfunctions = (:eltype, :length, :ndims, :size, :strides, :issymmetric) +arrayfunctions = (:eltype, :length, :ndims, :size, :strides) for f in arrayfunctions - @eval $f(a::GraphMatrix) = $f(a.A) + @eval $f(a::GraphMatrix) = $f(a.A) end - +LinearAlgebra.issymmetric(a::GraphMatrix) = LinearAlgebra.issymmetric(a.A) size(a::GraphMatrix, i::Integer) = size(a.A, i) -issymmetric(::StochasticAdjacency) = false -issymmetric(::AveragingAdjacency) = false +LinearAlgebra.issymmetric(::StochasticAdjacency) = false +LinearAlgebra.issymmetric(::AveragingAdjacency) = false """ - degrees(adjmat) + degrees(adjmat) Return the degrees of a graph represented by the [`CombinatorialAdjacency`](@ref) `adjmat`. """ degrees(adjmat::CombinatorialAdjacency) = adjmat.D """ - degrees(graphmx) + degrees(graphmx) Return the degrees of a graph represented by the graph matrix `graphmx`. 
""" @@ -194,116 +194,116 @@ convert(::Type{CombinatorialAdjacency}, adjmat::Adjacency) = adjmat.A convert(::Type{CombinatorialAdjacency}, adjmat::CombinatorialAdjacency) = adjmat -function sparse(lapl::M) where M<:Laplacian - adjmat = adjacency(lapl) - A = sparse(adjmat) - L = spdiagm(diag(lapl)) - A - return L +function SparseArrays.sparse(lapl::M) where M <: Laplacian + adjmat = adjacency(lapl) + A = SparseArrays.sparse(adjmat) + L = SparseArrays.sparse(LinearAlgebra.Diagonal(SparseArrays.diag(lapl))) - A + return L end -function sparse(adjmat::Adjacency) - A = sparse(adjmat.A) - return Diagonal(prescalefactor(adjmat)) * (A * Diagonal(postscalefactor(adjmat))) +function SparseMatrix(lapl::M) where M <: GraphMatrix + return SparseArrays.sparse(lapl) end -function convert(::Type{SparseMatrix{T}}, adjmat::Adjacency{T}) where T - A = sparse(adjmat.A) - return Diagonal(prescalefactor(adjmat)) * (A * Diagonal(postscalefactor(adjmat))) +function SparseArrays.sparse(adjmat::Adjacency) + A = SparseArrays.sparse(adjmat.A) + return LinearAlgebra.Diagonal(prescalefactor(adjmat)) * (A * LinearAlgebra.Diagonal(postscalefactor(adjmat))) end + function convert(::Type{SparseMatrix{T}}, lapl::Laplacian{T}) where T - adjmat = adjacency(lapl) - A = convert(SparseMatrix{T}, adjmat) - L = spdiagm(diag(lapl)) - A - return L + adjmat = adjacency(lapl) + A = convert(SparseMatrix{T}, adjmat) + L = SparseArrays.sparse(LinearAlgebra.Diagonal(SparseArrays.diag(lapl))) - A + return L end -diag(lapl::CombinatorialLaplacian) = lapl.A.D -diag(lapl::Laplacian) = ones(size(lapl)[2]) +SparseArrays.diag(lapl::CombinatorialLaplacian) = lapl.A.D +SparseArrays.diag(lapl::Laplacian) = ones(size(lapl)[2]) *(x::AbstractArray, ::Noop) = x *(::Noop, x) = x -*(adjmat::Adjacency{T}, x::AbstractVector{T}) where T<:Number = - postscalefactor(adjmat) .* (adjmat.A * (prescalefactor(adjmat) .* x)) +*(adjmat::Adjacency{T}, x::AbstractVector{T}) where T <: Number = + postscalefactor(adjmat) .* (adjmat.A * (prescalefactor(adjmat) .* x)) -*(adjmat::CombinatorialAdjacency{T}, x::AbstractVector{T}) where T<:Number = - adjmat.A * x +*(adjmat::CombinatorialAdjacency{T}, x::AbstractVector{T}) where T <: Number = + adjmat.A * x -*(lapl::Laplacian{T}, x::AbstractVector{T}) where T<:Number = - (diag(lapl) .* x) - (adjacency(lapl) * x) +*(lapl::Laplacian{T}, x::AbstractVector{T}) where T <: Number = + (SparseArrays.diag(lapl) .* x) - (adjacency(lapl) * x) -function *(adjmat::PunchedAdjacency{T}, x::AbstractVector{T}) where T<:Number +function *(adjmat::PunchedAdjacency{T}, x::AbstractVector{T}) where T <: Number y = adjmat.A * x - return y - dot(adjmat.perron, y) * adjmat.perron + return y - LinearAlgebra.dot(adjmat.perron, y) * adjmat.perron end -function A_mul_B!(Y, A::Adjacency, B) +function LinearAlgebra.mul!(Y, A::Adjacency, B) # we need to do 3 matrix products - # Y and B can't overlap in any one call to A_mul_B! - # The last call to A_mul_B! must be (Y, postscalefactor, tmp) + # Y and B can't overlap in any one call to mul! + # The last call to mul! 
must be (Y, postscalefactor, tmp) # so we need to write to tmp in the second step must be (tmp, A.A, Y) # and the first step (Y, prescalefactor, B) - tmp1 = Diagonal(prescalefactor(A)) * B + tmp1 = LinearAlgebra.Diagonal(prescalefactor(A)) * B tmp = similar(Y) - A_mul_B!(tmp, A.A, tmp1) - return A_mul_B!(Y, Diagonal(postscalefactor(A)), tmp) + LinearAlgebra.mul!(tmp, A.A, tmp1) + return LinearAlgebra.mul!(Y, LinearAlgebra.Diagonal(postscalefactor(A)), tmp) end -A_mul_B!(Y, A::CombinatorialAdjacency, B) = A_mul_B!(Y, A.A, B) +LinearAlgebra.mul!(Y, A::CombinatorialAdjacency, B) = LinearAlgebra.mul!(Y, A.A, B) # You can compute the StochasticAdjacency product without allocating a similar of Y. # This is true for all Adjacency where the postscalefactor is a Noop # at time of writing this is just StochasticAdjacency and CombinatorialAdjacency -function A_mul_B!(Y, A::StochasticAdjacency, B) - tmp = Diagonal(prescalefactor(A)) * B - A_mul_B!(Y, A.A, tmp) +function LinearAlgebra.mul!(Y, A::StochasticAdjacency, B) + tmp = LinearAlgebra.Diagonal(prescalefactor(A)) * B + LinearAlgebra.mul!(Y, A.A, tmp) return Y end -function A_mul_B!(Y, adjmat::PunchedAdjacency, x) +function LinearAlgebra.mul!(Y, adjmat::PunchedAdjacency, x) y = adjmat.A * x - Y[:] = y - dot(adjmat.perron, y) * adjmat.perron + Y[:] = y - LinearAlgebra.dot(adjmat.perron, y) * adjmat.perron return Y end -function A_mul_B!(Y, lapl::Laplacian, B) - A_mul_B!(Y, lapl.A, B) - z = diag(lapl) .* B - Y[:] = z - Y[:] - return Y +function LinearAlgebra.mul!(Y, lapl::Laplacian, B) + LinearAlgebra.mul!(Y, lapl.A, B) + z = SparseArrays.diag(lapl) .* B + Y[:] = z - Y[:] + return Y end """ - symmetrize(A::SparseMatrix, which=:or) + symmetrize(A::SparseMatrix, which=:or) Return a symmetric version of graph (represented by sparse matrix `A`) as a sparse matrix. `which` may be one of `:triu`, `:tril`, `:sum`, or `:or`. Use `:sum` for weighted graphs. """ function symmetrize(A::SparseMatrix, which=:or) - if which == :or - M = A + A' - M.nzval[M.nzval .== 2] = 1 - return M + if which == :or + M = A + SparseArrays.sparse(A') + M.nzval[M.nzval .== 2] .= 1 + return M end - T = A - if which == :triu - T = triu(A) - elseif which == :tril - T = tril(A) + T = A + if which == :triu + T = LinearAlgebra.triu(A) + elseif which == :tril + T = LinearAlgebra.tril(A) elseif which == :sum - T = A + T = A else throw(ArgumentError("$which is not a supported method of symmetrizing a matrix")) end - M = T + T' - return M + M = T + SparseArrays.sparse(T') + return M end """ - symmetrize(adjmat, which=:or) + symmetrize(adjmat, which=:or) Return a symmetric version of graph (represented by [`CombinatorialAdjacency`](@ref) `adjmat`) as a [`CombinatorialAdjacency`](@ref). `which` may be one of `:triu`, `:tril`, `:sum`, or `:or`. @@ -313,18 +313,18 @@ Use `:sum` for weighted graphs. Only works on [`Adjacency`](@ref) because the normalizations don't commute with symmetrization. """ symmetrize(adjmat::CombinatorialAdjacency, which=:or) = - CombinatorialAdjacency(symmetrize(adjmat.A, which)) + CombinatorialAdjacency(symmetrize(adjmat.A, which)) # per #564 -@deprecate A_mul_B!(Y, A::Noop, B) None +# @deprecate LinearAlgebra.mul!(Y, A::Noop, B) None @deprecate convert(::Type{Adjacency}, lapl::Laplacian) None -@deprecate convert(::Type{SparseMatrix}, adjmat::CombinatorialAdjacency) sparse(adjmat) +@deprecate convert(::Type{SparseMatrix}, adjmat::GraphMatrix) SparseArrays.sparse(adjmat) """ - LinAlg + LinAlg A package for using the type system to check types of graph matrices. 
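A small sketch of `symmetrize` on a hand-written sparse pattern, assuming the LinAlg submodule is loaded: `:or` keeps an undirected edge wherever either direction had one, while `:sum` adds the two directions (useful for weighted graphs).

```julia
import SparseArrays
using LightGraphs.LinAlg

A = SparseArrays.sparse([0 1 0; 0 0 1; 0 0 0])   # arcs 1->2 and 2->3
Matrix(symmetrize(A, :or))    # 0/1 pattern with undirected edges {1,2} and {2,3}
Matrix(symmetrize(A, :sum))   # identical here, since no arc appears in both directions
```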
""" diff --git a/src/linalg/nonbacktracking.jl b/src/linalg/nonbacktracking.jl index 75ca4f2ca..6ce6ff0f9 100644 --- a/src/linalg/nonbacktracking.jl +++ b/src/linalg/nonbacktracking.jl @@ -1,5 +1,5 @@ -@doc_str """ +""" non_backtracking_matrix(g) Return a non-backtracking matrix `B` and an edgemap storing the oriented @@ -30,7 +30,7 @@ function non_backtracking_matrix(g::AbstractGraph) for (e, u) in edgeidmap i, j = src(e), dst(e) - for k in in_neighbors(g, i) + for k in inneighbors(g, i) k == j && continue v = edgeidmap[Edge(k, i)] B[v, u] = 1 @@ -40,7 +40,7 @@ function non_backtracking_matrix(g::AbstractGraph) return B, edgeidmap end -@doc_str """ +""" Nonbacktracking{G} A compact representation of the nonbacktracking operator. @@ -61,7 +61,7 @@ Additionally the `contract!(vertexspace, nbt, edgespace)` method takes vectors represented in the domain of ``B`` and represents them in the domain of the adjacency matrix of `g`. """ -struct Nonbacktracking{G<:AbstractGraph} +struct Nonbacktracking{G <: AbstractGraph} g::G edgeidmap::Dict{Edge,Int} m::Int @@ -85,14 +85,14 @@ end size(nbt::Nonbacktracking) = (nbt.m, nbt.m) eltype(nbt::Nonbacktracking) = Float64 -issymmetric(nbt::Nonbacktracking) = false +LinearAlgebra.issymmetric(nbt::Nonbacktracking) = false -function *(nbt::Nonbacktracking, x::Vector{T}) where T<:Number +function *(nbt::Nonbacktracking, x::Vector{T}) where T <: Number length(x) == nbt.m || error("dimension mismatch") y = zeros(T, length(x)) for (e, u) in nbt.edgeidmap i, j = src(e), dst(e) - for k in in_neighbors(nbt.g, i) + for k in inneighbors(nbt.g, i) k == j && continue v = nbt.edgeidmap[Edge(k, i)] y[v] += x[u] @@ -100,7 +100,7 @@ function *(nbt::Nonbacktracking, x::Vector{T}) where T<:Number end return y end -function A_mul_B!(C, nbt::Nonbacktracking, B) +function LinearAlgebra.mul!(C, nbt::Nonbacktracking, B) # computs C = A * B for i in 1:size(B, 2) C[:, i] = nbt * B[:, i] @@ -114,7 +114,7 @@ function coo_sparse(nbt::Nonbacktracking) I, J = zeros(Int, 0), zeros(Int, 0) for (e, u) in nbt.edgeidmap i, j = src(e), dst(e) - for k in in_neighbors(nbt.g, i) + for k in inneighbors(nbt.g, i) k == j && continue v = nbt.edgeidmap[Edge(k, i)] #= J[u] = v =# @@ -126,10 +126,10 @@ function coo_sparse(nbt::Nonbacktracking) return I, J, 1.0 end -sparse(nbt::Nonbacktracking) = sparse(coo_sparse(nbt)..., nbt.m, nbt.m) +SparseArrays.sparse(nbt::Nonbacktracking) = SparseArrays.sparse(coo_sparse(nbt)..., nbt.m, nbt.m) function *(nbt::Nonbacktracking, x::AbstractMatrix) - y = zeros(x) + y = zero(x) for i in 1:nbt.m y[:, i] = nbt * x[:, i] end diff --git a/src/linalg/spectral.jl b/src/linalg/spectral.jl index c2b781719..31c5504cb 100644 --- a/src/linalg/spectral.jl +++ b/src/linalg/spectral.jl @@ -6,7 +6,7 @@ Return a sparse adjacency matrix for a graph, indexed by `[u, v]` vertices. Non-zero values indicate an edge between `u` and `v`. Users may override the default data type (`Int`) and specify an optional direction. - + ### Optional Arguments `dir=:out`: `:in`, `:out`, or `:both` are currently supported. @@ -19,15 +19,15 @@ function adjacency_matrix(g::AbstractGraph, T::DataType=Int; dir::Symbol=:out) # "opposite" neighbor function. It's faster than taking the transpose # at the end. 
if (dir == :out) - _adjacency_matrix(g, T, in_neighbors, 1) + _adjacency_matrix(g, T, inneighbors, 1) elseif (dir == :in) - _adjacency_matrix(g, T, out_neighbors, 1) + _adjacency_matrix(g, T, outneighbors, 1) elseif (dir == :both) _adjacency_matrix(g, T, all_neighbors, 1) if is_directed(g) _adjacency_matrix(g, T, all_neighbors, 2) else - _adjacency_matrix(g, T, out_neighbors, 1) + _adjacency_matrix(g, T, outneighbors, 1) end else error("Not implemented") @@ -38,7 +38,7 @@ function _adjacency_matrix(g::AbstractGraph{U}, T::DataType, neighborfn::Functio n_v = nv(g) nz = ne(g) * (is_directed(g) ? 1 : 2) * nzmult colpt = ones(U, n_v + 1) - + rowval = sizehint!(Vector{U}(), nz) selfloops = Vector{U}() for j in 1:n_v # this is by column, not by row. @@ -49,7 +49,7 @@ function _adjacency_matrix(g::AbstractGraph{U}, T::DataType, neighborfn::Functio colpt[j + 1] = colpt[j] + length(dsts) append!(rowval, sort!(dsts)) end - spmx = SparseMatrixCSC(n_v, n_v, colpt, rowval, ones(T, nz)) + spmx = SparseArrays.SparseMatrixCSC(n_v, n_v, colpt, rowval, ones(T, nz)) # this is inefficient. There should be a better way of doing this. # the issue is that adjacency matrix entries for self-loops are 2, @@ -73,18 +73,18 @@ for a graph `g`, indexed by `[u, v]` vertices. `T` defaults to `Int` for both gr ### Optional Arguments `dir=:unspec`: `:unspec`, `:both`, :in`, and `:out` are currently supported. For undirected graphs, `dir` defaults to `:out`; for directed graphs, -`dir` defaults to `:both`. +`dir` defaults to `:both`. """ function laplacian_matrix(g::AbstractGraph{U}, T::DataType=Int; dir::Symbol=:unspec) where U if dir == :unspec dir = is_directed(g) ? :both : :out end A = adjacency_matrix(g, T; dir=dir) - D = convert(SparseMatrixCSC{T, U}, spdiagm(sum(A, 2)[:])) + D = convert(SparseArrays.SparseMatrixCSC{T,U}, LinearAlgebra.Diagonal(SparseArrays.sparse(sum(A, dims=2)[:]))) return D - A end -@doc_str """ +""" laplacian_spectrum(g[, T=Int; dir=:unspec]) Return the eigenvalues of the Laplacian matrix for a graph `g`, indexed @@ -98,12 +98,12 @@ by vertex. Default values for `T` are the same as those in Converts the matrix to dense with ``nv^2`` memory usage. ### Implementation Notes -Use `eigs(laplacian_matrix(g); kwargs...)` to compute some of the +Use `IterativeEigensolvers.eigs(laplacian_matrix(g); kwargs...)` to compute some of the eigenvalues/eigenvectors. """ -laplacian_spectrum(g::AbstractGraph, T::DataType=Int; dir::Symbol=:unspec) = eigvals(full(laplacian_matrix(g, T; dir=dir))) +laplacian_spectrum(g::AbstractGraph, T::DataType=Int; dir::Symbol=:unspec) = LinearAlgebra.eigvals(Matrix(laplacian_matrix(g, T; dir=dir))) -@doc_str """ +""" Return the eigenvalues of the adjacency matrix for a graph `g`, indexed by vertex. Default values for `T` are the same as those in [`adjacency_matrix`](@ref). @@ -115,14 +115,14 @@ by vertex. Default values for `T` are the same as those in Converts the matrix to dense with ``nv^2`` memory usage. ### Implementation Notes -Use `eigs(adjacency_matrix(g); kwargs...)` to compute some of the +Use `IterativeEigensolvers.eigs(adjacency_matrix(g); kwargs...)` to compute some of the eigenvalues/eigenvectors. """ function adjacency_spectrum(g::AbstractGraph, T::DataType=Int; dir::Symbol=:unspec) if dir == :unspec dir = is_directed(g) ? 
:both : :out end - return eigvals(full(adjacency_matrix(g, T; dir=dir))) + return LinearAlgebra.eigvals(Matrix(adjacency_matrix(g, T; dir=dir))) end """ @@ -147,13 +147,13 @@ function incidence_matrix(g::AbstractGraph, T::DataType=Int; oriented=false) # every col has the same 2 entries colpt = collect(1:2:(nz + 1)) - nzval = repmat([(isdir || oriented) ? -one(T) : one(T), one(T)], n_e) + nzval = repeat([(isdir || oriented) ? -one(T) : one(T), one(T)], n_e) # iterate over edges for row indices rowval = zeros(Int, nz) i = 1 for u in vertices(g) - for v in out_neighbors(g, u) + for v in outneighbors(g, u) if isdir || u < v # add every edge only once rowval[2 * i - 1] = u rowval[2 * i] = v @@ -162,11 +162,11 @@ function incidence_matrix(g::AbstractGraph, T::DataType=Int; oriented=false) end end - spmx = SparseMatrixCSC(n_v, n_e, colpt, rowval, nzval) + spmx = SparseArrays.SparseMatrixCSC(n_v, n_e, colpt, rowval, nzval) return spmx end -@doc_str """ +""" spectral_distance(G₁, G₂ [, k]) Compute the spectral distance between undirected n-vertex @@ -179,18 +179,18 @@ If `k` is ommitted, uses full spectrum. function spectral_distance end # can't use Traitor syntax here (https://github.com/mauro3/SimpleTraits.jl/issues/36) -@traitfn function spectral_distance{G<:AbstractGraph; !IsDirected{G}}(G₁::G, G₂::G, k::Integer) +@traitfn function spectral_distance(G₁::G, G₂::G, k::Integer) where {G <: AbstractGraph; !IsDirected{G}} A₁ = adjacency_matrix(G₁) A₂ = adjacency_matrix(G₂) - λ₁ = k < nv(G₁) - 1 ? eigs(A₁, nev=k, which=:LR)[1] : eigvals(full(A₁))[end:-1:(end - (k - 1))] - λ₂ = k < nv(G₂) - 1 ? eigs(A₂, nev=k, which=:LR)[1] : eigvals(full(A₂))[end:-1:(end - (k - 1))] + λ₁ = k < nv(G₁) - 1 ? IterativeEigensolvers.eigs(A₁, nev=k, which=:LR)[1] : LinearAlgebra.eigvals(Matrix(A₁))[end:-1:(end - (k - 1))] + λ₂ = k < nv(G₂) - 1 ? IterativeEigensolvers.eigs(A₂, nev=k, which=:LR)[1] : LinearAlgebra.eigvals(Matrix(A₂))[end:-1:(end - (k - 1))] return sum(abs, (λ₁ - λ₂)) end # can't use Traitor syntax here (https://github.com/mauro3/SimpleTraits.jl/issues/36) -@traitfn function spectral_distance{G<:AbstractGraph; !IsDirected{G}}(G₁::G, G₂::G) +@traitfn function spectral_distance(G₁::G, G₂::G) where {G <: AbstractGraph; !IsDirected{G}} nv(G₁) == nv(G₂) || throw(ArgumentError("Spectral distance not defined for |G₁| != |G₂|")) return spectral_distance(G₁, G₂, nv(G₁)) end diff --git a/src/operators.jl b/src/operators.jl index 0e5bf7e93..8e2c07165 100644 --- a/src/operators.jl +++ b/src/operators.jl @@ -24,9 +24,9 @@ function complement(g::DiGraph) gnv = nv(g) h = SimpleDiGraph(gnv) for i in vertices(g), j in vertices(g) - if i != j && !has_edge(g, i, j) - add_edge!(h, i, j) - end + if i != j && !has_edge(g, i, j) + add_edge!(h, i, j) + end end return h end @@ -62,8 +62,8 @@ function reverse! end return g end -doc""" - blkdiag(g, h) +""" + blockdiag(g, h) Return a graph with ``|V(g)| + |V(h)|`` vertices and ``|E(g)| + |E(h)|`` edges where the vertices an edges from graph `h` are appended to graph `g`. @@ -72,7 +72,7 @@ edges where the vertices an edges from graph `h` are appended to graph `g`. Preserves the eltype of the input graph. Will error if the number of vertices in the generated graph exceeds the eltype. """ -function blkdiag(g::T, h::T) where T<:AbstractGraph +function SparseArrays.blockdiag(g::T, h::T) where T <: AbstractGraph gnv = nv(g) r = T(gnv + nv(h)) for e in edges(g) @@ -93,7 +93,7 @@ Return a graph with edges that are only in both graph `g` and graph `h`. 
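To make the set-style semantics above concrete, here is a small sketch with two three-vertex graphs; after this change `blockdiag` lives in the `SparseArrays` namespace, so it is called qualified.

```julia
import SparseArrays
using LightGraphs

g = PathGraph(3)     # edges 1-2, 2-3
h = CycleGraph(3)    # edges 1-2, 2-3, 1-3
b = SparseArrays.blockdiag(g, h)
nv(b), ne(b)         # (6, 5): h's vertices and edges appended after g's
ne(intersect(g, h))  # 2: only the edges 1-2 and 2-3 appear in both graphs
```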
This function may produce a graph with 0-degree vertices. Preserves the eltype of the input graph. """ -function intersect(g::T, h::T) where T<:AbstractGraph +function intersect(g::T, h::T) where T <: AbstractGraph gnv = nv(g) hnv = nv(h) @@ -113,7 +113,7 @@ Return a graph with edges in graph `g` that are not in graph `h`. Note that this function may produce a graph with 0-degree vertices. Preserves the eltype of the input graph. """ -function difference(g::T, h::T) where T<:AbstractGraph +function difference(g::T, h::T) where T <: AbstractGraph gnv = nv(g) hnv = nv(h) @@ -135,7 +135,7 @@ Note that this function may produce a graph with 0-degree vertices. Preserves the eltype of the input graph. Will error if the number of vertices in the generated graph exceeds the eltype. """ -function symmetric_difference(g::T, h::T) where T<:AbstractGraph +function symmetric_difference(g::T, h::T) where T <: AbstractGraph gnv = nv(g) hnv = nv(h) @@ -159,7 +159,7 @@ of all vertices and edges. Preserves the eltype of the input graph. Will error if the number of vertices in the generated graph exceeds the eltype. """ -function union(g::T, h::T) where T<:AbstractGraph +function union(g::T, h::T) where T <: AbstractGraph gnv = nv(g) hnv = nv(h) @@ -181,15 +181,15 @@ end """ join(g, h) -Return a graph that combines graphs `g` and `h` using `blkdiag` and then +Return a graph that combines graphs `g` and `h` using `blockdiag` and then adds all the edges between the vertices in `g` and those in `h`. ### Implementation Notes Preserves the eltype of the input graph. Will error if the number of vertices in the generated graph exceeds the eltype. """ -function join(g::T, h::T) where T<:AbstractGraph - r = blkdiag(g, h) +function join(g::T, h::T) where T <: AbstractGraph + r = SparseArrays.blockdiag(g, h) for i in vertices(g) for j = (nv(g) + 1):(nv(g) + nv(h)) add_edge!(r, i, j) @@ -211,7 +211,7 @@ in the generated graph exceeds the eltype. """ function crosspath end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function crosspath{T, AG<:AbstractGraph{T}}(len::Integer, g::AG::(!IsDirected)) +@traitfn function crosspath(len::Integer, g::AG::(!IsDirected)) where {T, AG <: AbstractGraph{T}} p = PathGraph(len) h = Graph{T}(p) return cartesian_product(h, g) @@ -221,7 +221,7 @@ end # """Provides multiplication of a graph `g` by a vector `v` such that spectral # graph functions in [GraphMatrices.jl](https://github.com/jpfairbanks/GraphMatrices.jl) can utilize LightGraphs natively. # """ -function *(g::Graph, v::Vector{T}) where T<:Real +function *(g::Graph, v::Vector{T}) where T <: Real length(v) == nv(g) || throw(ArgumentError("Vector size must equal number of vertices")) y = zeros(T, nv(g)) for e in edges(g) @@ -233,7 +233,7 @@ function *(g::Graph, v::Vector{T}) where T<:Real return y end -function *(g::DiGraph, v::Vector{T}) where T<:Real +function *(g::DiGraph, v::Vector{T}) where T <: Real length(v) == nv(g) || throw(ArgumentError("Vector size must equal number of vertices")) y = zeros(T, nv(g)) for e in edges(g) @@ -276,11 +276,11 @@ sum(g::AbstractGraph) = ne(g) Return the default adjacency matrix of `g`. """ -sparse(g::AbstractGraph) = adjacency_matrix(g) +SparseArrays.sparse(g::AbstractGraph) = adjacency_matrix(g) length(g::AbstractGraph) = nv(g) * nv(g) ndims(g::AbstractGraph) = 2 -issymmetric(g::AbstractGraph) = !is_directed(g) +LinearAlgebra.issymmetric(g::AbstractGraph) = !is_directed(g) """ cartesian_product(g, h) @@ -292,7 +292,7 @@ of `g` and `h`. 
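One concrete instance of the product just described: the cartesian product of two paths is a rectangular lattice, the same kind of graph the `Grid` generator earlier in this patch produces.

```julia
using LightGraphs

p = cartesian_product(PathGraph(2), PathGraph(3))
nv(p), ne(p)    # (6, 7): a 2x3 lattice, with 1*3 + 2*2 = 7 edges
```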
Preserves the eltype of the input graph. Will error if the number of vertices in the generated graph exceeds the eltype. """ -function cartesian_product(g::G, h::G) where G<:AbstractGraph +function cartesian_product(g::G, h::G) where G <: AbstractGraph z = G(nv(g) * nv(h)) id(i, j) = (i - 1) * nv(h) + j for e in edges(g) @@ -321,7 +321,7 @@ of `g` and `h`. Preserves the eltype of the input graph. Will error if the number of vertices in the generated graph exceeds the eltype. """ -function tensor_product(g::G, h::G) where G<:AbstractGraph +function tensor_product(g::G, h::G) where G <: AbstractGraph z = G(nv(g) * nv(h)) id(i, j) = (i - 1) * nv(h) + j for e1 in edges(g) @@ -374,11 +374,11 @@ julia> sg, vmap = induced_subgraph(g, elist) julia> @assert sg == g[elist] ``` """ -function induced_subgraph(g::T, vlist::AbstractVector{U}) where T<:AbstractGraph where U<:Integer +function induced_subgraph(g::T, vlist::AbstractVector{U}) where T <: AbstractGraph where U <: Integer allunique(vlist) || throw(ArgumentError("Vertices in subgraph list must be unique")) h = T(length(vlist)) newvid = Dict{U,U}() - vmap = Vector{U}(length(vlist)) + vmap = Vector{U}(undef, length(vlist)) for (i, v) in enumerate(vlist) newvid[v] = U(i) vmap[i] = v @@ -386,7 +386,7 @@ function induced_subgraph(g::T, vlist::AbstractVector{U}) where T<:AbstractGraph vset = Set(vlist) for s in vlist - for d in out_neighbors(g, s) + for d in outneighbors(g, s) # println("s = $s, d = $d") if d in vset && has_edge(g, s, d) newe = Edge(newvid[s], newvid[d]) @@ -398,7 +398,7 @@ function induced_subgraph(g::T, vlist::AbstractVector{U}) where T<:AbstractGraph end -function induced_subgraph(g::AG, elist::AbstractVector{U}) where AG<:AbstractGraph{T} where T where U<:AbstractEdge +function induced_subgraph(g::AG, elist::AbstractVector{U}) where AG <: AbstractGraph{T} where T where U <: AbstractEdge h = zero(g) newvid = Dict{T,T}() vmap = Vector{T}() @@ -450,7 +450,7 @@ Determine how many elements of vs are less than i for all i in 1:n. """ function compute_shifts(n::Integer, x::AbstractArray) tmp = zeros(eltype(x), n) - tmp[x[2:end]] = 1 + tmp[x[2:end]] .= 1 return cumsum!(tmp, tmp) end @@ -463,12 +463,12 @@ function merge_vertices(g::AbstractGraph, vs) labels = collect(1:nv(g)) # Use lowest value as new vertex id. sort!(vs) - nvnew = nv(g) - length(unique(vs)) +1 + nvnew = nv(g) - length(unique(vs)) + 1 nvnew <= nv(g) || return g (v0, vm) = extrema(vs) v0 > 0 || throw(ArgumentError("invalid vertex ID: $v0 in list of vertices to be merged")) vm <= nv(g) || throw(ArgumentError("vertex $vm not found in graph")) # TODO 0.7: change to DomainError? - labels[vs] = v0 + labels[vs] .= v0 shifts = compute_shifts(nv(g), vs[2:end]) for v in vertices(g) if labels[v] != v0 @@ -501,23 +501,23 @@ Supports SimpleGraph only. 
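A brief usage sketch (illustrative; the exact return value is omitted):

```
g = PathGraph(5)            # 1-2-3-4-5
merge_vertices!(g, [2, 3])  # collapse vertices 2 and 3 into vertex 2
nv(g)                       # expected: 4
```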
""" function merge_vertices!(g::Graph{T}, vs::Vector{U} where U <: Integer) where T vs = sort!(unique(vs)) - merged_vertex = shift!(vs) + merged_vertex = popfirst!(vs) x = zeros(Int, nv(g)) - x[vs] = 1 + x[vs] .= 1 new_vertex_ids = collect(1:nv(g)) .- cumsum(x) - new_vertex_ids[vs] = merged_vertex + new_vertex_ids[vs] .= merged_vertex for i in vertices(g) # Adjust connections to merged vertices if (i != merged_vertex) && !insorted(i, vs) nbrs_to_rewire = Set{T}() - for j in out_neighbors(g, i) - if insorted(j, vs) - push!(nbrs_to_rewire, merged_vertex) - else - push!(nbrs_to_rewire, new_vertex_ids[j]) - end + for j in outneighbors(g, i) + if insorted(j, vs) + push!(nbrs_to_rewire, merged_vertex) + else + push!(nbrs_to_rewire, new_vertex_ids[j]) + end end g.fadjlist[new_vertex_ids[i]] = sort(collect(nbrs_to_rewire)) @@ -529,7 +529,7 @@ function merge_vertices!(g::Graph{T}, vs::Vector{U} where U <: Integer) where T push!(nbrs_to_merge, new_vertex_ids[element]) end - for j in vs, e in out_neighbors(g, j) + for j in vs, e in outneighbors(g, j) if new_vertex_ids[e] != merged_vertex push!(nbrs_to_merge, new_vertex_ids[e]) end diff --git a/src/persistence/lg.jl b/src/persistence/lg.jl index 000eb0b8d..91dbd028d 100644 --- a/src/persistence/lg.jl +++ b/src/persistence/lg.jl @@ -25,7 +25,7 @@ struct LGHeader code::String end function show(io::IO, h::LGHeader) - isdir = h.is_directed? "d" : "u" + isdir = h.is_directed ? "d" : "u" print(io, "$(h.nv),$(h.ne),$isdir,$(h.name),$(h.ver),$(h.dtype),$(h.code)") end @@ -60,7 +60,7 @@ end function _parse_header(s::AbstractString) addl_info = false nvstr, nestr, dirundir, graphname = split(s, r"s*,s*", limit=4) - if contains(graphname, ",") # version number and type + if occursin(",", graphname) # version number and type graphname, _ver, _dtype, graphcode = split(graphname, r"s*,s*") ver = parse(Int, _ver) dtype = eval(Symbol(_dtype)) diff --git a/src/shortestpaths/astar.jl b/src/shortestpaths/astar.jl index ee78db495..a803c8786 100644 --- a/src/shortestpaths/astar.jl +++ b/src/shortestpaths/astar.jl @@ -13,19 +13,19 @@ function a_star_impl!( ) while !isempty(frontier) - (cost_so_far, path, u) = dequeue!(frontier) + (cost_so_far, path, u) = DataStructures.dequeue!(frontier) if u == t return path end - for v in LightGraphs.out_neighbors(g, u) + for v in LightGraphs.outneighbors(g, u) if colormap[v] < 2 dist = distmx[u, v] colormap[v] = 1 - new_path = cat(1, path, Edge(u, v)) + new_path = cat(path, Edge(u, v), dims=1) path_cost = cost_so_far + dist - enqueue!(frontier, + DataStructures.enqueue!(frontier, (path_cost, new_path, v), path_cost + heuristic(v)) end @@ -53,7 +53,7 @@ function a_star( heuristic::Function = n -> 0 ) where T where U # heuristic (under)estimating distance to target - frontier = PriorityQueue{Tuple{T,Vector{Edge},U}, T}() + frontier = DataStructures.PriorityQueue{Tuple{T,Vector{Edge},U}, T}() frontier[(zero(T), Vector{Edge}(), s)] = zero(T) colormap = zeros(Int, nv(g)) colormap[s] = 1 diff --git a/src/shortestpaths/bellman-ford.jl b/src/shortestpaths/bellman-ford.jl index 3fa9ceac9..d6c56a928 100644 --- a/src/shortestpaths/bellman-ford.jl +++ b/src/shortestpaths/bellman-ford.jl @@ -30,13 +30,13 @@ function bellman_ford_shortest_paths!( ) where R<:Real where T<:Integer active = Set{T}(sources) - state.dists[sources] = state.parents[sources] = 0 + state.dists[sources] = state.parents[sources] .= 0 no_changes = false for i in one(T):nv(graph) no_changes = true new_active = Set{T}() for u in active - for v in out_neighbors(graph, u) + for v in 
outneighbors(graph, u) edist = distmx[u, v] if state.dists[v] > state.dists[u] + edist state.dists[v] = state.dists[u] + edist @@ -94,7 +94,7 @@ function enumerate_paths(state::AbstractPathState, vs::Vector{T}) where T<:Integ parents = state.parents num_vs = length(vs) - all_paths = Vector{Vector{T}}(num_vs) + all_paths = Vector{Vector{T}}(undef, num_vs) for i = 1:num_vs all_paths[i] = Vector{T}() index = vs[i] diff --git a/src/shortestpaths/dijkstra.jl b/src/shortestpaths/dijkstra.jl index 369a492dc..4d2d612f7 100644 --- a/src/shortestpaths/dijkstra.jl +++ b/src/shortestpaths/dijkstra.jl @@ -3,7 +3,7 @@ An [`AbstractPathState`](@ref) designed for Dijkstra shortest-paths calculations. """ -struct DijkstraState{T<:Real,U<:Integer} <: AbstractPathState +struct DijkstraState{T <: Real,U <: Integer} <: AbstractPathState parents::Vector{U} dists::Vector{T} predecessors::Vector{Vector{U}} @@ -22,13 +22,12 @@ Return a [`LightGraphs.DijkstraState`](@ref) that contains various traversal inf - `allpaths=false`: If true, returns a [`LightGraphs.DijkstraState`](@ref) that keeps track of all predecessors of a given vertex. """ -function dijkstra_shortest_paths( - g::AbstractGraph, +function dijkstra_shortest_paths(g::AbstractGraph, srcs::Vector{U}, distmx::AbstractMatrix{T}=weights(g); allpaths=false, trackvertices=false - ) where T <: Real where U<:Integer + ) where T <: Real where U <: Integer nvg = nv(g) dists = fill(typemax(T), nvg) @@ -36,9 +35,10 @@ function dijkstra_shortest_paths( preds = fill(Vector{U}(), nvg) visited = zeros(Bool, nvg) pathcounts = zeros(Int, nvg) - H = PriorityQueue{U,T}() - dists[srcs] = zero(T) - pathcounts[srcs] = 1 + H = DataStructures.PriorityQueue{U,T}() + dists[srcs] .= zero(T) + pathcounts[srcs] .= 1 + closest_vertices = Vector{U}() # Maintains vertices in order of distances from source sizehint!(closest_vertices, nvg) @@ -49,15 +49,15 @@ function dijkstra_shortest_paths( end while !isempty(H) - hentry = dequeue_pair!(H) + hentry = DataStructures.dequeue_pair!(H) # info("Popped H - got $(hentry.vertex)") u = hentry[1] if trackvertices - push!(closest_vertices, u) + push!(closest_vertices, u) end - for v in out_neighbors(g, u) + for v in outneighbors(g, u) alt = (dists[u] == typemax(T)) ? typemax(T) : dists[u] + distmx[u, v] if !visited[v] @@ -90,15 +90,15 @@ function dijkstra_shortest_paths( end if trackvertices - for s in vertices(g) - if !visited[s] - push!(closest_vertices, s) + for s in vertices(g) + if !visited[s] + push!(closest_vertices, s) + end end - end end - pathcounts[srcs] = 1 - parents[srcs] = 0 + pathcounts[srcs] .= 1 + parents[srcs] .= 0 for src in srcs preds[src] = [] end @@ -106,7 +106,7 @@ function dijkstra_shortest_paths( return DijkstraState{T,U}(parents, dists, preds, pathcounts, closest_vertices) end -dijkstra_shortest_paths(g::AbstractGraph, src::Integer, distmx::AbstractMatrix = weights(g); allpaths=false, trackvertices=false) = +dijkstra_shortest_paths(g::AbstractGraph, src::Integer, distmx::AbstractMatrix=weights(g); allpaths=false, trackvertices=false) = dijkstra_shortest_paths(g, [src;], distmx; allpaths=allpaths, trackvertices=trackvertices) """ @@ -114,12 +114,12 @@ dijkstra_shortest_paths(g, [src;], distmx; allpaths=allpaths, trackvertices=trac An [`AbstractPathState`](@ref) designed for multisource_dijkstra_shortest_paths calculation. 
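Here `dists[i, j]` holds the computed distance from the `i`-th source to vertex `j`, and `parents[i, j]` the corresponding parent, as filled in by `parallel_multisource_dijkstra_shortest_paths`.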
""" -struct MultipleDijkstraState{T<:Real,U<:Integer} <: AbstractPathState +struct MultipleDijkstraState{T <: Real,U <: Integer} <: AbstractPathState dists::Matrix{T} parents::Matrix{U} end -@doc_str """ +""" parallel_multisource_dijkstra_shortest_paths(g, sources=vertices(g), distmx=weights(g)) Compute the shortest paths between all pairs of vertices in graph `g` by running @@ -127,26 +127,23 @@ Compute the shortest paths between all pairs of vertices in graph `g` by running an optional distance matrix `distmx`. Return a [`MultipleDijkstraState`](@ref) with relevant traversal information. """ - -function parallel_multisource_dijkstra_shortest_paths( - g::AbstractGraph{U}, - sources::AbstractVector = vertices(g), - distmx::AbstractMatrix{T} = weights(g) - ) where T <: Real where U +function parallel_multisource_dijkstra_shortest_paths(g::AbstractGraph{U}, + sources::AbstractVector=vertices(g), + distmx::AbstractMatrix{T}=weights(g)) where T <: Real where U n_v = nv(g) r_v = length(sources) # TODO: remove `Int` once julialang/#23029 / #23032 are resolved - dists = SharedMatrix{T}(Int(r_v), Int(n_v)) - parents = SharedMatrix{U}(Int(r_v), Int(n_v)) + dists = SharedArrays.SharedMatrix{T}(Int(r_v), Int(n_v)) + parents = SharedArrays.SharedMatrix{U}(Int(r_v), Int(n_v)) - @sync @parallel for i in 1:r_v - state = dijkstra_shortest_paths(g, sources[i], distmx) - dists[i, :] = state.dists - parents[i, :] = state.parents + Distributed.@sync Distributed.@distributed for i in 1:r_v + state = dijkstra_shortest_paths(g, sources[i], distmx) + dists[i, :] = state.dists + parents[i, :] = state.parents end - result = MultipleDijkstraState(sdata(dists), sdata(parents)) + result = MultipleDijkstraState(SharedArrays.sdata(dists), SharedArrays.sdata(parents)) return result end diff --git a/src/shortestpaths/floyd-warshall.jl b/src/shortestpaths/floyd-warshall.jl index fa845ba29..40eb8fc4a 100644 --- a/src/shortestpaths/floyd-warshall.jl +++ b/src/shortestpaths/floyd-warshall.jl @@ -12,7 +12,7 @@ struct FloydWarshallState{T,U<:Integer} <: AbstractPathState parents::Matrix{U} end -@doc_str """ +@doc """ floyd_warshall_shortest_paths(g, distmx=weights(g)) Use the [Floyd-Warshall algorithm](http://en.wikipedia.org/wiki/Floyd–Warshall_algorithm) to compute the shortest paths between all pairs of vertices in graph `g` using an @@ -30,7 +30,6 @@ function floyd_warshall_shortest_paths( dists = fill(typemax(T), (Int(n_v), Int(n_v))) parents = zeros(U, (Int(n_v), Int(n_v))) - # fws = FloydWarshallState(Matrix{T}(), Matrix{Int}()) for v in 1:n_v dists[v, v] = zero(T) end @@ -48,15 +47,19 @@ function floyd_warshall_shortest_paths( parents[v, u] = v end end - for w in vertices(g), u in vertices(g), v in vertices(g) - if dists[u, w] == typemax(T) || dists[w, v] == typemax(T) - ans = typemax(T) - else - ans = dists[u, w] + dists[w, v] - end - if dists[u, v] > ans - dists[u, v] = dists[u, w] + dists[w, v] - parents[u, v] = parents[w, v] + + for pivot in vertices(g) + for v in vertices(g) + d = dists[pivot, v] + d == typemax(T) && continue + p = parents[pivot, v] + for u in vertices(g) + ans = (dists[u, pivot] == typemax(T) ? 
typemax(T) : dists[u, pivot] + d) + if dists[u, v] > ans + dists[u, v] = ans + parents[u, v] = p + end + end end end fws = FloydWarshallState(dists, parents) diff --git a/src/shortestpaths/johnson.jl b/src/shortestpaths/johnson.jl new file mode 100644 index 000000000..d83f44e42 --- /dev/null +++ b/src/shortestpaths/johnson.jl @@ -0,0 +1,103 @@ + +""" + struct JohnsonState{T, U} +An [`AbstractPathState`](@ref) designed for Johnson shortest-paths calculations. +""" +struct JohnsonState{T <: Real,U <: Integer} <: AbstractPathState + dists::Matrix{T} + parents::Matrix{U} +end + +@doc """ + johnson_shortest_paths(g, distmx=weights(g); parallel=false) + +### Implementation Notes +Use the [Johnson algorithm](https://en.wikipedia.org/wiki/Johnson%27s_algorithm) +to compute the shortest paths between all pairs of vertices in graph `g` using an +optional distance matrix `distmx`. +If the parameter parallel is set true, dijkstra_shortest_paths will run in parallel. +Parallel bellman_ford_shortest_paths is currently unavailable +Return a [`LightGraphs.JohnsonState`](@ref) with relevant +traversal information. +Behaviour in case of negative cycle depends on bellman_ford_shortest_paths. +Throws NegativeCycleError() if a negative cycle is present. +### Performance +Complexity: O(|V|*|E|) +If distmx is not mutable or of type, DefaultDistance than a sparse matrix will be produced using distmx. +In the case that distmx is immutable, to reduce memory overhead, +if edge (a, b) does not exist in g then distmx[a, b] should be set to 0. +### Dependencies from LightGraphs +bellman_ford_shortest_paths +parallel_multisource_dijkstra_shortest_paths +dijkstra_shortest_paths +""" +function johnson_shortest_paths(g::AbstractGraph{U}, + distmx::AbstractMatrix{T}=weights(g); + parallel::Bool=false +) where T <: Real where U <: Integer + + nvg = nv(g) + type_distmx = typeof(distmx) + #Change when parallel implementation of Bellman Ford available + wt_transform = bellman_ford_shortest_paths(g, vertices(g), distmx).dists + + if !type_distmx.mutable && type_distmx != LightGraphs.DefaultDistance + distmx = SparseArrays.sparse(distmx) #Change reference, not value + end + + #Weight transform not needed if all weights are positive. 
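+    # Reweighting step of Johnson's algorithm: with potentials h(v) = wt_transform[v]
+    # (the Bellman-Ford distances computed above), each edge weight becomes
+    # w(u, v) + h(u) - h(v), which is non-negative when no negative cycle exists,
+    # so Dijkstra can then be run from every source on the reweighted graph.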
+ if type_distmx != LightGraphs.DefaultDistance + for e in edges(g) + distmx[src(e), dst(e)] += wt_transform[src(e)] - wt_transform[dst(e)] + end + end + + if !parallel + dists = Matrix{T}(undef, nvg, nvg) + parents = Matrix{U}(undef, nvg, nvg) + for v in vertices(g) + dijk_state = dijkstra_shortest_paths(g, v, distmx) + dists[v, :] = dijk_state.dists + parents[v, :] = dijk_state.parents + end + else + dijk_state = parallel_multisource_dijkstra_shortest_paths(g, vertices(g), distmx) + dists = dijk_state.dists + parents = dijk_state.parents + end + + broadcast!(-, dists, dists, wt_transform) + for v in vertices(g) + dists[:, v] .+= wt_transform[v] #Vertical traversal prefered + end + + if type_distmx.mutable + for e in edges(g) + distmx[src(e), dst(e)] += wt_transform[dst(e)] - wt_transform[src(e)] + end + end + + return JohnsonState(dists, parents) +end + +function enumerate_paths(s::JohnsonState{T,U}, v::Integer) where T <: Real where U <: Integer + pathinfo = s.parents[v, :] + paths = Vector{Vector{U}}() + for i in 1:length(pathinfo) + if (i == v) || (s.dists[v, i] == typemax(T)) + push!(paths, Vector{U}()) + else + path = Vector{U}() + currpathindex = i + while currpathindex != 0 + push!(path, currpathindex) + currpathindex = pathinfo[currpathindex] + end + push!(paths, reverse(path)) + end + end + return paths +end + +enumerate_paths(s::JohnsonState) = [enumerate_paths(s, v) for v in 1:size(s.parents, 1)] +enumerate_paths(st::JohnsonState, s::Integer, d::Integer) = enumerate_paths(st, s)[d] diff --git a/src/shortestpaths/yen.jl b/src/shortestpaths/yen.jl index a5241ba4b..029d327d4 100644 --- a/src/shortestpaths/yen.jl +++ b/src/shortestpaths/yen.jl @@ -33,7 +33,7 @@ function yen_k_shortest_paths( dists = Array{T,1}() push!(dists, dj.dists[target]) A = [path] - B = PriorityQueue() + B = DataStructures.PriorityQueue() gcopy = deepcopy(g) for k = 1:(K - 1) @@ -81,7 +81,7 @@ function yen_k_shortest_paths( distpath = distrootpath + djspur.dists[target] # Add the potential k-shortest path to the heap if !haskey(B, pathtotal) - enqueue!(B, pathtotal, distpath) + DataStructures.enqueue!(B, pathtotal, distpath) end end @@ -96,7 +96,7 @@ function yen_k_shortest_paths( # The path with minimum distance in B is higher than maxdist mindistB > maxdist && break push!(dists, DataStructures.peek(B)[2]) - push!(A, dequeue!(B)) + push!(A, DataStructures.dequeue!(B)) end return YenState{T,U}(dists, A) diff --git a/src/spanningtrees/kruskal.jl b/src/spanningtrees/kruskal.jl index 8eeed52bc..2fb01bfff 100644 --- a/src/spanningtrees/kruskal.jl +++ b/src/spanningtrees/kruskal.jl @@ -1,58 +1,32 @@ -struct KruskalHeapEntry{T<:Real} - edge::Edge - dist::T -end - -isless(e1::KruskalHeapEntry, e2::KruskalHeapEntry) = e1.dist < e2.dist - -""" - quick_find!(vs, p, q) - -Perform [Quick-Find algorithm](https://en.wikipedia.org/wiki/Disjoint-set_data_structure) -on a given pair of vertices `p`and `q`, and make a connection between them in the vector `vs`. -""" -function quick_find!(vs, p, q) - pid = vs[p] - qid = vs[q] - for i in 1:length(vs) - if vs[i] == pid - vs[i] = qid - end - end -end - """ kruskal_mst(g, distmx=weights(g)) - Return a vector of edges representing the minimum spanning tree of a connected, undirected graph `g` with optional distance matrix `distmx` using [Kruskal's algorithm](https://en.wikipedia.org/wiki/Kruskal%27s_algorithm). 
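A minimal illustrative call (default weights are all one):

```
g = CycleGraph(4)
kruskal_mst(g)   # expected: a vector of 3 edges forming a spanning tree
```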
""" function kruskal_mst end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function kruskal_mst{T, U, AG<:AbstractGraph{U}}( +@traitfn function kruskal_mst( g::AG::(!IsDirected), distmx::AbstractMatrix{T} = weights(g) -) +) where {T<:Real, U, AG<:AbstractGraph{U}} - edge_list = Vector{KruskalHeapEntry{T}}() - mst = Vector{Edge}() - connected_vs = collect(one(U):nv(g)) + connected_vs = DataStructures.IntDisjointSets(nv(g)) - sizehint!(edge_list, ne(g)) - sizehint!(mst, ne(g)) + mst = Vector{Edge}() + sizehint!(mst, nv(g) - 1) - for e in edges(g) - heappush!(edge_list, KruskalHeapEntry{T}(e, distmx[src(e), dst(e)])) + weights = Vector{T}() + sizehint!(weights, ne(g)) + edge_list = collect(edges(g)) + for e in edge_list + push!(weights, distmx[src(e), dst(e)]) end - while !isempty(edge_list) && length(mst) < nv(g) - 1 - heap_entry = heappop!(edge_list) - v = src(heap_entry.edge) - w = dst(heap_entry.edge) - - if connected_vs[v] != connected_vs[w] - quick_find!(connected_vs, v, w) - push!(mst, heap_entry.edge) + for e in edge_list[sortperm(weights)] + if !DataStructures.in_same_set(connected_vs, e.src, e.dst) + DataStructures.union!(connected_vs, e.src, e.dst) + push!(mst, e) + (length(mst) >= nv(g) - 1) && break end end diff --git a/src/spanningtrees/prim.jl b/src/spanningtrees/prim.jl index 939de86a7..cacab2784 100644 --- a/src/spanningtrees/prim.jl +++ b/src/spanningtrees/prim.jl @@ -14,19 +14,19 @@ Return a vector of edges. """ function prim_mst end @traitfn function prim_mst( - g::::(!IsDirected), - distmx::AbstractMatrix = weights(g) - ) - pq = Vector{PrimHeapEntry}() + g::AG::(!IsDirected), + distmx::AbstractMatrix{T} = weights(g) + ) where {T<:Real, U, AG<:AbstractGraph{U}} + pq = Vector{PrimHeapEntry{T}}() mst = Vector{Edge}() marked = zeros(Bool, nv(g)) sizehint!(pq, ne(g)) - sizehint!(mst, ne(g)) + sizehint!(mst, nv(g) - 1) visit!(g, 1, marked, pq, distmx) while !isempty(pq) - heap_entry = heappop!(pq) + heap_entry = DataStructures.heappop!(pq) v = src(heap_entry.edge) w = dst(heap_entry.edge) @@ -54,11 +54,11 @@ function visit!( distmx::AbstractMatrix ) marked[v] = true - for w in out_neighbors(g, v) + for w in outneighbors(g, v) if !marked[w] x = min(v, w) y = max(v, w) - heappush!(pq, PrimHeapEntry(Edge(x, y), distmx[x, y])) + DataStructures.heappush!(pq, PrimHeapEntry(Edge(x, y), distmx[x, y])) end end end diff --git a/src/traversals/bfs.jl b/src/traversals/bfs.jl index faa6f2913..a672f5cf9 100644 --- a/src/traversals/bfs.jl +++ b/src/traversals/bfs.jl @@ -1,9 +1,13 @@ +### DEVELOPERS NOTE: BFS optimization experiments are typically +### prototyped in gdistances!, since it's one of the simplest +### BFS implementations. + """ tree(parents) Convert a parents array into a directed graph. """ -function tree(parents::AbstractVector{T}) where T<:Integer +function tree(parents::AbstractVector{T}) where T <: Integer n = T(length(parents)) t = DiGraph{T}(n) for (v, u) in enumerate(parents) @@ -28,8 +32,8 @@ implementations which are marginally faster in practice for smaller graphs, but the performance improvements using this implementation on large graphs can be significant. """ -bfs_parents(g::AbstractGraph, s::Integer; dir=:out) = - (dir == :out) ? _bfs_parents(g, s, out_neighbors) : _bfs_parents(g, s, in_neighbors) +bfs_parents(g::AbstractGraph, s::Integer; dir = :out) = + (dir == :out) ? 
_bfs_parents(g, s, outneighbors) : _bfs_parents(g, s, inneighbors) function _bfs_parents(g::AbstractGraph{T}, source, neighborfn::Function) where T n = nv(g) @@ -69,18 +73,26 @@ and return a directed acyclic graph of vertices in the order they were discovere If `dir` is specified, use the corresponding edge direction (`:in` and `:out` are acceptable values). """ -bfs_tree(g::AbstractGraph, s::Integer; dir=:out) = tree(bfs_parents(g, s; dir=dir)) +bfs_tree(g::AbstractGraph, s::Integer; dir = :out) = tree(bfs_parents(g, s; dir = dir)) """ - gdistances!(g, source, dists) + gdistances!(g, source, dists; sort_alg=QuickSort) -Fill `dists` with the geodesic distances of vertices in `g` from source vertex/vertices -`sources`. `dists` should be a vector of length `nv(g)` filled with `typemax(T)`. -Return `dists`. +Fill `dists` with the geodesic distances of vertices in `g` from source vertex (or +collection of vertices) `source`. `dists` should be a vector of length `nv(g)` +filled with `typemax(T)`. Return `dists`. For vertices in disconnected components the default distance is `typemax(T)`. + +An optional sorting algorithm may be specified (see Performance section). + +### Performance +`gdistances` uses `QuickSort` internally for its default sorting algorithm, since it performs +the best of the algorithms built into Julia Base. However, passing a `RadixSort` (available via +[SortingAlgorithms.jl](https://github.com/JuliaCollections/SortingAlgorithms.jl)) will provide +significant performance improvements on larger graphs. """ -function gdistances!(g::AbstractGraph{T}, source, vert_level) where T +function gdistances!(g::AbstractGraph{T}, source, vert_level; sort_alg = QuickSort) where T n = nv(g) visited = falses(n) n_level = one(T) @@ -95,7 +107,7 @@ function gdistances!(g::AbstractGraph{T}, source, vert_level) where T end while !isempty(cur_level) @inbounds for v in cur_level - @inbounds @simd for i in out_neighbors(g, v) + @inbounds @simd for i in outneighbors(g, v) if !visited[i] push!(next_level, i) vert_level[i] = n_level @@ -106,29 +118,37 @@ function gdistances!(g::AbstractGraph{T}, source, vert_level) where T n_level += one(T) empty!(cur_level) cur_level, next_level = next_level, cur_level - sort!(cur_level) + sort!(cur_level, alg = sort_alg) end return vert_level end """ - gdistances(g, source) + gdistances(g, source; sort_alg=QuickSort) Return a vector filled with the geodesic distances of vertices in `g` from `source`. If `source` is a collection of vertices each element should be unique. For vertices in disconnected components the default distance is `typemax(T)`. + +An optional sorting algorithm may be specified (see Performance section). + +### Performance +`gdistances` uses `QuickSort` internally for its default sorting algorithm, since it performs +the best of the algorithms built into Julia Base. However, passing a `RadixSort` (available via +[SortingAlgorithms.jl](https://github.com/JuliaCollections/SortingAlgorithms.jl)) will provide +significant performance improvements on larger graphs. 
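+
+For example (assuming SortingAlgorithms.jl is installed):
+
+```
+using LightGraphs, SortingAlgorithms
+g = CycleGraph(10^6)                  # large graphs are where RadixSort pays off
+gdistances(g, 1; sort_alg=RadixSort)
+```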
""" -gdistances(g::AbstractGraph{T}, source) where T = gdistances!(g, source, fill(typemax(T), nv(g))) +gdistances(g::AbstractGraph{T}, source; sort_alg = Base.Sort.QuickSort) where T = gdistances!(g, source, fill(typemax(T), nv(g)); sort_alg = sort_alg) """ has_path(g::AbstractGraph, u, v; exclude_vertices=Vector()) -Return `true` if there is a path from `u to `v` in `g` (while avoiding vertices in +Return `true` if there is a path from `u` to `v` in `g` (while avoiding vertices in `exclude_vertices`) or `u == v`. Return false if there is no such path or if `u` or `v` is in `excluded_vertices`. """ function has_path(g::AbstractGraph{T}, u::Integer, v::Integer; - exclude_vertices::AbstractVector=Vector{T}()) where T + exclude_vertices::AbstractVector = Vector{T}()) where T seen = zeros(Bool, nv(g)) for ve in exclude_vertices # mark excluded vertices as seen seen[ve] = true @@ -139,8 +159,8 @@ function has_path(g::AbstractGraph{T}, u::Integer, v::Integer; push!(next, u) seen[u] = true while !isempty(next) - src = shift!(next) # get new element from queue - for vertex in out_neighbors(g, src) + src = popfirst!(next) # get new element from queue + for vertex in outneighbors(g, src) vertex == v && return true if !seen[vertex] push!(next, vertex) # push onto queue @@ -149,4 +169,4 @@ function has_path(g::AbstractGraph{T}, u::Integer, v::Integer; end end return false -end \ No newline at end of file +end diff --git a/src/traversals/bipartition.jl b/src/traversals/bipartition.jl index 32c9eabcc..42030f299 100644 --- a/src/traversals/bipartition.jl +++ b/src/traversals/bipartition.jl @@ -12,9 +12,9 @@ An empty graph will return an empty vector but is bipartite. function bipartite_map(g::AbstractGraph{T}) where T nvg = nv(g) if !is_directed(g) - ccs = filter(x -> length(x) > 2, connected_components(g)) + ccs = filter(x -> length(x) >= 2, connected_components(g)) else - ccs = filter(x -> length(x) > 2, weakly_connected_components(g)) + ccs = filter(x -> length(x) >= 2, weakly_connected_components(g)) end seen = zeros(Bool, nvg) colors = zeros(Bool, nvg) @@ -24,8 +24,8 @@ function bipartite_map(g::AbstractGraph{T}) where T push!(Q, s) bipartitemap = zeros(UInt8, nvg) while !isempty(Q) - u = shift!(Q) - for v in out_neighbors(g, u) + u = popfirst!(Q) + for v in outneighbors(g, u) if !seen[v] colors[v] = !colors[u] push!(Q, v) @@ -36,7 +36,7 @@ function bipartite_map(g::AbstractGraph{T}) where T end end end - return UInt8.(colors) + one(UInt8) + return UInt8.(colors).+(one(UInt8)) end """ diff --git a/src/traversals/dfs.jl b/src/traversals/dfs.jl index 63865b96a..10b8e58a5 100644 --- a/src/traversals/dfs.jl +++ b/src/traversals/dfs.jl @@ -12,7 +12,7 @@ Uses DFS. function is_cyclic end @traitfn is_cyclic(g::::(!IsDirected)) = ne(g) > 0 # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function is_cyclic{T,AG<:AbstractGraph{T}}(g::AG::IsDirected) +@traitfn function is_cyclic(g::AG::IsDirected) where {T, AG<:AbstractGraph{T}} vcolor = zeros(UInt8, nv(g)) for v in vertices(g) vcolor[v] != 0 && continue @@ -21,7 +21,7 @@ function is_cyclic end while !isempty(S) u = S[end] w = 0 - for n in out_neighbors(g, u) + for n in outneighbors(g, u) if vcolor[n] == 1 return true elseif vcolor[n] == 0 @@ -50,7 +50,7 @@ graph `g` as a vector of vertices in topological order. 
""" function toplogical_sort_by_dfs end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function topological_sort_by_dfs{T, AG<:AbstractGraph{T}}(g::AG::IsDirected) +@traitfn function topological_sort_by_dfs(g::AG::IsDirected) where {T, AG<:AbstractGraph{T}} vcolor = zeros(UInt8, nv(g)) verts = Vector{T}() for v in vertices(g) @@ -60,7 +60,7 @@ function toplogical_sort_by_dfs end while !isempty(S) u = S[end] w = 0 - for n in out_neighbors(g, u) + for n in outneighbors(g, u) if vcolor[n] == 1 error("The input graph contains at least one loop.") # TODO 0.7 should we use a different error? elseif vcolor[n] == 0 @@ -100,7 +100,7 @@ use the corresponding edge direction (`:in` and `:out` are acceptable values). This version of DFS is iterative. """ dfs_parents(g::AbstractGraph, s::Integer; dir=:out) = -(dir == :out) ? _dfs_parents(g, s, out_neighbors) : _dfs_parents(g, s, in_neighbors) +(dir == :out) ? _dfs_parents(g, s, outneighbors) : _dfs_parents(g, s, inneighbors) function _dfs_parents(g::AbstractGraph{T}, s::Integer, neighborfn::Function) where T parents = zeros(T, nv(g)) diff --git a/src/traversals/diffusion.jl b/src/traversals/diffusion.jl index 1cfbe1c44..de7684daf 100644 --- a/src/traversals/diffusion.jl +++ b/src/traversals/diffusion.jl @@ -12,11 +12,10 @@ are infected at the start of the simulation. specifying `watch` limits reporting to a specific set of vertices reached during the simulation. If left empty, all vertices will be watched. - `normalize=false`: if `false`, set the probability of spread from a vertex ``i`` to -each of the out_neighbors of ``i`` to ``p``. If `true`, set the probability of spread -from a vertex ``i`` to each of the `out_neighbors` of ``i`` to +each of the outneighbors of ``i`` to ``p``. If `true`, set the probability of spread +from a vertex ``i`` to each of the `outneighbors` of ``i`` to ``\\frac{p}{outdegreee(g, i)}``. """ - function diffusion(g::AbstractGraph{T}, p::Real, n::Integer; @@ -27,7 +26,7 @@ function diffusion(g::AbstractGraph{T}, # Initialize watch_set = Set{T}(watch) - infected_vertices = IntSet(initial_infections) + infected_vertices = BitSet(initial_infections) vertices_per_step::Vector{Vector{T}} = [Vector{T}() for i in 1:n] # Record initial infection @@ -45,7 +44,7 @@ function diffusion(g::AbstractGraph{T}, new_infections = Set{T}() for i in infected_vertices - outn = out_neighbors(g, i) + outn = outneighbors(g, i) outd = length(outn) if outd > 0 @@ -55,7 +54,7 @@ function diffusion(g::AbstractGraph{T}, local_p = p end - randsubseq!(randsubseq_buf, outn, local_p) + Random.randsubseq!(randsubseq_buf, outn, local_p) union!(new_infections, randsubseq_buf) end end diff --git a/src/traversals/greedy_color.jl b/src/traversals/greedy_color.jl new file mode 100644 index 000000000..9935dd0ba --- /dev/null +++ b/src/traversals/greedy_color.jl @@ -0,0 +1,129 @@ +""" + struct coloring{T} + +Store number of colors used and mapping from vertex to color +""" +struct coloring{T<:Integer} <: Any + num_colors::T + colors::Vector{T} +end + +best_color(c1::coloring, c2::coloring) = c1.num_colors < c2.num_colors ? c1 : c2 + +""" + perm_greedy_color(g, seq) + +Color graph `g` according to an order specified by `seq` using a greedy heuristic. +seq[i] = v imples that vertex v is the ith vertex to be colored. 
+""" +function perm_greedy_color( + g::AbstractGraph, + seq::Vector{T} + ) where T <: Integer + + nvg::T = nv(g) + cols = Vector{T}(undef, nvg) + seen = zeros(Bool, nvg + 1) + + for v in seq + seen[v] = true + colors_used = zeros(Bool, nvg) + + for w in neighbors(g, v) + if seen[w] + colors_used[cols[w]] = true + end + end + + + for i in one(T):nvg + if colors_used[i] == false + cols[v] = i + break; + end + end + end + + return coloring{T}(maximum(cols), cols) +end + +""" + degree_greedy_color(g) + +Color graph `g` iteratively in the descending order of the degree of the vertices. +""" +function degree_greedy_color(g::AbstractGraph{T}) where T<:Integer + seq = convert(Vector{T}, sortperm(degree(g) , rev=true)) + return perm_greedy_color(g, seq) +end + +""" + parallel_random_greedy_color(g, reps) + +Color graph `g` iteratively in a random order using a greedy heuristic and +choose the best coloring out of `reps` number of colorings computed in parallel. +""" +function parallel_random_greedy_color( + g::AbstractGraph{T}, + reps::Integer +) where T<:Integer + + best = Distributed.@distributed (best_color) for i in 1:reps + seq = Random.shuffle(vertices(g)) + perm_greedy_color(g, seq) + end + + return convert(coloring{T} ,best) +end + +""" + seq_random_greedy_color(g, reps) + +Color graph `g` iteratively in a random order using a greedy heuristic +and choose the best coloring out of `reps` such random coloring. +""" +function seq_random_greedy_color( + g::AbstractGraph{T}, + reps::Integer +) where T <: Integer + + seq = Random.shuffle(vertices(g)) + best = perm_greedy_color(g, seq) + + for i in 2:reps + Random.shuffle!(seq) + best = best_color(best, perm_greedy_color(g, seq)) + end + return best +end + +""" + random_greedy_color(g, reps=1, parallel=false) + +Color graph `g` iteratively in a random order using a greedy heruistic +and choose the best coloring out of `reps` such random coloring. + +If parallel is true then the colorings are executed in parallel. +""" +random_greedy_color(g::AbstractGraph{T}, reps::Integer = 1, parallel::Bool = false) where {T<:Integer} = +parallel ? parallel_random_greedy_color(g, reps) : seq_random_greedy_color(g, reps) + +""" + greedy_color(g; sort_degree=false, parallel=false, reps = 1) + +Color graph `g` based on [Greedy Coloring Heuristics](https://en.wikipedia.org/wiki/Greedy_coloring) + +The heuristics can be described as choosing a permutation of the vertices and assigning the +lowest color index available iteratively in that order. + +If `sort_degree` is true then the permutation is chosen in reverse sorted order of the degree of the vertices. +`parallel` and `reps` are irrelevant in this case. + +If `sort_degree` is false then `reps` colorings are obtained based on random permutations and the one using least +colors is chosen. + +If `parallel` is true then this function executes coloring in parallel. +""" +greedy_color(g::AbstractGraph{U}; sort_degree::Bool=false, parallel::Bool =false, reps::Integer=1) where {U <: Integer} = +sort_degree ? 
degree_greedy_color(g) : random_greedy_color(g, reps, parallel) + diff --git a/src/traversals/maxadjvisit.jl b/src/traversals/maxadjvisit.jl index 872cf7bba..90025f5f0 100644 --- a/src/traversals/maxadjvisit.jl +++ b/src/traversals/maxadjvisit.jl @@ -45,7 +45,7 @@ function mincut( u = DataStructures.dequeue!(pq) colormap[u] = 1 - for v in out_neighbors(g, u) + for v in outneighbors(g, u) # if the target of e is already marked then decrease cutweight # otherwise, increase it ew = distmx[u, v] @@ -85,7 +85,7 @@ function maximum_adjacency_visit( g::AbstractGraph, distmx::AbstractMatrix{T}, log::Bool=false, - io::IO=STDOUT + io::IO=stdout ) where T<:Real U = eltype(g) @@ -107,11 +107,11 @@ function maximum_adjacency_visit( #start traversing the graph while !isempty(pq) - u = dequeue!(pq) + u = DataStructures.dequeue!(pq) has_key[u] = false push!(vertices_order, u) log && println(io, "discover vertex: $u") - for v in out_neighbors(g, u) + for v in outneighbors(g, u) log && println(io, " -- examine neighbor from $u to $v") if has_key[v] ed = distmx[u, v] @@ -127,5 +127,5 @@ maximum_adjacency_visit(g::AbstractGraph) = maximum_adjacency_visit( g, weights(g), false, - STDOUT + stdout ) diff --git a/src/traversals/parallel_bfs.jl b/src/traversals/parallel_bfs.jl index 9da25738d..9feb3d86c 100644 --- a/src/traversals/parallel_bfs.jl +++ b/src/traversals/parallel_bfs.jl @@ -11,7 +11,7 @@ using Base.Threads -import Base: push!, shift!, isempty, getindex +import Base: push!, popfirst!, isempty, getindex export bfs_tree, LevelSynchronousBFS @@ -22,7 +22,6 @@ struct LevelSynchronousBFS end A thread safe queue implementation for using as the queue for BFS. """ - struct ThreadQueue{T,N<:Integer} data::Vector{T} head::Atomic{N} #Index of the head @@ -30,7 +29,7 @@ struct ThreadQueue{T,N<:Integer} end function ThreadQueue(T::Type, maxlength::N) where N <: Integer - q = ThreadQueue(Vector{T}(maxlength), Atomic{N}(1), Atomic{N}(1)) + q = ThreadQueue(Vector{T}(undef, maxlength), Atomic{N}(1), Atomic{N}(1)) return q end @@ -41,7 +40,7 @@ function push!(q::ThreadQueue{T,N}, val::T) where T where N return offset end -function shift!(q::ThreadQueue{T,N}) where T where N +function popfirst!(q::ThreadQueue{T,N}) where T where N # TODO: check that head < tail offset = atomic_add!(q.head, one(N)) return q.data[offset] diff --git a/src/traversals/randomwalks.jl b/src/traversals/randomwalks.jl index aebd3dce7..6e224fbf5 100644 --- a/src/traversals/randomwalks.jl +++ b/src/traversals/randomwalks.jl @@ -13,7 +13,7 @@ function randomwalk(g::AG, s::Integer, niter::Integer) where AG <: AbstractGraph while i <= niter push!(visited, currs) i += 1 - nbrs = out_neighbors(g, currs) + nbrs = outneighbors(g, currs) length(nbrs) == 0 && break currs = rand(nbrs) end @@ -29,7 +29,7 @@ vector of vertices visited in order. 
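One possible outcome on a small cycle (the walk is random, so results vary):

```
non_backtracking_randomwalk(CycleGraph(5), 1, 4)   # e.g. [1, 2, 3, 4] or [1, 5, 4, 3]
```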
""" function non_backtracking_randomwalk end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function non_backtracking_randomwalk{T, AG<:AbstractGraph{T}}(g::AG::(!IsDirected), s::Integer, niter::Integer) +@traitfn function non_backtracking_randomwalk(g::AG::(!IsDirected), s::Integer, niter::Integer) where {T, AG<:AbstractGraph{T}} s in vertices(g) || throw(BoundsError()) visited = Vector{T}() sizehint!(visited, niter) @@ -39,7 +39,7 @@ function non_backtracking_randomwalk end push!(visited, currs) i += 1 - nbrs = out_neighbors(g, currs) + nbrs = outneighbors(g, currs) length(nbrs) == 0 && return visited[1:(i - 1)] prev = currs currs = rand(nbrs) @@ -47,7 +47,7 @@ function non_backtracking_randomwalk end while i <= niter push!(visited, currs) i += 1 - nbrs = out_neighbors(g, currs) + nbrs = outneighbors(g, currs) length(nbrs) == 1 && break idnext = rand(1:(length(nbrs) - 1)) next = nbrs[idnext] @@ -61,7 +61,7 @@ function non_backtracking_randomwalk end end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function non_backtracking_randomwalk{T, AG<:AbstractGraph{T}}(g::AG::IsDirected, s::Integer, niter::Integer) +@traitfn function non_backtracking_randomwalk(g::AG::IsDirected, s::Integer, niter::Integer) where {T, AG<:AbstractGraph{T}} s in vertices(g) || throw(BoundsError()) visited = Vector{T}() sizehint!(visited, niter) @@ -72,7 +72,7 @@ end while i <= niter push!(visited, currs) i += 1 - nbrs = out_neighbors(g, currs) + nbrs = outneighbors(g, currs) length(nbrs) == 0 && break next = rand(nbrs) if next == prev @@ -107,7 +107,7 @@ function saw(g::AG, s::Integer, niter::Integer) where AG <: AbstractGraph{T} whe push!(visited, currs) push!(svisited, currs) i += 1 - nbrs = setdiff(Set(out_neighbors(g, currs)), svisited) + nbrs = setdiff(Set(outneighbors(g, currs)), svisited) length(nbrs) == 0 && break currs = rand(collect(nbrs)) end diff --git a/src/utils.jl b/src/utils.jl index 5a76008ce..fa0e33f05 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -9,7 +9,7 @@ Sample `k` element from array `a` without repetition and eventually excluding el ### Implementation Notes Changes the order of the elements in `a`. For a non-mutating version, see [`sample`](@ref). """ -function sample!(rng::AbstractRNG, a::AbstractVector, k::Integer; exclude = ()) +function sample!(rng::Random.AbstractRNG, a::AbstractVector, k::Integer; exclude = ()) minsize = k + length(exclude) length(a) < minsize && throw(ArgumentError("vector must be at least size $minsize")) res = Vector{eltype(a)}() @@ -42,7 +42,7 @@ Unlike [`sample!`](@ref), does not produce side effects. """ sample(a::UnitRange, k::Integer; exclude = ()) = sample!(getRNG(), collect(a), k; exclude = exclude) -getRNG(seed::Integer = -1) = seed >= 0 ? MersenneTwister(seed) : Base.Random.GLOBAL_RNG +getRNG(seed::Integer = -1) = seed >= 0 ? Random.MersenneTwister(seed) : Random.GLOBAL_RNG """ insorted(item, collection) @@ -53,4 +53,3 @@ Return true if `item` is in sorted collection `collection`. Does not verify that `collection` is sorted. 
""" insorted(item, collection) = !isempty(searchsorted(collection, item)) - diff --git a/test/biconnectivity/articulation.jl b/test/biconnectivity/articulation.jl index 36eeb999f..3df06873b 100644 --- a/test/biconnectivity/articulation.jl +++ b/test/biconnectivity/articulation.jl @@ -19,21 +19,21 @@ add_edge!(gint, 7, 12) for g in testgraphs(gint) - art = @inferred(articulation(g)) - ans = [1, 7, 8, 12] - @test art == ans + art = @inferred(articulation(g)) + ans = [1, 7, 8, 12] + @test art == ans end for level in 1:6 btree = LightGraphs.BinaryTree(level) for tree in [btree, Graph{UInt8}(btree), Graph{Int16}(btree)] - artpts = @inferred(articulation(tree)) - @test artpts == collect(1:(2^(level - 1) - 1)) + artpts = @inferred(articulation(tree)) + @test artpts == collect(1:(2^(level - 1) - 1)) end end - hint = LightGraphs.blkdiag(WheelGraph(5), WheelGraph(5)) + hint = SparseArrays.blockdiag(WheelGraph(5), WheelGraph(5)) add_edge!(hint, 5, 6) for h in (hint, Graph{UInt8}(hint), Graph{Int16}(hint)) - @test @inferred(articulation(h)) == [5, 6] + @test @inferred(articulation(h)) == [5, 6] end end diff --git a/test/biconnectivity/biconnect.jl b/test/biconnectivity/biconnect.jl index 92755a2cc..adda7bdc7 100644 --- a/test/biconnectivity/biconnect.jl +++ b/test/biconnectivity/biconnect.jl @@ -22,8 +22,8 @@ [Edge(11, 12)]] for g in testgraphs(gint) - bcc = @inferred(biconnected_components(g)) - @test bcc == a + bcc = @inferred(biconnected_components(g)) + @test bcc == a end g = SimpleGraph(4) @@ -38,13 +38,13 @@ add_edge!(h, 3, 4) add_edge!(h, 1, 4) - gint = blkdiag(g, h) + gint = SparseArrays.blockdiag(g, h) add_edge!(gint, 4, 5) a = [[Edge(5, 8), Edge(7, 8), Edge(6, 7), Edge(5, 6)], [Edge(4, 5)], [Edge(1, 4), Edge(3, 4), Edge(2, 3), Edge(1, 2)]] for g in testgraphs(gint) - bcc = @inferred(biconnected_components(g)) - @test bcc == a + bcc = @inferred(biconnected_components(g)) + @test bcc == a end end diff --git a/test/centrality/betweenness.jl b/test/centrality/betweenness.jl index 6823a1ca2..6a414f426 100644 --- a/test/centrality/betweenness.jl +++ b/test/centrality/betweenness.jl @@ -7,7 +7,7 @@ gint = loadgraph(joinpath(testdir, "testdata", "graph-50-500.jgz"), "graph-50-500") - c = vec(readcsv(joinpath(testdir, "testdata", "graph-50-500-bc.txt"))) + c = vec(readdlm(joinpath(testdir, "testdata", "graph-50-500-bc.txt"), ',')) for g in testdigraphs(gint) z = @inferred(betweenness_centrality(g)) zp = @inferred(parallel_betweenness_centrality(g)) @@ -17,8 +17,8 @@ y = @inferred(betweenness_centrality(g, endpoints=true, normalize=false)) yp = parallel_betweenness_centrality(g, endpoints=true, normalize=false) @test all(isapprox(y, yp)) - @test round.(y[1:3], 4) == - round.([122.10760591498584, 159.0072453120582, 176.39547945994505], 4) + @test round.(y[1:3], digits=4) == + round.([122.10760591498584, 159.0072453120582, 176.39547945994505], digits=4) diff --git a/test/centrality/eigenvector.jl b/test/centrality/eigenvector.jl index 86e396c34..764527b69 100644 --- a/test/centrality/eigenvector.jl +++ b/test/centrality/eigenvector.jl @@ -4,13 +4,13 @@ for g in testgraphs(g1) y = @inferred(eigenvector_centrality(g)) - @test round.(y, 3) == round.([ + @test round.(y, digits=3) == round.([ 0.3577513877490464, 0.3577513877490464, 0.5298987782873977, 0.5298987782873977, 0.4271328349194304 - ], 3) + ], digits=3) end for g in testdigraphs(g2) y = @inferred(eigenvector_centrality(g)) - @test round.(y, 3) == round.([0.5, 0.5, 0.5, 0.5], 3) + @test round.(y, digits=3) == round.([0.5, 0.5, 0.5, 0.5], digits=3) 
end end diff --git a/test/centrality/katz.jl b/test/centrality/katz.jl index 05eead525..99b38d249 100644 --- a/test/centrality/katz.jl +++ b/test/centrality/katz.jl @@ -3,6 +3,6 @@ add_edge!(g5, 1, 2); add_edge!(g5, 2, 3); add_edge!(g5, 1, 3); add_edge!(g5, 3, 4) for g in testdigraphs(g5) z = @inferred(katz_centrality(g, 0.4)) - @test round.(z, 2) == [0.32, 0.44, 0.62, 0.56] + @test round.(z, digits=2) == [0.32, 0.44, 0.62, 0.56] end end diff --git a/test/centrality/pagerank.jl b/test/centrality/pagerank.jl index 6dafedfd1..9e4490994 100644 --- a/test/centrality/pagerank.jl +++ b/test/centrality/pagerank.jl @@ -3,11 +3,11 @@ # M = google_matrix(g, α) p = fill(1/nv(g), nv(g)) danglingnodes = outdegree(g) .== 0 - M = float(full(adjacency_matrix(g))) + M = Matrix{Float64}(adjacency_matrix(g)) M = M' - M[:, danglingnodes] = sum(danglingnodes) ./ nv(g) - M = M * Diagonal(1./sum(M,1)[:]) - @assert all(1.01 .>= sum(M, 1).>=0.999) + M[:, danglingnodes] .= sum(danglingnodes) ./ nv(g) + M = M * LinearAlgebra.Diagonal(1 ./ sum(M, dims=1)[:]) + @assert all(1.01 .>= sum(M, dims=1).>=0.999) # v = inv(I-β*M) * ((1-β)/nv(g) * ones(nv(g), 1)) v = inv(I-α*M) * ((1-α)/nv(g) * ones(nv(g), 1)) return v @@ -16,10 +16,10 @@ function google_matrix(g::AbstractGraph, α=0.85::Real) p = fill(1/nv(g), nv(g)) danglingnodes = outdegree(g) .== 0 - M = float(full(adjacency_matrix(g))) + M = Matrix{Float64}(adjacency_matrix(g)) @show M = M' M[:, danglingnodes] = sum(danglingnodes) ./ nv(g) - @show M = M * Diagonal(1./sum(M,1)[:]) + @show M = M * LinearAlgebra.Diagonal(1 ./ sum(M, dims=1)[:]) @show sum(M,1) @assert all(1.01 .>= sum(M, 1).>=0.999) return α*M .+ (1-α)*p diff --git a/test/centrality/radiality.jl b/test/centrality/radiality.jl index 0bbaaddb0..6d333432e 100644 --- a/test/centrality/radiality.jl +++ b/test/centrality/radiality.jl @@ -1,7 +1,7 @@ @testset "Radiality" begin gint = loadgraph(joinpath(testdir, "testdata", "graph-50-500.jgz"), "graph-50-500") - c = vec(readcsv(joinpath(testdir, "testdata", "graph-50-500-rc.txt"))) + c = vec(readdlm(joinpath(testdir, "testdata", "graph-50-500-rc.txt"), ',')) for g in testdigraphs(gint) z = @inferred(radiality_centrality(g)) zp = @inferred(parallel_radiality_centrality(g)) diff --git a/test/centrality/stress.jl b/test/centrality/stress.jl index 14b625140..1423332b7 100644 --- a/test/centrality/stress.jl +++ b/test/centrality/stress.jl @@ -1,7 +1,7 @@ @testset "Stress" begin gint = loadgraph(joinpath(testdir, "testdata", "graph-50-500.jgz"), "graph-50-500") - c = vec(readcsv(joinpath(testdir, "testdata", "graph-50-500-sc.txt"))) + c = vec(readdlm(joinpath(testdir, "testdata", "graph-50-500-sc.txt"), ',')) for g in testdigraphs(gint) z = @inferred(stress_centrality(g)) zp = @inferred(parallel_stress_centrality(g)) diff --git a/test/community/clique_percolation.jl b/test/community/clique_percolation.jl new file mode 100644 index 000000000..190634f20 --- /dev/null +++ b/test/community/clique_percolation.jl @@ -0,0 +1,19 @@ +@testset "Clique percolation" begin + function setofsets(array_of_arrays) + Set(map(BitSet, array_of_arrays)) + end + + function test_cliques(graph, expected) + # Make test results insensitive to ordering + Set(clique_percolation(graph)) == setofsets(expected) + end + + g = Graph(5) + add_edge!(g, 1, 2) + add_edge!(g, 2, 3) + add_edge!(g, 3, 1) + add_edge!(g, 1, 4) + add_edge!(g, 4, 5) + add_edge!(g, 5, 1) + @test test_cliques(g, Array[[1, 2, 3], [1, 4, 5]]) +end diff --git a/test/community/core-periphery.jl b/test/community/core-periphery.jl index 
eb1f2427d..570fbe9d0 100644 --- a/test/community/core-periphery.jl +++ b/test/community/core-periphery.jl @@ -10,7 +10,7 @@ end g10 = StarGraph(10) - g10 = blkdiag(g10, g10) + g10 = SparseArrays.blockdiag(g10, g10) add_edge!(g10, 1, 11) for g in testgraphs(g10) c = @inferred(core_periphery_deg(g)) diff --git a/test/community/label_propagation.jl b/test/community/label_propagation.jl index 5b5ae1d0d..037c0f2cb 100644 --- a/test/community/label_propagation.jl +++ b/test/community/label_propagation.jl @@ -2,17 +2,17 @@ n = 10 g10 = CompleteGraph(n) for g in testgraphs(g10) - z = copy(g) - for k = 2:5 - z = blkdiag(z, g) - add_edge!(z, (k - 1) * n, k * n) - c, ch = @inferred(label_propagation(z)) - a = collect(n:n:(k * n)) - a = Int[div(i - 1, n) + 1 for i = 1:(k * n)] + z = copy(g) + for k = 2:5 + z = SparseArrays.blockdiag(z, g) + add_edge!(z, (k - 1) * n, k * n) + c, ch = @inferred(label_propagation(z)) + a = collect(n:n:(k * n)) + a = Int[div(i - 1, n) + 1 for i = 1:(k * n)] # check the number of communities - @test length(unique(a)) == length(unique(c)) + @test length(unique(a)) == length(unique(c)) # check the partition - @test a == c - end + @test a == c + end end end diff --git a/test/connectivity.jl b/test/connectivity.jl index 2bfacc6bf..1471f8ea1 100644 --- a/test/connectivity.jl +++ b/test/connectivity.jl @@ -128,12 +128,12 @@ # figure 2 example fig2 = spzeros(5, 5) - fig2[[3, 10, 11, 13, 14, 17, 18, 19, 22]] = 1 + fig2[[3, 10, 11, 13, 14, 17, 18, 19, 22]] .= 1 fig2 = SimpleDiGraph(fig2) # figure 3 example fig3 = spzeros(8, 8) - fig3[[1, 7, 9, 13, 14, 15, 18, 20, 23, 27, 28, 31, 33, 34, 37, 45, 46, 49, 57, 63, 64]] = 1 + fig3[[1, 7, 9, 13, 14, 15, 18, 20, 23, 27, 28, 31, 33, 34, 37, 45, 46, 49, 57, 63, 64]] .= 1 fig3 = SimpleDiGraph(fig3) scc_fig3 = Vector[[3, 4], [2, 5, 6], [8], [1, 7]] fig3_cond = SimpleDiGraph(4); @@ -143,15 +143,13 @@ # construct a n-number edge ring graph (period = n) n = 10 - n_ring_m = spdiagm(ones(n - 1), 1, n, n); n_ring_m[end, 1] = 1 - n_ring = SimpleDiGraph(n_ring_m) - + n_ring = CycleDiGraph(n) n_ring_shortcut = copy(n_ring); add_edge!(n_ring_shortcut, 1, 4) # figure 8 example fig8 = spzeros(6, 6) - fig8[[2, 10, 13, 21, 24, 27, 35]] = 1 + fig8[[2, 10, 13, 21, 24, 27, 35]] .= 1 fig8 = SimpleDiGraph(fig8) @test Set(@inferred(strongly_connected_components(fig1))) == Set(scc_fig1) diff --git a/test/digraph/transitivity.jl b/test/digraph/transitivity.jl index af9eb2d4a..4ee124735 100644 --- a/test/digraph/transitivity.jl +++ b/test/digraph/transitivity.jl @@ -27,4 +27,79 @@ @test newcircle == @inferred(transitiveclosure!(circle, true)) @test ne(circle) == 16 end + + # transitivereduction + let + # transitive reduction of the nullgraph is again a nullgraph + nullgraph = SimpleDiGraph(0) + for g in testdigraphs(nullgraph) + @test g == @inferred(transitivereduction(g)) + end + + # transitive reduction of a path is a path again + pathgraph = PathDiGraph(10) + for g in testdigraphs(pathgraph) + @test g == @inferred(transitivereduction(g)) + end + + # transitive reduction of the transitive closure of a path is a path again + for g in testdigraphs(pathgraph) + gclosure = transitiveclosure(g) + @test g == @inferred(transitivereduction(gclosure)) + end + + # Transitive reduction of a complete graph should be s simple cycle + completegraph = CompleteDiGraph(9) + for g in testdigraphs(completegraph) + greduced = @inferred(transitivereduction(g)) + @test length(strongly_connected_components(greduced)) == 1 && + ne(greduced) == nv(g) + end + + # transitive reduction a 
graph with no edges is the same graph again + noedgegraph = SimpleDiGraph(7) + for g in testdigraphs(noedgegraph) + @test g == @inferred(transitivereduction(g)) + end + + # transitve reduction should maintain a selfloop only when selflooped==true + selfloopgraph = SimpleDiGraph(1) + add_edge!(selfloopgraph, 1, 1) + for g in testdigraphs(selfloopgraph) + @test g == @inferred(transitivereduction(g; selflooped=true)); + @test g != @inferred(transitivereduction(g; selflooped=false)); + end + + # transitive should not maintain selfloops for strongly connected components + # of size > 1 + selfloopgraph2 = SimpleDiGraph(2) + add_edge!(selfloopgraph2, 1, 1) + add_edge!(selfloopgraph2, 1, 2) + add_edge!(selfloopgraph2, 2, 1) + add_edge!(selfloopgraph2, 2, 2) + for g in testdigraphs(selfloopgraph2) + @test g != @inferred(transitivereduction(g; selflooped=true)); + end + + # directed barbell graph should result in two cycles connected by a single edge + barbellgraph = SimpleDiGraph(9) + for i in 1:4, j in 1:4 + i == j && continue + add_edge!(barbellgraph, i, j) + add_edge!(barbellgraph, j, i) + end + for i in 5:9, j in 5:9 + i == j && continue + add_edge!(barbellgraph, i, j) + add_edge!(barbellgraph, j, i) + end + add_edge!(barbellgraph, 1, 5); + for g in testdigraphs(barbellgraph) + greduced = @inferred(transitivereduction(g)) + scc = strongly_connected_components(greduced) + @test Set(scc) == Set([[1:4;], [5:9;]]) + @test ne(greduced) == 10 + @test length(weakly_connected_components(greduced)) == 1 + end + end end diff --git a/test/flow/boykov_kolmogorov.jl b/test/flow/boykov_kolmogorov.jl deleted file mode 100644 index 535369c57..000000000 --- a/test/flow/boykov_kolmogorov.jl +++ /dev/null @@ -1,30 +0,0 @@ -@testset "Boykov Kolmogorov" begin - # construct graph - gg = SimpleDiGraph(3) - add_edge!(gg, 1, 2) - add_edge!(gg, 2, 3) - - # source and sink terminals - source, target = 1, 3 - - - for g in testdigraphs(gg) - # default capacity - capacity_matrix = LightGraphs.DefaultCapacity(g) - residual_graph = @inferred(LightGraphs.residual(g)) - T = eltype(g) - flow_matrix = zeros(T, 3, 3) - TREE = zeros(T, 3) - TREE[source] = T(1) - TREE[target] = T(2) - PARENT = zeros(T, 3) - A = [T(source), T(target)] -# see https://github.com/JuliaLang/julia/issues/21077 -# @show("testing $g with eltype $T, residual_graph type is $(eltype(residual_graph)), flow_matrix type is $(eltype(flow_matrix)), capacity_matrix type is $(eltype(capacity_matrix))") - path = LightGraphs.find_path!( - residual_graph, source, target, flow_matrix, - capacity_matrix, PARENT, TREE, A) - - @test path == [1, 2, 3] - end -end diff --git a/test/flow/dinic.jl b/test/flow/dinic.jl deleted file mode 100644 index 7fd9ff073..000000000 --- a/test/flow/dinic.jl +++ /dev/null @@ -1,62 +0,0 @@ -@testset "Dinic" begin - # Construct DiGraph - flow_graph = SimpleDiGraph(8) - - # Load custom dataset - flow_edges = [ - (1, 2, 10), (1, 3, 5), (1, 4, 15), (2, 3, 4), (2, 5, 9), - (2, 6, 15), (3, 4, 4), (3, 6, 8), (4, 7, 16), (5, 6, 15), - (5, 8, 10), (6, 7, 15), (6, 8, 10), (7, 3, 6), (7, 8, 10) - ] - - capacity_matrix = zeros(Int, nv(flow_graph), nv(flow_graph)) - - for e in flow_edges - u, v, f = e - add_edge!(flow_graph, u, v) - capacity_matrix[u, v] = f - end - - # Construct the residual graph - for fg in (flow_graph, DiGraph{UInt8}(flow_graph), DiGraph{Int16}(flow_graph)) - residual_graph = @inferred(LightGraphs.residual(fg)) - - # Test with default distances - @test @inferred(LightGraphs.dinic_impl(residual_graph, 1, 8, 
LightGraphs.DefaultCapacity(residual_graph)))[1] == 3 - - # Test with capacity matrix - @test @inferred(LightGraphs.dinic_impl(residual_graph, 1, 8, capacity_matrix))[1] == 28 - - # Test on disconnected graphs - function test_blocking_flow(residual_graph, source, target, capacity_matrix, flow_matrix) - #disconnect source - h = copy(residual_graph) - for dst in collect(neighbors(residual_graph, source)) - rem_edge!(h, source, dst) - end - @test @inferred(LightGraphs.blocking_flow(h, source, target, capacity_matrix, flow_matrix)) == 0 - - #disconnect target and add unreachable vertex - h = copy(residual_graph) - for src in collect(in_neighbors(residual_graph, target)) - rem_edge!(h, src, target) - end - @test @inferred(LightGraphs.blocking_flow(h, source, target, capacity_matrix, flow_matrix)) == 0 - - # unreachable vertex (covers the case where a vertex isn't reachable from the source) - h = copy(residual_graph) - add_vertex!(h) - add_edge!(h, nv(residual_graph) + 1, target) - capacity_matrix_ = vcat(hcat(capacity_matrix, zeros(Int, nv(residual_graph))), zeros(Int, 1, nv(residual_graph) + 1)) - flow_graph_ = vcat(hcat(flow_matrix, zeros(Int, nv(residual_graph))), zeros(Int, 1, nv(residual_graph) + 1)) - - @test @inferred(LightGraphs.blocking_flow(h, source, target, capacity_matrix_, flow_graph_)) > 0 - - #test with connected graph - @test @inferred(LightGraphs.blocking_flow(residual_graph, source, target, capacity_matrix, flow_matrix)) > 0 - end - - flow_matrix = zeros(Int, nv(residual_graph), nv(residual_graph)) - test_blocking_flow(residual_graph, 1, 8, capacity_matrix, flow_matrix) - end -end diff --git a/test/flow/edmonds_karp.jl b/test/flow/edmonds_karp.jl deleted file mode 100644 index baa8e9fa8..000000000 --- a/test/flow/edmonds_karp.jl +++ /dev/null @@ -1,59 +0,0 @@ -@testset "Edmonds Karp" begin - # Construct DiGraph - flow_graph = SimpleDiGraph(8) - - # Load custom dataset - flow_edges = [ - (1, 2, 10), (1, 3, 5), (1, 4, 15), (2, 3, 4), (2, 5, 9), - (2, 6, 15), (3, 4, 4), (3, 6, 8), (4, 7, 16), (5, 6, 15), - (5, 8, 10), (6, 7, 15), (6, 8, 10), (7, 3, 6), (7, 8, 10) - ] - - for fg in (flow_graph, DiGraph{UInt8}(flow_graph), DiGraph{Int16}(flow_graph)) - capacity_matrix = zeros(Int, 8, 8) - for e in flow_edges - u, v, f = e - add_edge!(fg, u, v) - capacity_matrix[u, v] = f - end - residual_graph = @inferred(LightGraphs.residual(fg)) - - # Test with default distances - @test @inferred(LightGraphs.edmonds_karp_impl(residual_graph, 1, 8, LightGraphs.DefaultCapacity(residual_graph)))[1] == 3 - - # Test with capacity matrix - @test @inferred(LightGraphs.edmonds_karp_impl(residual_graph, 1, 8, capacity_matrix))[1] == 28 - - # Test the types of the values returned by fetch_path - function test_find_path_types(residual_graph, s, t, flow_matrix, capacity_matrix) - v, P, S, flag = LightGraphs.fetch_path(residual_graph, s, t, flow_matrix, capacity_matrix) - @test typeof(P) == Vector{Int} - @test typeof(S) == Vector{Int} - @test typeof(flag) == Int - @test typeof(v) == eltype(residual_graph) - end - - # Test the value of the flags returned. 
- function test_find_path_disconnected(residual_graph, s, t, flow_matrix, capacity_matrix) - h = copy(residual_graph) - for dst in collect(neighbors(residual_graph, s)) - rem_edge!(residual_graph, s, dst) - end - v, P, S, flag = LightGraphs.fetch_path(residual_graph, s, t, flow_matrix, capacity_matrix) - @test flag == 1 - for dst in collect(neighbors(h, t)) - rem_edge!(h, t, dst) - end - v, P, S, flag = LightGraphs.fetch_path(h, s, t, flow_matrix, capacity_matrix) - @test flag == 0 - for i in collect(in_neighbors(h, t)) - rem_edge!(h, i, t) - end - v, P, S, flag = LightGraphs.fetch_path(h, s, t, flow_matrix, capacity_matrix) - @test flag == 2 - end - flow_matrix = zeros(Int, nv(residual_graph), nv(residual_graph)) - test_find_path_types(residual_graph, 1, 8, flow_matrix, capacity_matrix) - test_find_path_disconnected(residual_graph, 1, 8, flow_matrix, capacity_matrix) - end -end diff --git a/test/flow/maximum_flow.jl b/test/flow/maximum_flow.jl deleted file mode 100644 index 8cfa4895d..000000000 --- a/test/flow/maximum_flow.jl +++ /dev/null @@ -1,57 +0,0 @@ -@testset "Maximum flow" begin - #### Graphs for testing - graphs = [ - # Graph with 8 vertices - (8, - [ - (1, 2, 10), (1, 3, 5), (1, 4, 15), (2, 3, 4), (2, 5, 9), - (2, 6, 15), (3, 4, 4), (3, 6, 8), (4, 7, 16), (5, 6, 15), - (5, 8, 10), (6, 7, 15), (6, 8, 10), (7, 3, 6), (7, 8, 10) - ], - 1, 8, # source/target - 3, # answer for default capacity - 28, # answer for custom capacity - 15, 5 # answer for restricted capacity/restriction - ), - - # Graph with 6 vertices - (6, - [ - (1, 2, 9), (1, 3, 9), (2, 3, 10), (2, 4, 8), (3, 4, 1), - (3, 5, 3), (5, 4, 8), (4, 6, 10), (5, 6, 7) - ], - 1, 6, # source/target - 2, # answer for default capacity - 12, # answer for custom capacity - 8, 5 # answer for restricted capacity/restriction - ) - ] - - for (nvertices, flow_edges, s, t, fdefault, fcustom, frestrict, caprestrict) in graphs - flow_graph = SimpleDiGraph(nvertices) - for g in testdigraphs(flow_graph) - capacity_matrix = zeros(Int, nvertices, nvertices) - for e in flow_edges - u, v, f = e - add_edge!(g, u, v) - capacity_matrix[u, v] = f - end - - # Test DefaultCapacity - d = @inferred(LightGraphs.DefaultCapacity(g)) - T = eltype(d) - @test typeof(d) <: AbstractMatrix{T} - @test d[s, t] == 0 - @test size(d) == (nvertices, nvertices) - @test typeof(transpose(d)) <: LightGraphs.DefaultCapacity - @test typeof(ctranspose(d)) <: LightGraphs.DefaultCapacity - - # Test all algorithms - type instability in PushRelabel #553 - for ALGO in [EdmondsKarpAlgorithm, DinicAlgorithm, BoykovKolmogorovAlgorithm, PushRelabelAlgorithm] - @test maximum_flow(g, s, t, algorithm=ALGO())[1] == fdefault - @test maximum_flow(g, s, t, capacity_matrix, algorithm=ALGO())[1] == fcustom - @test maximum_flow(g, s, t, capacity_matrix, algorithm=ALGO(), restriction=caprestrict)[1] == frestrict - end - end - end -end diff --git a/test/flow/multiroute_flow.jl b/test/flow/multiroute_flow.jl deleted file mode 100644 index 4300c880c..000000000 --- a/test/flow/multiroute_flow.jl +++ /dev/null @@ -1,86 +0,0 @@ -@testset "Multiroute flow" begin - #### Graphs for testing - graphs = [ - # Graph with 8 vertices - (8, - [ - (1, 2, 10), (1, 3, 5), (1, 4, 15), (2, 3, 4), (2, 5, 9), - (2, 6, 15), (3, 4, 4), (3, 6, 8), (4, 7, 16), (5, 6, 15), - (5, 8, 10), (6, 7, 15), (6, 8, 10), (7, 3, 6), (7, 8, 10), - (8, 1, 8) # Reverse edge to test the slope in EMRF - ], - 1, 8, # source/target - [28, 28, 15, 0], # answer for 1 to 4 route(s) flows - [(0., 0., 3), (5., 15., 2), # breaking points - (10., 
25., 1), (13., 28., 0)], - 28. # value for 1.5 routes - ), - - # Graph with 6 vertices - (6, - [ - (1, 2, 9), (1, 3, 9), (2, 3, 10), (2, 4, 8), (3, 4, 1), - (3, 5, 3), (5, 4, 8), (4, 6, 10), (5, 6, 7) - ], - 1, 6, # source/target - [12, 6, 0], # answer for 1 to 3 route(s) flows - [(0., 0., 2), (3., 6., 1), (9., 12., 0)], # breaking points - 9. # value for 1.5 routes - ), - - # Graph with 7 vertices - (7, - [ - (1, 2, 1), (1, 3, 2), (1, 4, 3), (1, 5, 4), (1, 6, 5), - (2, 7, 1), (3, 7, 2), (4, 7, 3), (5, 7, 4), (6, 7, 5) - ], - 1, 7, # source/target - [15, 15, 15, 12, 5, 0], # answer for 1 to 6 route(s) flows - [(0., 0., 5), (1., 5., 4), (2., 9., 3), # breaking points - (3., 12., 2), (4., 14., 1), (5., 15., 0)], - 15. # value for 1.5 routes - ), - - # Graph with 6 vertices - (6, - [ - (1, 2, 1), (1, 3, 1), (1, 4, 2), (1, 5, 2), - (2, 6, 1), (3, 6, 1), (4, 6, 2), (5, 6, 2), - ], - 1, 6, # source/target - [6, 6, 6, 4, 0], # answer for 1 to 5 route(s) flows - [(0., 0., 4), (1., 4., 2), (2., 6., 0)], # breaking points - 6. # value for 1.5 routes - ) - ] - - for (nvertices, flow_edges, s, t, froutes, breakpts, ffloat) in graphs - flow_graph = SimpleDiGraph(nvertices) - for g in testdigraphs(flow_graph) - capacity_matrix = zeros(Int, nvertices, nvertices) - for e in flow_edges - u, v, f = e - add_edge!(g, u, v) - capacity_matrix[u, v] = f - end - # Test ExtendedMultirouteFlowAlgorithm when the number of routes is either - # Noninteger or 0 (the algorithm returns the breaking points) - @test multiroute_flow(g, s, t, capacity_matrix) == breakpts - @test multiroute_flow(g, s, t, capacity_matrix, routes=1.5)[1] ≈ ffloat - @test multiroute_flow(breakpts, 1.5)[2] ≈ ffloat - - # Test all other algorithms - PR is unstable - see #553 - for AlgoFlow in [EdmondsKarpAlgorithm, DinicAlgorithm, BoykovKolmogorovAlgorithm, PushRelabelAlgorithm] - # When the input are breaking points and routes number - @test multiroute_flow(breakpts, 1.5, g, s, t, capacity_matrix)[1] ≈ ffloat - for AlgoMrf in [ExtendedMultirouteFlowAlgorithm, KishimotoAlgorithm] - for (k, val) in enumerate(froutes) - @test multiroute_flow(g, s, t, capacity_matrix, - flow_algorithm = AlgoFlow(), mrf_algorithm = AlgoMrf(), - routes = k)[1] ≈ val - end - end - end - end - end -end diff --git a/test/flow/push_relabel.jl b/test/flow/push_relabel.jl deleted file mode 100644 index ee8b3bdde..000000000 --- a/test/flow/push_relabel.jl +++ /dev/null @@ -1,94 +0,0 @@ -@testset "Push relabel" begin - # Construct DiGraph - flow_graph = SimpleDiGraph(8) - - # Load custom dataset - flow_edges = [ - (1, 2, 10), (1, 3, 5), (1, 4, 15), (2, 3, 4), (2, 5, 9), - (2, 6, 15), (3, 4, 4), (3, 6, 8), (4, 7, 16), (5, 6, 15), - (5, 8, 10), (6, 7, 15), (6, 8, 10), (7, 3, 6), (7, 8, 10) - ] - - capacity_matrix = zeros(Int, 8, 8) - - for e in flow_edges - u, v, f = e - add_edge!(flow_graph, u, v) - capacity_matrix[u, v] = f - end - for g in testdigraphs(flow_graph) - residual_graph = @inferred(LightGraphs.residual(g)) - - # Test enqueue_vertex - Q = Array{Int,1}() - excess = [0, 1, 0, 1] - active = [false, false, true, true] - @test @inferred(LightGraphs.enqueue_vertex!(Q, 1, active, excess)) == nothing - @test @inferred(LightGraphs.enqueue_vertex!(Q, 3, active, excess)) == nothing - @test @inferred(LightGraphs.enqueue_vertex!(Q, 4, active, excess)) == nothing - @test length(Q) == 0 - @test @inferred(LightGraphs.enqueue_vertex!(Q, 2, active, excess)) == nothing - @test length(Q) == 1 - - # Test push_flow - Q = Array{Int,1}() - excess = [15, 1, 1, 0, 0, 0, 0, 0] - height = [8, 
0, 0, 0, 0, 0, 0, 0] - active = [true, false, false, false, false, false, false, true] - flow_matrix = zeros(Int, 8, 8) - @test @inferred(LightGraphs.push_flow!(residual_graph, 1, 2, capacity_matrix, flow_matrix, excess, height, active, Q)) == nothing - @test length(Q) == 1 - @test flow_matrix[1, 2] == 10 - @test @inferred(LightGraphs.push_flow!(residual_graph, 2, 3, capacity_matrix, flow_matrix, excess, height, active, Q)) == nothing - @test length(Q) == 1 - @test flow_matrix[2, 3] == 0 - - # Test gap - Q = Array{Int,1}() - excess = [15, 1, 1, 0, 0, 0, 0, 0] - height = [8, 2, 2, 1, 3, 3, 4, 5] - active = [true, false, false, false, false, false, false, true] - count = [0, 1, 2, 2, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0] - flow_matrix = zeros(Int, 8, 8) - - @test @inferred(LightGraphs.gap!(residual_graph, 1, excess, height, active, count, Q)) == nothing - @test length(Q) == 2 - - # Test relabel - Q = Array{Int,1}() - excess = [15, 1, 1, 0, 0, 0, 0, 0] - height = [8, 1, 1, 1, 1, 1, 1, 0] - active = [true, false, false, false, false, false, false, true] - count = [1, 6, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0] - flow_matrix = zeros(Int, 8, 8) - - @test @inferred(LightGraphs.relabel!(residual_graph, 2, capacity_matrix, flow_matrix, excess, height, active, count, Q)) == nothing - @test length(Q) == 1 - - # Test discharge - Q = Array{Int,1}() - excess = [50, 1, 1, 0, 0, 0, 0, 0] - height = [8, 0, 0, 0, 0, 0, 0, 0] - active = [true, false, false, false, false, false, false, true] - count = [7, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] - flow_matrix = zeros(Int, 8, 8) - - @test @inferred(LightGraphs.discharge!(residual_graph, 1, capacity_matrix, flow_matrix, excess, height, active, count, Q)) == nothing - @test length(Q) == 3 - - # Test with default distances - @test LightGraphs.push_relabel(residual_graph, 1, 8, LightGraphs.DefaultCapacity(residual_graph))[1] == 3 - - # Test with capacity matrix - @test LightGraphs.push_relabel(residual_graph, 1, 8, capacity_matrix)[1] == 28 - end - # Non regression test added for #448 - M448 = [0 1 0 0 1 1 - 1 0 0 0 1 0 - 0 0 0 1 0 0 - 0 0 0 0 0 0 - 1 0 1 0 0 1 - 0 0 0 0 1 0] - g448 = SimpleDiGraph(M448) - @test maximum_flow(g448, 1, 2, M448, algorithm=PushRelabelAlgorithm())[1] == 1 -end diff --git a/test/generators/binomial.jl b/test/generators/binomial.jl index 37a741157..f7cf13a24 100644 --- a/test/generators/binomial.jl +++ b/test/generators/binomial.jl @@ -5,6 +5,7 @@ using Distributions using LightGraphs using StatsBase using Base.Test +import Random import Base: - import LightGraphs: randbn @@ -37,7 +38,7 @@ function binomial_test(n, p, s) @show dσ - lσ @test abs(dσ - lσ) / dσ < .10 end -srand(1234) +Random.srand(1234) n = 10000 p = 0.3 s = 100000 diff --git a/test/generators/randgraphs.jl b/test/generators/randgraphs.jl index 9ffb0e927..e3b7d1497 100644 --- a/test/generators/randgraphs.jl +++ b/test/generators/randgraphs.jl @@ -1,6 +1,6 @@ @testset "Randgraphs" begin - r1 = SimpleGraph(10,20) - r2 = SimpleDiGraph(5,10) + r1 = SimpleGraph(10, 20) + r2 = SimpleDiGraph(5, 10) @test nv(r1) == 10 @test ne(r1) == 20 @@ -11,15 +11,15 @@ @test eltype(Graph(0x5, 0x2)) == eltype(Graph(0x5, 2)) == UInt8 for T in [UInt8, Int8, UInt16, Int16, UInt32, Int32, UInt, Int] - @test eltype(Graph{T}(5,2)) == T - @test eltype(DiGraph{T}(5,2)) == T + @test eltype(Graph{T}(5, 2)) == T + @test eltype(DiGraph{T}(5, 2)) == T end - @test SimpleGraph(10,20,seed=3) == SimpleGraph(10,20,seed=3) - @test SimpleDiGraph(10,20,seed=3) == SimpleDiGraph(10,20,seed=3) - @test 
SimpleGraph(10,20,seed=3) == erdos_renyi(10,20,seed=3) - @test ne(Graph(10,40,seed=3)) == 40 - @test ne(DiGraph(10,80,seed=3)) == 80 + @test SimpleGraph(10, 20, seed=3) == SimpleGraph(10, 20, seed=3) + @test SimpleDiGraph(10, 20, seed=3) == SimpleDiGraph(10, 20, seed=3) + @test SimpleGraph(10, 20, seed=3) == erdos_renyi(10, 20, seed=3) + @test ne(Graph(10, 40, seed=3)) == 40 + @test ne(DiGraph(10, 80, seed=3)) == 80 er = erdos_renyi(10, 0.5) @test nv(er) == 10 @@ -32,8 +32,20 @@ @test nv(er) == 10 @test is_directed(er) == false + cl = expected_degree_graph(zeros(10), seed=17) + @test nv(cl) == 10 + @test ne(cl) == 0 + @test is_directed(cl) == false - ws = watts_strogatz(10,4,0.2) + cl = expected_degree_graph([3, 2, 1, 2], seed=17) + @test nv(cl) == 4 + @test is_directed(cl) == false + + cl = expected_degree_graph(fill(99, 100), seed=17) + @test nv(cl) == 100 + @test all(degree(cl) .> 90) + + ws = watts_strogatz(10, 4, 0.2) @test nv(ws) == 10 @test ne(ws) == 20 @test is_directed(ws) == false @@ -128,7 +140,7 @@ @test ne(rr) == 0 @test is_directed(rr) == false - rd = random_regular_digraph(10,0) + rd = random_regular_digraph(10, 0) @test nv(rd) == 10 @test ne(rd) == 0 @test is_directed(rd) @@ -146,7 +158,7 @@ @test degree(rr, v) == 50 end - rr = random_configuration_model(10, repmat([2,4] ,5), seed=3) + rr = random_configuration_model(10, repeat([2,4], 5), seed=3) @test nv(rr) == 10 @test ne(rr) == 15 @test is_directed(rr) == false @@ -159,7 +171,7 @@ @test num4 == 5 @test num2 == 5 - rr = random_configuration_model(1000, zeros(Int,1000)) + rr = random_configuration_model(1000, zeros(Int, 1000)) @test nv(rr) == 1000 @test ne(rr) == 0 @test is_directed(rr) == false @@ -173,13 +185,15 @@ @test nv(rd) == 1000 @test ne(rd) == 4000 @test is_directed(rd) - @test std(outdegree(rd)) == 0 + outdegree_rd = @inferred(outdegree(rd)) + @test all(outdegree_rd .== outdegree_rd[1]) rd = random_regular_digraph(1000, 4, dir=:in) @test nv(rd) == 1000 @test ne(rd) == 4000 @test is_directed(rd) - @test std(indegree(rd)) == 0 + indegree_rd = @inferred(indegree(rd)) + @test all(indegree_rd .== indegree_rd[1]) rr = random_regular_graph(10, 8, seed=4) @test nv(rr) == 10 @@ -242,13 +256,13 @@ bp = blockfractions(sbm, g) ./ (sizes * sizes') ratios = bp ./ (sbm.affinities ./ sum(sbm.affinities)) test_sbm(sbm, bp) - @test norm(collect(ratios)) < 0.25 + @test LinearAlgebra.norm(collect(ratios)) < 0.25 sizes = [200, 200, 100] internaldeg = 15 externaldeg = 6 - internalp = Float64[internaldeg/i for i in sizes] - externalp = externaldeg/sum(sizes) + internalp = Float64[internaldeg / i for i in sizes] + externalp = externaldeg / sum(sizes) numedges = internaldeg + externaldeg #+ sum(externaldeg.*sizes[2:end]) numedges *= div(sum(sizes), 2) sbm = StochasticBlockModel(internalp, externalp, sizes) @@ -260,23 +274,23 @@ bp = blockfractions(sbm, g) ./ (sizes * sizes') test_sbm(sbm, bp) ratios = bp ./ (sbm.affinities ./ sum(sbm.affinities)) - @test norm(collect(ratios)) < 0.25 + @test LinearAlgebra.norm(collect(ratios)) < 0.25 # check that average degree is not too high # factor of two is cushion for random process - @test mean(degree(g)) <= 4//2*numedges/sum(sizes) + @test mean(degree(g)) <= 4 // 2 * numedges / sum(sizes) # check that the internal degrees are higher than the external degrees # 5//4 is cushion for random process. 
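    # The rewritten assertion just below spells these reductions the Julia 0.7
    # way. A minimal sketch of the renames it relies on, using a throwaway
    # dense matrix A that is purely illustrative and not part of the test:
    using LinearAlgebra
    A = [1 2; 3 4]
    sum(A, dims=1)           # column sums; was sum(A, 1) on Julia 0.6
    A - diagm(0 => diag(A))  # strip the diagonal; was A - diagm(diag(A))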
- @test all(sum(bc-diagm(diag(bc)), 1) .<= 5//4 .* diag(bc)) + @test all(sum(bc - LinearAlgebra.diagm(0 => SparseArrays.diag(bc)), dims=1) .<= 5 // 4 .* SparseArrays.diag(bc)) - sbm2 = StochasticBlockModel(0.5*ones(4), 0.3, 10*ones(Int,4)) + sbm2 = StochasticBlockModel(0.5 * ones(4), 0.3, 10 * ones(Int, 4)) sbm = StochasticBlockModel(0.5, 0.3, 10, 4) @test sbm == sbm2 sbm.affinities[1,1] = 0 @test sbm != sbm2 - kg = @inferred kronecker(5,5) + kg = @inferred kronecker(5, 5) @test nv(kg) == 32 @test is_directed(kg) end diff --git a/test/generators/smallgraphs.jl b/test/generators/smallgraphs.jl index 73d4f84d4..6cf960ce0 100644 --- a/test/generators/smallgraphs.jl +++ b/test/generators/smallgraphs.jl @@ -24,7 +24,7 @@ @test nv(g) == 20 && ne(g) == 30 g = smallgraph(:frucht) - @test nv(g) == 20 && ne(g) == 18 + @test nv(g) == 12 && ne(g) == 18 g = smallgraph(:heawood) @test nv(g) == 14 && ne(g) == 21 diff --git a/test/generators/staticgraphs.jl b/test/generators/staticgraphs.jl index d8803d81e..79273bd36 100644 --- a/test/generators/staticgraphs.jl +++ b/test/generators/staticgraphs.jl @@ -60,8 +60,8 @@ I = [1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9] J = [2, 3, 4, 1, 5, 1, 6, 1, 5, 6, 7, 2, 4, 8, 3, 4, 9, 4, 8, 9, 5, 7, 6, 7] V = ones(Int, length(I)) - Adj = sparse(I, J, V) - @test Adj == sparse(g) + Adj = SparseArrays.sparse(I, J, V) + @test Adj == SparseArrays.sparse(g) g = @inferred(DoubleBinaryTree(3)) # [[3, 2, 8] @@ -81,8 +81,8 @@ I = [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 12, 13, 14] J = [3, 2, 8, 4, 1, 5, 1, 6, 7, 2, 2, 3, 3, 10, 9, 1, 11, 8, 12, 8, 13, 14, 9, 9, 10, 10] V = ones(Int, length(I)) - Adj = sparse(I, J, V) - @test Adj == sparse(g) + Adj = SparseArrays.sparse(I, J, V) + @test Adj == SparseArrays.sparse(g) rg3 = @inferred(RoachGraph(3)) # [3] @@ -100,6 +100,6 @@ I = [1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 12, 12] J = [3, 4, 1, 5, 2, 6, 3, 7, 4, 8, 9, 8, 5, 10, 7, 6, 11, 10, 7, 8, 9, 12, 9, 12, 10, 11] V = ones(Int, length(I)) - Adj = sparse(I, J, V) - @test Adj == sparse(rg3) + Adj = SparseArrays.sparse(I, J, V) + @test Adj == SparseArrays.sparse(rg3) end diff --git a/test/graphcut/normalized_cut.jl b/test/graphcut/normalized_cut.jl index bde745955..6eaeedd82 100644 --- a/test/graphcut/normalized_cut.jl +++ b/test/graphcut/normalized_cut.jl @@ -33,7 +33,7 @@ @test labels == [1, 1, 1, 2, 2, 2] || labels == [2, 2, 2, 1, 1, 1] end - w = SparseMatrixCSC(w) + w = SparseArrays.SparseMatrixCSC(w) for g in testgraphs(gx) labels = @inferred(normalized_cut(g, 1, w)) @test labels == [1, 1, 1, 2, 2, 2] || labels == [2, 2, 2, 1, 1, 1] @@ -53,7 +53,7 @@ @test labels == [1, 1, 2, 2] || labels == [2, 2, 1, 1] end - w = SparseMatrixCSC(w) + w = SparseArrays.SparseMatrixCSC(w) for g in testgraphs(gx) labels = @inferred(normalized_cut(g, 0.1, w)) @test labels == [1, 1, 2, 2] || labels == [2, 2, 1, 1] @@ -83,7 +83,7 @@ return changes == length(unique(labels)) - 1 end - num_subgraphs = Vector{Int}(9) + num_subgraphs = Vector{Int}(undef, 9) for t in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] labels = @inferred(normalized_cut(g, t)) diff --git a/test/interface.jl b/test/interface.jl index b18919805..30a777cc3 100644 --- a/test/interface.jl +++ b/test/interface.jl @@ -19,18 +19,18 @@ mutable struct DummyEdge <: AbstractEdge{Int} end for graphfunbasic in [ nv, ne, vertices, edges, is_directed, - add_vertex!, edgetype, eltype, zero + edgetype, eltype, zero ] @test_throws ErrorException 
graphfunbasic(dummygraph) end for graphfun1int in [ - rem_vertex!, has_vertex, in_neighbors, out_neighbors + has_vertex, inneighbors, outneighbors ] @test_throws ErrorException graphfun1int(dummygraph, 1) end for graphfunedge in [ - has_edge, add_edge!, rem_edge! + has_edge, ] @test_throws ErrorException graphfunedge(dummygraph, dummyedge) end diff --git a/test/linalg/graphmatrices.jl b/test/linalg/graphmatrices.jl index 5bd7fb736..8da8a955b 100644 --- a/test/linalg/graphmatrices.jl +++ b/test/linalg/graphmatrices.jl @@ -2,7 +2,7 @@ @testset "Graph matrices" begin function converttest(T::Type, var) - @test typeof(convert(T, var)) == T + @test typeof(T(var)) == T end function constructors(mat) @@ -15,14 +15,14 @@ function test_adjacency(mat) adjmat, stochmat, adjhat, avgmat = constructors(mat) - @test adjmat.D == vec(sum(mat, 1)) + @test adjmat.D == vec(sum(mat, dims=1)) @test adjmat.A == mat - @test convert(SparseMatrix{Float64}, adjmat) == sparse(mat) - converttest(SparseMatrix{Float64}, stochmat) - converttest(SparseMatrix{Float64}, adjhat) - converttest(SparseMatrix{Float64}, avgmat) - @test isa(CombinatorialAdjacency(adjmat), CombinatorialAdjacency) - @test isa(CombinatorialAdjacency(avgmat), CombinatorialAdjacency) + @test isa(SparseArrays.sparse(mat), SparseArrays.SparseMatrixCSC) + @test isa(SparseArrays.sparse(stochmat), SparseArrays.SparseMatrixCSC) + @test isa(SparseArrays.sparse(adjhat), SparseArrays.SparseMatrixCSC) + @test isa(SparseArrays.sparse(avgmat), SparseArrays.SparseMatrixCSC) + @test isa(convert(CombinatorialAdjacency, adjmat), CombinatorialAdjacency) + @test isa(convert(CombinatorialAdjacency, avgmat), CombinatorialAdjacency) @test prescalefactor(adjhat) == postscalefactor(adjhat) @test postscalefactor(stochmat) == prescalefactor(avgmat) @test prescalefactor(adjhat) == postscalefactor(adjhat) @@ -40,7 +40,7 @@ @test typeof(AveragingAdjacency(adj)) <: AveragingAdjacency @test typeof(adjacency(lapl)) <: CombinatorialAdjacency - converttest(SparseMatrix{Float64}, lapl) + # converttest(SparseMatrix{Float64}, lapl) adjmat, stochmat, adjhat, avgmat = constructors(mat) @test typeof(adjacency(lapl)) <: CombinatorialAdjacency @@ -59,8 +59,10 @@ @test_throws MethodError NormalizedLaplacian(lapl) @test_throws MethodError AveragingLaplacian(lapl) @test_throws MethodError convert(CombinatorialAdjacency, lapl) - L = convert(SparseMatrix{Float64}, lapl) - @test sum(abs, (sum(L, 1))) == 0 + + L = SparseArrays.sparse(lapl) + + @test sum(abs, (sum(L, dims=1))) == 0 end function test_accessors(mat, n) @@ -86,18 +88,19 @@ @test sum(abs, (adjmat * onevec)) > 0.0 @test sum(abs, ((stochmat * onevec) / sum(onevec))) ≈ 1.0 @test sum(abs, (lapl * onevec)) == 0 - g(a) = sum(abs, (sum(sparse(a), 1))) + g(a) = sum(abs, (sum(SparseArrays.sparse(a), dims=1))) + @test g(lapl) == 0 @test g(NormalizedLaplacian(adjhat)) > 1e-13 @test g(StochasticLaplacian(stochmat)) > 1e-13 - @test eigs(adjmat, which=:LR)[1][1] > 1.0 - @test eigs(stochmat, which=:LR)[1][1] ≈ 1.0 - @test eigs(avgmat, which=:LR)[1][1] ≈ 1.0 - @test eigs(lapl, which=:LR)[1][1] > 2.0 - @test_throws MethodError eigs(lapl, which=:SM)[1][1] # --> greater_than(-0.0) + @test IterativeEigensolvers.eigs(adjmat, which=:LR)[1][1] > 1.0 + @test IterativeEigensolvers.eigs(stochmat, which=:LR)[1][1] ≈ 1.0 + @test IterativeEigensolvers.eigs(avgmat, which=:LR)[1][1] ≈ 1.0 + @test IterativeEigensolvers.eigs(lapl, which=:LR)[1][1] > 2.0 + @test_throws MethodError IterativeEigensolvers.eigs(lapl, which=:SM)[1][1] # --> greater_than(-0.0) lhat = 
NormalizedLaplacian(adjhat) - @test eigs(lhat, which=:LR)[1][1] < 2.0 + 1e-9 + @test IterativeEigensolvers.eigs(lhat, which=:LR)[1][1] < 2.0 + 1e-9 end function test_other(mat, n) @@ -109,20 +112,20 @@ @test_throws MethodError symmetrize(StochasticAdjacency(adjmat)) @test_throws MethodError symmetrize(AveragingAdjacency(adjmat)) - @test !issymmetric(AveragingAdjacency(adjmat)) - @test !issymmetric(StochasticAdjacency(adjmat)) + @test !LinearAlgebra.issymmetric(AveragingAdjacency(adjmat)) + @test !LinearAlgebra.issymmetric(StochasticAdjacency(adjmat)) @test_throws MethodError symmetrize(NormalizedAdjacency(adjmat)).A # --> adjmat.A begin @test CombinatorialAdjacency(mat) == CombinatorialAdjacency(mat) S = StochasticAdjacency(CombinatorialAdjacency(mat)) @test S.A == S.A - @test sparse(S) != S.A + @test SparseArrays.sparse(S) != S.A @test adjacency(S) == S.A @test NormalizedAdjacency(adjmat) != adjmat @test StochasticLaplacian(S) != adjmat @test_throws MethodError StochasticLaplacian(adjmat) # --> not(adjmat) - @test !issymmetric(S) + @test !LinearAlgebra.issymmetric(S) end end @@ -138,8 +141,8 @@ @test_throws MethodError symmetrize(NormalizedAdjacency(adjmat)).A # --> adjmat.A @test symmetrize(adjmat).A == adjmat.A # these tests are basically the code - @test symmetrize(adjmat, :triu).A == triu(adjmat.A) + triu(adjmat.A)' - @test symmetrize(adjmat, :tril).A == tril(adjmat.A) + tril(adjmat.A)' + @test symmetrize(adjmat, :triu).A == LinearAlgebra.triu(adjmat.A) + LinearAlgebra.triu(adjmat.A)' + @test symmetrize(adjmat, :tril).A == LinearAlgebra.tril(adjmat.A) + LinearAlgebra.tril(adjmat.A)' @test symmetrize(adjmat, :sum).A == adjmat.A + adjmat.A @test_throws ArgumentError symmetrize(adjmat, :fake) @@ -149,27 +152,28 @@ adjmat = CombinatorialAdjacency(mat) ahatp = PunchedAdjacency(adjmat) y = ahatp * perron(ahatp) - @test dot(y, ahatp.perron) ≈ 0.0 atol = 1.0e-8 + @test LinearAlgebra.dot(y, ahatp.perron) ≈ 0.0 atol = 1.0e-8 @test sum(abs, y) ≈ 0.0 atol = 1.0e-8 - eval, evecs = eigs(ahatp, which=:LM) + eval, evecs = IterativeEigensolvers.eigs(ahatp, which=:LM) @test eval[1] - (1 + 1.0e-8) <= 0 - @test dot(perron(ahatp), evecs[:, 1]) ≈ 0.0 atol = 1e-8 + @test LinearAlgebra.dot(perron(ahatp), evecs[:, 1]) ≈ 0.0 atol = 1e-8 ahat = ahatp.A @test isa(ahat, NormalizedAdjacency) z = ahatp * perron(ahat) - @test norm(z) ≈ 0.0 atol = 1e-8 + @test LinearAlgebra.norm(z) ≈ 0.0 atol = 1e-8 end n = 10 - mat = sparse(spones(sprand(n, n, 0.3))) + + mat = Float64.(sprand(Bool, n, n, 0.3)) test_adjacency(mat) test_laplacian(mat) test_accessors(mat, n) - mat = symmetrize(sparse(spones(sprand(n, n, 0.3)))) + mat = symmetrize(Float64.(sprand(Bool, n, n, 0.3))) test_arithmetic(mat, n) test_other(mat, n) test_symmetry(mat, n) @@ -179,7 +183,7 @@ """Computes the stationary distribution of a random walk""" function stationarydistribution(R::StochasticAdjacency; kwargs...) - er = eigs(R, nev=1, which=:LR; kwargs...) + er = IterativeEigensolvers.eigs(R, nev=1, which=:LR; kwargs...) l1 = er[1][1] abs(l1 - 1) < 1e-8 || error("failed to compute stationary distribution") # TODO 0.7: should we change the error type to InexactError? 
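    # er[2] holds the eigenvectors returned by eigs; the column paired with the
    # leading eigenvalue (just checked to be ≈ 1 for this stochastic operator)
    # is the stationary distribution up to scaling, so only its real part is
    # kept below.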
p = real(er[2][:, 1]) @@ -200,7 +204,7 @@ n = 100 p = 16 / n M = sprand(n, n, p) - M.nzval[:] = 1.0 + M.nzval[:] .= 1.0 A = CombinatorialAdjacency(M) sd = stationarydistribution(A; ncv=10) @test all(sd .>= 0) diff --git a/test/linalg/runtests.jl b/test/linalg/runtests.jl index 4f2577d65..6a2c32ae5 100644 --- a/test/linalg/runtests.jl +++ b/test/linalg/runtests.jl @@ -1,4 +1,5 @@ using LightGraphs.LinAlg +using IterativeEigensolvers const linalgtestdir = dirname(@__FILE__) diff --git a/test/linalg/spectral.jl b/test/linalg/spectral.jl index bc01f7995..eaabb809a 100644 --- a/test/linalg/spectral.jl +++ b/test/linalg/spectral.jl @@ -1,7 +1,7 @@ -import Base: full +import Base: Matrix # just so that we can assert equality of matrices -full(nbt::Nonbacktracking) = full(sparse(nbt)) +Matrix(nbt::Nonbacktracking) = Matrix(SparseArrays.sparse(nbt)) @testset "Spectral" begin @@ -36,8 +36,8 @@ full(nbt::Nonbacktracking) = full(sparse(nbt)) g = copy(g5) add_edge!(g, 1, 1) @test adjacency_matrix(g)[1, 1] == 1 - @test indegree(g) == sum(adjacency_matrix(g), 1)[1, :] - @test outdegree(g) == sum(adjacency_matrix(g), 2)[:, 1] + @test indegree(g) == sum(adjacency_matrix(g), dims=1)[1, :] + @test outdegree(g) == sum(adjacency_matrix(g), dims=2)[:, 1] g10 = CompleteGraph(10) for g in testgraphs(g10) @@ -45,17 +45,17 @@ full(nbt::Nonbacktracking) = full(sparse(nbt)) @test length(em) == 2 * ne(g) @test size(B) == (2 * ne(g), 2 * ne(g)) for i = 1:10 - @test sum(B[:, i]) == 8 - @test sum(B[i, :]) == 8 + @test sum(B[:, i]) == 8 + @test sum(B[i, :]) == 8 end - @test !issymmetric(B) + @test !LinearAlgebra.issymmetric(B) v = ones(Float64, ne(g)) z = zeros(Float64, nv(g)) n10 = Nonbacktracking(g) @test size(n10) == (2 * ne(g), 2 * ne(g)) @test eltype(n10) == Float64 - @test !issymmetric(n10) + @test !LinearAlgebra.issymmetric(n10) contract!(z, n10, v) @@ -93,9 +93,9 @@ full(nbt::Nonbacktracking) = full(sparse(nbt)) T = eltype(g) amat = adjacency_matrix(g, Float64; dir=dir) lmat = laplacian_matrix(g, Float64; dir=dir) - @test isa(amat, SparseMatrixCSC{Float64,T}) - @test isa(lmat, SparseMatrixCSC{Float64,T}) - evals = eigvals(full(lmat)) + @test isa(amat, SparseArrays.SparseMatrixCSC{Float64,T}) + @test isa(lmat, SparseArrays.SparseMatrixCSC{Float64,T}) + evals = LinearAlgebra.eigvals(Matrix(lmat)) @test all(evals .>= -1e-15) # positive semidefinite @test (minimum(evals)) ≈ 0 atol = 1e-13 end @@ -131,32 +131,32 @@ full(nbt::Nonbacktracking) = full(sparse(nbt)) for g in testgraphs(pg) nbt = Nonbacktracking(g) B, emap = non_backtracking_matrix(g) - Bs = sparse(nbt) - @test sparse(B) == Bs - @test eigs(nbt, nev=1)[1] ≈ eigs(B, nev=1)[1] atol = 1e-5 + Bs = SparseArrays.sparse(nbt) + @test SparseArrays.sparse(B) == Bs + @test IterativeEigensolvers.eigs(nbt, nev=1)[1] ≈ IterativeEigensolvers.eigs(B, nev=1)[1] atol = 1e-5 # check that matvec works x = ones(Float64, nbt.m) y = nbt * x z = B * x - @test norm(y - z) < 1e-8 + @test LinearAlgebra.norm(y - z) < 1e-8 - #check that matmat works and full(nbt) == B - @test norm(nbt * eye(nbt.m) - B) < 1e-8 + #check that matmat works and Matrix(nbt) == B - #check that matmat works and full(nbt) == B - @test norm(nbt * eye(nbt.m) - B) < 1e-8 + @test LinearAlgebra.norm(nbt * Matrix{Float64}(LinearAlgebra.I, nbt.m, nbt.m) - B) < 1e-8 + + #check that matmat works and Matrix(nbt) == B + @test LinearAlgebra.norm(nbt * Matrix{Float64}(LinearAlgebra.I, nbt.m, nbt.m) - B) < 1e-8 #check that we can use the implicit matvec in nonbacktrack_embedding @test size(y) == size(x) B₁ = 
Nonbacktracking(g10) - @test full(B₁) == full(B) + @test Matrix(B₁) == Matrix(B) @test B₁ * ones(size(B₁)[2]) == B * ones(size(B)[2]) @test size(B₁) == size(B) - # @test norm(eigs(B₁)[1] - eigs(B)[1]) ≈ 0.0 atol=1e-8 - @test !issymmetric(B₁) + @test !LinearAlgebra.issymmetric(B₁) @test eltype(B₁) == Float64 end # END tests for Nonbacktracking @@ -165,8 +165,8 @@ full(nbt::Nonbacktracking) = full(sparse(nbt)) for n = 3:10 polygon = random_regular_graph(n, 2) for g in testgraphs(polygon) - @test spectral_distance(g, g) ≈ 0 atol=1e-8 - @test spectral_distance(g, g, 1) ≈ 0 atol=1e-8 + @test spectral_distance(g, g) ≈ 0 atol = 1e-8 + @test spectral_distance(g, g, 1) ≈ 0 atol = 1e-8 end end end diff --git a/test/operators.jl b/test/operators.jl index 9e30e5138..168545520 100644 --- a/test/operators.jl +++ b/test/operators.jl @@ -8,7 +8,7 @@ @test nv(c) == 5 @test ne(c) == 6 - gb = @inferred(blkdiag(g, g)) + gb = @inferred(SparseArrays.blockdiag(g, g)) @test nv(gb) == 10 @test ne(gb) == 8 @@ -117,9 +117,9 @@ # add_edge!(h, 6, 5) # new_map = @inferred(merge_vertices!(h, [2, 3, 7, 3, 3, 2])) # @test new_map == [1, 2, 2, 4, 5, 3, 2] - # @test in_neighbors(h, 2) == [1, 4] - # @test out_neighbors(h, 2) == [3, 5] - # @test out_neighbors(h, 3) == [5] + # @test inneighbors(h, 2) == [1, 4] + # @test outneighbors(h, 2) == [3, 5] + # @test outneighbors(h, 3) == [5] end @@ -138,7 +138,7 @@ T = eltype(g) hc = CompleteGraph(2) h = Graph{T}(hc) - z = @inferred(blkdiag(g, h)) + z = @inferred(SparseArrays.blockdiag(g, h)) @test nv(z) == nv(g) + nv(h) @test ne(z) == ne(g) + ne(h) @test has_edge(z, 1, 2) @@ -173,10 +173,10 @@ @test size(p, 3) == 1 @test sum(p, 1) == sum(p, 2) @test_throws ArgumentError sum(p, 3) - @test sparse(p) == adjacency_matrix(p) + @test SparseArrays.sparse(p) == adjacency_matrix(p) @test length(p) == 100 @test ndims(p) == 2 - @test issymmetric(p) + @test LinearAlgebra.issymmetric(p) end gx = SimpleDiGraph(4) @@ -186,7 +186,7 @@ @test sum(g, 1) == [0, 1, 2, 1] @test sum(g, 2) == [2, 1, 1, 0] @test sum(g) == 4 - @test @inferred(!issymmetric(g)) + @test @inferred(!LinearAlgebra.issymmetric(g)) end nx = 20; ny = 21 @@ -204,7 +204,7 @@ m = nv(h) for i in 1:(len - 1) k = nv(g) - g = blkdiag(g, h) + g = SparseArrays.blockdiag(g, h) for v in 1:m add_edge!(g, v + (k - m), v + k) end diff --git a/test/runtests.jl b/test/runtests.jl index d0190e1f3..29f74f113 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,9 +1,14 @@ using LightGraphs using LightGraphs.SimpleGraphs -using Base.Test +using Test +using SparseArrays +using LinearAlgebra +using DelimitedFiles +using Base64 const testdir = dirname(@__FILE__) + testgraphs(g) = [g, Graph{UInt8}(g), Graph{Int16}(g)] testdigraphs(g) = [g, DiGraph{UInt8}(g), DiGraph{Int16}(g)] @@ -18,7 +23,6 @@ tests = [ "interface", "core", "operators", - # "graphdigraph", "degeneracy", "distance", "digraph/transitivity", @@ -35,11 +39,13 @@ tests = [ "shortestpaths/astar", "shortestpaths/bellman-ford", "shortestpaths/dijkstra", + "shortestpaths/johnson", "shortestpaths/floyd-warshall", "shortestpaths/yen", "traversals/bfs", "traversals/parallel_bfs", "traversals/bipartition", + "traversals/greedy_color", "traversals/dfs", "traversals/maxadjvisit", "traversals/randomwalks", @@ -49,6 +55,7 @@ tests = [ "community/label_propagation", "community/modularity", "community/clustering", + "community/clique_percolation", "centrality/betweenness", "centrality/closeness", "centrality/degree", @@ -57,12 +64,6 @@ tests = [ "centrality/eigenvector", "centrality/stress", 
"centrality/radiality", - "flow/edmonds_karp", - "flow/dinic", - "flow/boykov_kolmogorov", - "flow/push_relabel", - "flow/maximum_flow", - "flow/multiroute_flow", "utils", "spanningtrees/kruskal", "spanningtrees/prim", diff --git a/test/shortestpaths/astar.jl b/test/shortestpaths/astar.jl index 88fb9550b..bc0c4ec08 100644 --- a/test/shortestpaths/astar.jl +++ b/test/shortestpaths/astar.jl @@ -3,7 +3,7 @@ g4 = PathDiGraph(5) d1 = float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0]) - d2 = sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) + d2 = SparseArrays.sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) for g in testgraphs(g3), dg in testdigraphs(g4) @test @inferred(a_star(g, 1, 4, d1)) == @inferred(a_star(dg, 1, 4, d1)) == diff --git a/test/shortestpaths/bellman-ford.jl b/test/shortestpaths/bellman-ford.jl index cfd977156..1ab42dce2 100644 --- a/test/shortestpaths/bellman-ford.jl +++ b/test/shortestpaths/bellman-ford.jl @@ -2,7 +2,7 @@ g4 = PathDiGraph(5) d1 = float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0]) - d2 = sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) + d2 = SparseArrays.sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) for g in testdigraphs(g4) y = @inferred(bellman_ford_shortest_paths(g, 2, d1)) z = @inferred(bellman_ford_shortest_paths(g, 2, d2)) diff --git a/test/shortestpaths/dijkstra.jl b/test/shortestpaths/dijkstra.jl index 5911c01c0..754c02e41 100644 --- a/test/shortestpaths/dijkstra.jl +++ b/test/shortestpaths/dijkstra.jl @@ -1,21 +1,21 @@ @testset "Dijkstra" begin g4 = PathDiGraph(5) d1 = float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0]) - d2 = sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) + d2 = SparseArrays.sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) for g in testdigraphs(g4) - y = @inferred(dijkstra_shortest_paths(g, 2, d1)) - z = @inferred(dijkstra_shortest_paths(g, 2, d2)) + y = @inferred(dijkstra_shortest_paths(g, 2, d1)) + z = @inferred(dijkstra_shortest_paths(g, 2, d2)) - @test y.parents == z.parents == [0, 0, 2, 3, 4] - @test y.dists == z.dists == [Inf, 0, 6, 17, 33] + @test y.parents == z.parents == [0, 0, 2, 3, 4] + @test y.dists == z.dists == [Inf, 0, 6, 17, 33] - y = @inferred(dijkstra_shortest_paths(g, 2, d1; allpaths=true)) - z = @inferred(dijkstra_shortest_paths(g, 2, d2; allpaths=true)) - @test z.predecessors[3] == y.predecessors[3] == [2] + y = @inferred(dijkstra_shortest_paths(g, 2, d1; allpaths=true)) + z = @inferred(dijkstra_shortest_paths(g, 2, d2; allpaths=true)) + @test z.predecessors[3] == y.predecessors[3] == [2] - @test @inferred(enumerate_paths(z)) == enumerate_paths(y) - @test @inferred(enumerate_paths(z))[4] == + @test @inferred(enumerate_paths(z)) == enumerate_paths(y) + @test @inferred(enumerate_paths(z))[4] == enumerate_paths(z, 4) == enumerate_paths(y, 4) == [2, 3, 4] end @@ -25,9 +25,9 @@ d = ones(Int, 5, 5) d[2, 3] = 100 for g in testgraphs(gx) - z = @inferred(dijkstra_shortest_paths(g, 1, d)) - @test z.dists == [0, 1, 3, 2, 3] - @test z.parents == [0, 1, 4, 2, 4] + z = @inferred(dijkstra_shortest_paths(g, 1, d)) + @test z.dists == [0, 1, 3, 2, 3] + @test z.parents == [0, 1, 4, 2, 4] end # small function to reconstruct the shortest path; I copied it from somewhere, can't find the original source to give the credits @@ -48,18 +48,18 @@ 1. 0. 3. 0.] 
for g in testgraphs(G) - ds = @inferred(dijkstra_shortest_paths(g, 2, w)) + ds = @inferred(dijkstra_shortest_paths(g, 2, w)) # this loop reconstructs the shortest path for vertices 1, 3 and 4 - @test spaths(ds, [1, 3, 4], 2) == Array[[2 1], + @test spaths(ds, [1, 3, 4], 2) == Array[[2 1], [2 3], [2 1 4]] # here a selflink at source is introduced; it should not change the shortest paths - w[2, 2] = 10.0 - ds = @inferred(dijkstra_shortest_paths(g, 2, w)) - shortest_paths = [] + w[2, 2] = 10.0 + ds = @inferred(dijkstra_shortest_paths(g, 2, w)) + shortest_paths = [] # this loop reconstructs the shortest path for vertices 1, 3 and 4 - @test spaths(ds, [1, 3, 4], 2) == Array[[2 1], + @test spaths(ds, [1, 3, 4], 2) == Array[[2 1], [2 3], [2 1 4]] end @@ -74,15 +74,15 @@ add_edge!(G, 3, 4) add_edge!(G, 4, 5) for g in testgraphs(G) - ds = @inferred(dijkstra_shortest_paths(g, 1, m; allpaths=true)) - @test ds.pathcounts == [1, 1, 1, 1, 2] - @test ds.predecessors == [[], [1], [1], [3], [3, 4]] - @test ds.predecessors == [[], [1], [1], [3], [3, 4]] - - dm = @inferred(dijkstra_shortest_paths(g, 1; allpaths=true, trackvertices=true)) - @test dm.pathcounts == [1, 1, 1, 1, 2] - @test dm.predecessors == [[], [1], [1], [3], [2, 3]] - @test dm.closest_vertices == [1, 2, 3, 5, 4] + ds = @inferred(dijkstra_shortest_paths(g, 1, m; allpaths=true)) + @test ds.pathcounts == [1, 1, 1, 1, 2] + @test ds.predecessors == [[], [1], [1], [3], [3, 4]] + @test ds.predecessors == [[], [1], [1], [3], [3, 4]] + + dm = @inferred(dijkstra_shortest_paths(g, 1; allpaths=true, trackvertices=true)) + @test dm.pathcounts == [1, 1, 1, 1, 2] + @test dm.predecessors == [[], [1], [1], [3], [2, 3]] + @test dm.closest_vertices == [1, 2, 3, 5, 4] end G = SimpleGraph(5) @@ -90,8 +90,8 @@ add_edge!(G, 1, 3) add_edge!(G, 4, 5) for g in testgraphs(G) - dm = @inferred(dijkstra_shortest_paths(g, 1; allpaths=true, trackvertices=true)) - @test dm.closest_vertices == [1, 2, 3, 4, 5] + dm = @inferred(dijkstra_shortest_paths(g, 1; allpaths=true, trackvertices=true)) + @test dm.closest_vertices == [1, 2, 3, 4, 5] end #Testing multisource On undirected Graph @@ -99,51 +99,51 @@ d = [0 1 2 3 4; 1 0 1 0 1; 2 1 0 11 12; 3 0 11 0 5; 4 1 19 5 0] for g in testgraphs(g3) - z = @inferred(floyd_warshall_shortest_paths(g, d)) - zp = @inferred(parallel_multisource_dijkstra_shortest_paths(g, collect(1:5), d)) - @test all(isapprox(z.dists, zp.dists)) - - for i in 1:5 - state = dijkstra_shortest_paths(g, i; allpaths=true); - for j in 1:5 - if z.parents[i, j] != 0 - @test z.parents[i, j] in state.predecessors[j] - else - @test length(state.predecessors[j]) == 0 - @test state.parents[j] == 0 - end + z = @inferred(floyd_warshall_shortest_paths(g, d)) + zp = @inferred(parallel_multisource_dijkstra_shortest_paths(g, collect(1:5), d)) + @test all(isapprox(z.dists, zp.dists)) + + for i in 1:5 + state = dijkstra_shortest_paths(g, i; allpaths=true); + for j in 1:5 + if z.parents[i, j] != 0 + @test z.parents[i, j] in state.predecessors[j] + else + @test length(state.predecessors[j]) == 0 + @test state.parents[j] == 0 + end + end end - end - - z = @inferred(floyd_warshall_shortest_paths(g)) - zp = @inferred(parallel_multisource_dijkstra_shortest_paths(g)) - @test all(isapprox(z.dists, zp.dists)) - - for i in 1:5 - state = dijkstra_shortest_paths(g, i; allpaths=true); - for j in 1:5 - if z.parents[i, j] != 0 - @test z.parents[i, j] in state.predecessors[j] - else - @test length(state.predecessors[j]) == 0 - end + + z = @inferred(floyd_warshall_shortest_paths(g)) + zp = 
@inferred(parallel_multisource_dijkstra_shortest_paths(g)) + @test all(isapprox(z.dists, zp.dists)) + + for i in 1:5 + state = dijkstra_shortest_paths(g, i; allpaths=true); + for j in 1:5 + if z.parents[i, j] != 0 + @test z.parents[i, j] in state.predecessors[j] + else + @test length(state.predecessors[j]) == 0 + end + end end - end - - z = @inferred(floyd_warshall_shortest_paths(g)) - zp = @inferred(parallel_multisource_dijkstra_shortest_paths(g, [1, 2])) - @test all(isapprox(z.dists[1:2, :], zp.dists)) - - for i in 1:2 - state = dijkstra_shortest_paths(g, i;allpaths=true); - for j in 1:5 - if z.parents[i, j] != 0 - @test z.parents[i, j] in state.predecessors[j] - else - @test length(state.predecessors[j]) == 0 - end + + z = @inferred(floyd_warshall_shortest_paths(g)) + zp = @inferred(parallel_multisource_dijkstra_shortest_paths(g, [1, 2])) + @test all(isapprox(z.dists[1:2, :], zp.dists)) + + for i in 1:2 + state = dijkstra_shortest_paths(g, i;allpaths=true); + for j in 1:5 + if z.parents[i, j] != 0 + @test z.parents[i, j] in state.predecessors[j] + else + @test length(state.predecessors[j]) == 0 + end + end end - end end @@ -152,49 +152,49 @@ d = float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0]) for g in testdigraphs(g3) - z = @inferred(floyd_warshall_shortest_paths(g, d)) - zp = @inferred(parallel_multisource_dijkstra_shortest_paths(g, collect(1:5), d)) - @test all(isapprox(z.dists, zp.dists)) - - for i in 1:5 - state = dijkstra_shortest_paths(g, i; allpaths=true); - for j in 1:5 - if z.parents[i, j] != 0 - @test z.parents[i, j] in state.predecessors[j] - else - @test length(state.predecessors[j]) == 0 - end + z = @inferred(floyd_warshall_shortest_paths(g, d)) + zp = @inferred(parallel_multisource_dijkstra_shortest_paths(g, collect(1:5), d)) + @test all(isapprox(z.dists, zp.dists)) + + for i in 1:5 + state = dijkstra_shortest_paths(g, i; allpaths=true); + for j in 1:5 + if z.parents[i, j] != 0 + @test z.parents[i, j] in state.predecessors[j] + else + @test length(state.predecessors[j]) == 0 + end + end end - end - - z = @inferred(floyd_warshall_shortest_paths(g)) - zp = @inferred(parallel_multisource_dijkstra_shortest_paths(g)) - @test all(isapprox(z.dists, zp.dists)) - - for i in 1:5 - state = dijkstra_shortest_paths(g, i; allpaths=true); - for j in 1:5 - if z.parents[i, j] != 0 - @test z.parents[i, j] in state.predecessors[j] - else - @test length(state.predecessors[j]) == 0 - end + + z = @inferred(floyd_warshall_shortest_paths(g)) + zp = @inferred(parallel_multisource_dijkstra_shortest_paths(g)) + @test all(isapprox(z.dists, zp.dists)) + + for i in 1:5 + state = dijkstra_shortest_paths(g, i; allpaths=true); + for j in 1:5 + if z.parents[i, j] != 0 + @test z.parents[i, j] in state.predecessors[j] + else + @test length(state.predecessors[j]) == 0 + end + end end - end - - z = @inferred(floyd_warshall_shortest_paths(g)) - zp = @inferred(parallel_multisource_dijkstra_shortest_paths(g, [1, 2])) - @test all(isapprox(z.dists[1:2, :], zp.dists)) - - for i in 1:2 - state = dijkstra_shortest_paths(g, i;allpaths=true); - for j in 1:5 - if z.parents[i, j] != 0 - @test z.parents[i, j] in state.predecessors[j] - else - @test length(state.predecessors[j]) == 0 - end + + z = @inferred(floyd_warshall_shortest_paths(g)) + zp = @inferred(parallel_multisource_dijkstra_shortest_paths(g, [1, 2])) + @test all(isapprox(z.dists[1:2, :], zp.dists)) + + for i in 1:2 + state = dijkstra_shortest_paths(g, i;allpaths=true); + for j in 1:5 + if z.parents[i, j] != 0 + @test z.parents[i, j] in 
state.predecessors[j] + else + @test length(state.predecessors[j]) == 0 + end + end end - end end end diff --git a/test/shortestpaths/johnson.jl b/test/shortestpaths/johnson.jl new file mode 100644 index 000000000..97a4d1281 --- /dev/null +++ b/test/shortestpaths/johnson.jl @@ -0,0 +1,44 @@ +@testset "Johnson" begin + g3 = PathGraph(5) + d = LinearAlgebra.Symmetric([0 1 2 3 4; 1 0 6 7 8; 2 6 0 11 12; 3 7 11 0 16; 4 8 12 16 0]) + for g in testgraphs(g3) + z = @inferred(johnson_shortest_paths(g, d)) + @test z.dists[3, :][:] == [7, 6, 0, 11, 27] + @test z.parents[3, :][:] == [2, 3, 0, 3, 4] + + @test @inferred(enumerate_paths(z))[2][2] == [] + @test @inferred(enumerate_paths(z))[2][4] == enumerate_paths(z, 2)[4] == enumerate_paths(z, 2, 4) == [2, 3, 4] + + z = @inferred(johnson_shortest_paths(g, d, parallel=true)) + @test z.dists[3, :][:] == [7, 6, 0, 11, 27] + @test z.parents[3, :][:] == [2, 3, 0, 3, 4] + + @test @inferred(enumerate_paths(z))[2][2] == [] + @test @inferred(enumerate_paths(z))[2][4] == enumerate_paths(z, 2)[4] == enumerate_paths(z, 2, 4) == [2, 3, 4] + + end + + g4 = PathDiGraph(4) + for g in testdigraphs(g4) + z = @inferred(johnson_shortest_paths(g)) + @test length(enumerate_paths(z, 4, 3)) == 0 + @test length(enumerate_paths(z, 4, 1)) == 0 + @test length(enumerate_paths(z, 2, 3)) == 2 + + z = @inferred(johnson_shortest_paths(g, parallel=true)) + @test length(enumerate_paths(z, 4, 3)) == 0 + @test length(enumerate_paths(z, 4, 1)) == 0 + @test length(enumerate_paths(z, 2, 3)) == 2 + end + + g5 = DiGraph([1 1 1 0 1; 0 1 0 1 1; 0 1 1 0 0; 1 0 1 1 0; 0 0 0 1 1]) + d = [0 3 8 0 -4; 0 0 0 1 7; 0 4 0 0 0; 2 0 -5 0 0; 0 0 0 6 0] + for g in testdigraphs(g5) + z = @inferred(johnson_shortest_paths(g, d)) + @test z.dists == [0 1 -3 2 -4; 3 0 -4 1 -1; 7 4 0 5 3; 2 -1 -5 0 -2; 8 5 1 6 0] + + z = @inferred(johnson_shortest_paths(g, d, parallel=true)) + @test z.dists == [0 1 -3 2 -4; 3 0 -4 1 -1; 7 4 0 5 3; 2 -1 -5 0 -2; 8 5 1 6 0] + end + +end diff --git a/test/shortestpaths/yen.jl b/test/shortestpaths/yen.jl index ebec4b95f..5baef9331 100644 --- a/test/shortestpaths/yen.jl +++ b/test/shortestpaths/yen.jl @@ -1,7 +1,7 @@ @testset "Yen" begin g4 = PathDiGraph(5) d1 = float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0]) - d2 = sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) + d2 = SparseArrays.sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) for g in testdigraphs(g4) x = @inferred(yen_k_shortest_paths(g, 5, 5)) diff --git a/test/simplegraphs/simpleedgeiter.jl b/test/simplegraphs/simpleedgeiter.jl index 0801944f6..96bf24354 100644 --- a/test/simplegraphs/simpleedgeiter.jl +++ b/test/simplegraphs/simpleedgeiter.jl @@ -24,6 +24,7 @@ @test collect(SimpleEdge, edges(gb)) == edges(ga) @test Set{Edge}(collect(SimpleEdge, edges(gb))) == edges(ga) @test @inferred(edges(ga)) == Set{SimpleEdge}(collect(SimpleEdge, edges(gb))) + @test eltype(collect(edges(ga))) == edgetype(ga) ga = SimpleGraph(10) add_edge!(ga, 3, 2) diff --git a/test/simplegraphs/simplegraphs.jl b/test/simplegraphs/simplegraphs.jl index ce69dce7d..73502af8b 100644 --- a/test/simplegraphs/simplegraphs.jl +++ b/test/simplegraphs/simplegraphs.jl @@ -1,3 +1,4 @@ +import Random @testset "SimpleGraphs" begin adjmx1 = [0 1 0; 1 0 1; 0 1 0] # graph @@ -27,14 +28,14 @@ gx = SimpleGraph() for g in testgraphs(gx) T = eltype(g) - @test sprint(show, g) == "empty undirected simple $T graph" + @test sprint(show, g) == "{0, 0} undirected simple $T graph" @test 
@inferred(add_vertices!(g, 5) == 5) @test sprint(show, g) == "{5, 0} undirected simple $T graph" end gx = SimpleDiGraph() for g in testdigraphs(gx) T = eltype(g) - @test sprint(show, g) == "empty directed simple $T graph" + @test sprint(show, g) == "{0, 0} directed simple $T graph" @test @inferred(add_vertices!(g, 5) == 5) @test sprint(show, g) == "{5, 0} directed simple $T graph" end @@ -61,7 +62,7 @@ gc = copy(g) @test add_edge!(gc, 4, 1) && gc == CycleGraph(4) - @test @inferred(in_neighbors(g, 2)) == @inferred(out_neighbors(g, 2)) == @inferred(neighbors(g, 2)) == [1, 3] + @test @inferred(inneighbors(g, 2)) == @inferred(outneighbors(g, 2)) == @inferred(neighbors(g, 2)) == [1, 3] @test @inferred(add_vertex!(gc)) # out of order, but we want it for issubset @test @inferred(g ⊆ gc) @test @inferred(has_vertex(gc, 5)) @@ -124,8 +125,8 @@ gc = @inferred(copy(g)) @test @inferred(add_edge!(gc, 4, 1)) && gc == CycleDiGraph(4) - @test @inferred(in_neighbors(g, 2)) == [1] - @test @inferred(out_neighbors(g, 2)) == @inferred(neighbors(g, 2)) == [3] + @test @inferred(inneighbors(g, 2)) == [1] + @test @inferred(outneighbors(g, 2)) == @inferred(neighbors(g, 2)) == [3] @test @inferred(add_vertex!(gc)) # out of order, but we want it for issubset @test @inferred(g ⊆ gc) @test @inferred(has_vertex(gc, 5)) @@ -173,6 +174,188 @@ @test nv(g) == 3 && ne(g) == 6 @test g != h end + # tests for #820 + g = CompleteGraph(3) + add_edge!(g, 3, 3) + rem_vertex!(g, 1) + @test nv(g) == 2 && ne(g) == 2 && has_edge(g, 1, 1) + + g = PathDiGraph(3) + add_edge!(g, 3, 3) + rem_vertex!(g, 1) + @test nv(g) == 2 && ne(g) == 2 && has_edge(g, 1, 1) + + # Tests for constructors from iterators of edges + let + g_undir = erdos_renyi(200, 100; seed=0) + add_edge!(g_undir, 200, 1) # ensure that the result uses all vertices + add_edge!(g_undir, 2, 2) # add a self-loop + for g in testgraphs(g_undir) + # We create an edge list, shuffle it and reverse half of its edges + # using this edge list should result in the same graph + edge_list = [e for e in edges(g)] + Random.shuffle!(Random.MersenneTwister(0), edge_list) + for i in rand(Random.MersenneTwister(0), 1:length(edge_list), length(edge_list) ÷ 2) + e = edge_list[i] + Te = typeof(e) + edge_list[i] = Te(dst(e), src(e)) + end + + edge_iter = (e for e in edge_list) + edge_set = Set(edge_list) + edge_set_any = Set{Any}(edge_list) + + g1 = @inferred SimpleGraph(edge_list) + # we can't infer the return type of SimpleGraphFromIterator at the moment + g2 = SimpleGraphFromIterator(edge_list) + g3 = SimpleGraphFromIterator(edge_iter) + g4 = SimpleGraphFromIterator(edge_set) + g5 = SimpleGraphFromIterator(edge_set_any) + + @test g == g1 + @test g == g2 + @test g == g3 + @test g == g4 + @test g == g5 + @test edgetype(g) == edgetype(g1) + @test edgetype(g) == edgetype(g2) + @test edgetype(g) == edgetype(g3) + @test edgetype(g) == edgetype(g4) + @test edgetype(g) == edgetype(g5) + end + g_dir = erdos_renyi(200, 100; is_directed=true, seed=0) + add_edge!(g_dir, 200, 1) + add_edge!(g_dir, 2, 2) + for g in testdigraphs(g_dir) + # We create an edge list and shuffle it + edge_list = [e for e in edges(g)] + Random.shuffle!(Random.MersenneTwister(0), edge_list) + + edge_iter = (e for e in edge_list) + edge_set = Set(edge_list) + edge_set_any = Set{Any}(edge_list) + + g1 = @inferred SimpleDiGraph(edge_list) + # we can't infer the return type of SimpleDiGraphFromIterator at the moment + g2 = SimpleDiGraphFromIterator(edge_list) + g3 = SimpleDiGraphFromIterator(edge_iter) + g4 = 
SimpleDiGraphFromIterator(edge_set) + g5 = SimpleDiGraphFromIterator(edge_set_any) + + @test g == g1 + @test g == g2 + @test g == g3 + @test g == g4 + @test g == g5 + @test edgetype(g) == edgetype(g1) + @test edgetype(g) == edgetype(g2) + @test edgetype(g) == edgetype(g3) + @test edgetype(g) == edgetype(g4) + @test edgetype(g) == edgetype(g5) + end + + # SimpleGraphFromIterator of an empty iterator should result + # in an empty graph of default edgetype + empty_iter = (x for x in []) + @test SimpleGraphFromIterator(empty_iter) == SimpleGraph(0) + @test SimpleDiGraphFromIterator(empty_iter) == SimpleDiGraph(0) + @test edgetype(SimpleGraphFromIterator(empty_iter)) == edgetype(SimpleGraph(0)) + @test edgetype(SimpleDiGraphFromIterator(empty_iter)) == edgetype(SimpleDiGraph(0)) + + # check if multiple edges && multiple self-loops result in the + # correct number of edges & vertices + # edges using integers < 1 should be ignored + g_undir = SimpleGraph(0) + for g in testgraphs(g_undir) + T = edgetype(g) + edge_list = T.([(4,4),(1,2),(4,4),(1,2),(4,4),(2,1),(0,1),(1,0),(0,0)]) + edge_iter = (e for e in edge_list) + edge_set = Set(edge_list) + edge_set_any = Set{Any}(edge_list) + + g1 = @inferred SimpleGraph(edge_list) + g2 = SimpleGraphFromIterator(edge_list) + g3 = SimpleGraphFromIterator(edge_iter) + g4 = SimpleGraphFromIterator(edge_set) + g5 = SimpleGraphFromIterator(edge_set_any) + + @test nv(g1) == 4 + @test nv(g2) == 4 + @test nv(g3) == 4 + @test nv(g4) == 4 + @test nv(g5) == 4 + + @test ne(g1) == 2 + @test ne(g2) == 2 + @test ne(g3) == 2 + @test ne(g4) == 2 + @test ne(g5) == 2 + end + g_dir = SimpleDiGraph(0) + for g in testdigraphs(g_dir) + T = edgetype(g) + edge_list = T.([(4,4),(1,2),(4,4),(1,2),(4,4),(2,1),(0,1),(1,0),(0,0)]) + edge_iter = (e for e in edge_list) + edge_set = Set(edge_list) + edge_set_any = Set{Any}(edge_list) + + g1 = @inferred SimpleDiGraph(edge_list) + g2 = SimpleDiGraphFromIterator(edge_list) + g3 = SimpleDiGraphFromIterator(edge_iter) + g4 = SimpleDiGraphFromIterator(edge_set) + g5 = SimpleDiGraphFromIterator(edge_set_any) + + @test nv(g1) == 4 + @test nv(g2) == 4 + @test nv(g3) == 4 + @test nv(g4) == 4 + @test nv(g5) == 4 + + @test ne(g1) == 3 + @test ne(g2) == 3 + @test ne(g3) == 3 + @test ne(g4) == 3 + @test ne(g5) == 3 + end + # test for iterators where the type of the elements can only be determined at runtime + g_undir = SimpleGraph(0) + for g in testgraphs(g_undir) + T = edgetype(g) + edge_list_good = Any[ T.(1,2), T.(3,4) ] + edge_list_bad = Any[ T.(1,2), Int64(1) ] + g1 = SimpleGraphFromIterator(edge_list_good) + @test edgetype(g1) == T + @test_throws ArgumentError SimpleGraphFromIterator(edge_list_bad) + end + g_dir = SimpleDiGraph(0) + for g in testdigraphs(g_dir) + T = edgetype(g) + edge_list_good = Any[ T.(1,2), T.(3,4) ] + edge_list_bad = Any[ T.(1,2), Int64(1) ] + + g1 = SimpleDiGraphFromIterator(edge_list_good) + @test edgetype(g1) == T + @test_throws ArgumentError SimpleDiGraphFromIterator(edge_list_bad) + end + + # If there are edges of multiple types, they should be propagated + # to a common supertype + edge_list_1 = Any[Edge{Int8}(1,2), Edge{Int16}(3,4)] + edge_list_2 = Any[Edge{Int16}(1,2), Edge{Int8}(3,4)] + g1_undir = SimpleGraphFromIterator(edge_list_1) + g2_undir = SimpleGraphFromIterator(edge_list_2) + g1_dir = SimpleGraphFromIterator(edge_list_1) + g2_dir = SimpleGraphFromIterator(edge_list_2) + + @test Int8 <: eltype(g1_undir) + @test Int16 <: eltype(g1_undir) + @test Int8 <: eltype(g2_undir) + @test Int16 <: eltype(g2_undir) + @test 
Int8 <: eltype(g1_dir) + @test Int16 <: eltype(g1_dir) + @test Int8 <: eltype(g2_dir) + @test Int16 <: eltype(g2_dir) + end end diff --git a/test/traversals/bfs.jl b/test/traversals/bfs.jl index f50b21abe..e0d4163de 100644 --- a/test/traversals/bfs.jl +++ b/test/traversals/bfs.jl @@ -11,7 +11,7 @@ import LightGraphs: tree @test nv(z) == 4 && ne(z) == 3 && !has_edge(z, 2, 3) end for g in testgraphs(g6) - @test @inferred(gdistances(g, 2)) == [1, 0, 2, 1, 2] + @test @inferred(gdistances(g, 2)) == @inferred(gdistances(g, 2; sort_alg = MergeSort)) == [1, 0, 2, 1, 2] @test @inferred(gdistances(g, [1, 2])) == [0, 0, 1, 1, 2] @test @inferred(gdistances(g, [])) == fill(typemax(eltype(g)), 5) end @@ -28,7 +28,7 @@ import LightGraphs: tree # import LightGraphs: TreeBFSVisitorVector, bfs_tree!, tree - function istree{T<:Integer}(parents::Vector{T}, maxdepth, n::T) + function istree(parents::Vector{T}, maxdepth, n::T) where T<:Integer flag = true for i in one(T):n s = i @@ -59,25 +59,25 @@ import LightGraphs: tree gx = SimpleGraph(6) d = nv(gx) - for (i, j) in [(1, 2), (2, 3), (2, 4), (4, 5), (3,5)] + for (i, j) in [(1, 2), (2, 3), (2, 4), (4, 5), (3, 5)] add_edge!(gx, i, j) end for g in testgraphs(gx) @test has_path(g, 1, 5) @test has_path(g, 1, 2) - @test has_path(g, 1, 5; exclude_vertices = [3]) - @test has_path(g, 1, 5; exclude_vertices = [4]) - @test !has_path(g, 1, 5; exclude_vertices = [3, 4]) + @test has_path(g, 1, 5; exclude_vertices=[3]) + @test has_path(g, 1, 5; exclude_vertices=[4]) + @test !has_path(g, 1, 5; exclude_vertices=[3, 4]) @test has_path(g, 5, 1) - @test has_path(g, 5, 1; exclude_vertices = [3]) - @test has_path(g, 5, 1; exclude_vertices = [4]) - @test !has_path(g, 5, 1; exclude_vertices = [3, 4]) + @test has_path(g, 5, 1; exclude_vertices=[3]) + @test has_path(g, 5, 1; exclude_vertices=[4]) + @test !has_path(g, 5, 1; exclude_vertices=[3, 4]) # Edge cases @test !has_path(g, 1, 6) @test !has_path(g, 6, 1) @test has_path(g, 1, 1) # inseparable - @test !has_path(g, 1, 2; exclude_vertices = [2]) - @test !has_path(g, 1, 2; exclude_vertices = [1]) + @test !has_path(g, 1, 2; exclude_vertices=[2]) + @test !has_path(g, 1, 2; exclude_vertices=[1]) end end diff --git a/test/traversals/bipartition.jl b/test/traversals/bipartition.jl index 31736078a..0efb06b70 100644 --- a/test/traversals/bipartition.jl +++ b/test/traversals/bipartition.jl @@ -21,9 +21,29 @@ g10 = CompleteBipartiteGraph(10, 10) for g in testgraphs(g10) T = eltype(g) - @test @inferred(bipartite_map(g10)) == Vector{T}([ones(T, 10); 2 * ones(T, 10)]) + @test @inferred(bipartite_map(g)) == Vector{T}([ones(T, 10); 2 * ones(T, 10)]) - h = blkdiag(g, g) + h = SparseArrays.blockdiag(g, g) @test @inferred(bipartite_map(h)) == Vector{T}([ones(T, 10); 2 * ones(T, 10); ones(T, 10); 2 * ones(T, 10)]) end + + g2 = CompleteGraph(2) + for g in testgraphs(g2) + @test @inferred(bipartite_map(g)) == Vector{eltype(g)}([1, 2]) + end + + g2 = Graph(2) + for g in testgraphs(g2) + @test @inferred(bipartite_map(g)) == Vector{eltype(g)}([1, 1]) + end + + g2 = DiGraph(2) + for g in testdigraphs(g2) + @test @inferred(bipartite_map(g)) == Vector{eltype(g)}([1, 1]) + end + + g2 = PathDiGraph(2) + for g in testdigraphs(g2) + @test @inferred(bipartite_map(g)) == Vector{eltype(g)}([1, 2]) + end end diff --git a/test/traversals/greedy_color.jl b/test/traversals/greedy_color.jl new file mode 100644 index 000000000..d29d5500a --- /dev/null +++ b/test/traversals/greedy_color.jl @@ -0,0 +1,32 @@ +@testset "Greedy Coloring" begin + + g3 = StarGraph(10) + + for g 
in testgraphs(g3) + for op_sort in (true, false), op_parallel in (true, false) + C = @inferred(greedy_color(g, reps=5, sort_degree=op_sort, parallel=op_parallel)) + @test C.num_colors == 2 + end + end + + g4 = PathGraph(20) + g5 = CompleteGraph(20) + + for graph in [g4, g5] + for g in testgraphs(graph) + for op_sort in (true, false), op_parallel in (true, false) + C = @inferred(greedy_color(g, reps=5, sort_degree=op_sort, parallel=op_parallel)) + + @test C.num_colors <= maximum(degree(g))+1 + correct = true + for e in edges(g) + C.colors[src(e)] == C.colors[dst(e)] && (correct = false) + end + + @test correct + end + end + end + +end + diff --git a/test/traversals/parallel_bfs.jl b/test/traversals/parallel_bfs.jl index bc3265472..5703c4035 100644 --- a/test/traversals/parallel_bfs.jl +++ b/test/traversals/parallel_bfs.jl @@ -12,7 +12,7 @@ import Base.Threads: Atomic, @threads push!(next, i) end @test Set([i[] for i in next[1:5]]) == Set([1, 2, 3, 4, 5]) - first = shift!(next) + first = popfirst!(next) @test first == 1 end From 31dbeac6f896f2269f7dfc34117483e8b308b45f Mon Sep 17 00:00:00 2001 From: mbesancon Date: Wed, 13 Jun 2018 16:00:59 -0400 Subject: [PATCH 3/4] updated syntax for iterator protocol --- src/LightGraphs.jl | 2 +- src/SimpleGraphs/SimpleGraphs.jl | 2 +- src/SimpleGraphs/simpleedgeiter.jl | 13 ++++++++++--- src/SimpleGraphs/simplegraph.jl | 6 +++--- test/simplegraphs/simpleedgeiter.jl | 6 +++--- 5 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/LightGraphs.jl b/src/LightGraphs.jl index d1126e960..438014bbc 100644 --- a/src/LightGraphs.jl +++ b/src/LightGraphs.jl @@ -18,7 +18,7 @@ import SparseArrays: blockdiag, sparse import Base: write, ==, <, *, ≈, convert, isless, issubset, union, intersect, reverse, reverse!, isassigned, getindex, setindex!, show, print, copy, in, sum, size, eltype, length, ndims, transpose, - ctranspose, join, start, next, done, eltype, get, Pair, Tuple, zero + ctranspose, join, iterate, eltype, get, Pair, Tuple, zero export diff --git a/src/SimpleGraphs/SimpleGraphs.jl b/src/SimpleGraphs/SimpleGraphs.jl index 9362337c2..2c81c5262 100644 --- a/src/SimpleGraphs/SimpleGraphs.jl +++ b/src/SimpleGraphs/SimpleGraphs.jl @@ -4,7 +4,7 @@ using SparseArrays using LinearAlgebra import Base: - eltype, show, ==, Pair, Tuple, copy, length, start, next, done, issubset, zero, in + eltype, show, ==, Pair, Tuple, copy, length, issubset, zero, in import LightGraphs: _NI, _insert_and_dedup!, AbstractGraph, AbstractEdge, AbstractEdgeIter, diff --git a/src/SimpleGraphs/simpleedgeiter.jl b/src/SimpleGraphs/simpleedgeiter.jl index daab36a24..4dbd808a9 100644 --- a/src/SimpleGraphs/simpleedgeiter.jl +++ b/src/SimpleGraphs/simpleedgeiter.jl @@ -46,10 +46,17 @@ function edge_next(g::AbstractSimpleGraph, return e, SimpleEdgeIterState(zero(T), 1) end -start(eit::SimpleEdgeIter) = edge_start(eit.g) -done(eit::SimpleEdgeIter, state::SimpleEdgeIterState{T}) where T = state.s == zero(T) +function iterate(eit::SimpleEdgeIter{AbstractSimpleGraph{T}}) where T + state = edge_start(eit.g) + return iterate(eit, state) +end + +function iterate(eit::SimpleEdgeIter{AbstractSimpleGraph{T}}, state::SimpleEdgeIterState{T}) where T + state.s == zero(T) && return nothing + return edge_next(eit.g, state) +end + length(eit::SimpleEdgeIter) = ne(eit.g) -next(eit::SimpleEdgeIter, state::SimpleEdgeIterState) = edge_next(eit.g, state) function _isequal(e1::SimpleEdgeIter, e2) k = 0 diff --git a/src/SimpleGraphs/simplegraph.jl b/src/SimpleGraphs/simplegraph.jl index f779789d7..5b48dbf16 
diff --git a/src/SimpleGraphs/simplegraph.jl b/src/SimpleGraphs/simplegraph.jl
index f779789d7..5b48dbf16 100644
--- a/src/SimpleGraphs/simplegraph.jl
+++ b/src/SimpleGraphs/simplegraph.jl
@@ -13,7 +13,7 @@ end
 eltype(x::SimpleGraph{T}) where T = T

 # Graph{UInt8}(6), Graph{Int16}(7), Graph{UInt8}()
-function SimpleGraph{T}(n::Integer=0) where T <: Integer
+function SimpleGraph(n::Integer=0) where T <: Integer
     fadjlist = [Vector{T}() for _ = one(T):n]
     return SimpleGraph{T}(0, fadjlist)
 end
@@ -28,7 +28,7 @@ SimpleGraph(n::T) where T <: Integer = SimpleGraph{T}(n)
 SimpleGraph(::Type{T}) where T <: Integer = SimpleGraph{T}(zero(T))

 # Graph{UInt8}(adjmx)
-function SimpleGraph{T}(adjmx::AbstractMatrix) where T <: Integer
+function SimpleGraph(adjmx::AbstractMatrix) where T <: Integer
     dima, dimb = size(adjmx)
     isequal(dima, dimb) || throw(ArgumentError("Adjacency / distance matrices must be square"))
     issymmetric(adjmx) || throw(ArgumentError("Adjacency / distance matrices must be symmetric"))
@@ -41,7 +41,7 @@ function SimpleGraph{T}(adjmx::AbstractMatrix) where T <: Integer
 end

 # converts Graph{Int} to Graph{Int32}
-function SimpleGraph{T}(g::SimpleGraph) where T <: Integer
+function SimpleGraph(g::SimpleGraph) where T <: Integer
     h_fadj = [Vector{T}(x) for x in fadj(g)]
     return SimpleGraph(ne(g), h_fadj)
 end
diff --git a/test/simplegraphs/simpleedgeiter.jl b/test/simplegraphs/simpleedgeiter.jl
index 96bf24354..43a2ea1b8 100644
--- a/test/simplegraphs/simpleedgeiter.jl
+++ b/test/simplegraphs/simpleedgeiter.jl
@@ -2,7 +2,7 @@
     ga = @inferred(SimpleGraph(10, 20; seed=1))
     gb = @inferred(SimpleGraph(10, 20; seed=1))
     @test sprint(show, edges(ga)) == "SimpleEdgeIter 20"
-    @test sprint(show, start(edges(ga))) == "SimpleEdgeIterState [1, 1]"
+    @test sprint(show, iterate(edges(ga))[2]) == "SimpleEdgeIterState [1, 1]"

     @test length(collect(edges(Graph(0, 0)))) == 0

@@ -46,7 +46,7 @@ end
     eit = edges(ga)

-    es = @inferred(start(eit))
+    es = @inferred(iterate(eit)[2])

     @test es.s == 2
     @test es.di == 1
@@ -73,7 +73,7 @@ end
     eit = @inferred(edges(ga))

-    es = @inferred(start(eit))
+    es = @inferred(iterate(eit)[2])

     @test es.s == 3
     @test es.di == 1

From 5df437b2bec60c7651d7839d9174ab0e98b123aa Mon Sep 17 00:00:00 2001
From: mbesancon
Date: Wed, 13 Jun 2018 18:32:42 -0400
Subject: [PATCH 4/4] fixed iterator, broken inference

---
 src/SimpleGraphs/SimpleGraphs.jl    |  2 +-
 src/SimpleGraphs/simpleedgeiter.jl  |  4 ++--
 src/SimpleGraphs/simplegraph.jl     |  6 +++---
 test/simplegraphs/simpleedgeiter.jl | 15 +++++++++------
 4 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/src/SimpleGraphs/SimpleGraphs.jl b/src/SimpleGraphs/SimpleGraphs.jl
index 2c81c5262..c04bcc047 100644
--- a/src/SimpleGraphs/SimpleGraphs.jl
+++ b/src/SimpleGraphs/SimpleGraphs.jl
@@ -4,7 +4,7 @@ using SparseArrays
 using LinearAlgebra

 import Base:
-    eltype, show, ==, Pair, Tuple, copy, length, issubset, zero, in
+    eltype, show, ==, Pair, Tuple, copy, length, issubset, zero, in, iterate

 import LightGraphs:
     _NI, _insert_and_dedup!, AbstractGraph, AbstractEdge, AbstractEdgeIter,
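
The signature change below works around the fact that Julia's parametric types are invariant: an edge iterator over a concrete SimpleGraph{Int} is not a SimpleEdgeIter{AbstractSimpleGraph{Int}}, so the iterate methods introduced in the previous patch never dispatched. Constraining a free parameter with where {T, G<:AbstractSimpleGraph{T}} matches any concrete graph type. A minimal sketch with hypothetical stand-in types (AbstractBox, IntBox, and Wrapper are not LightGraphs names):

abstract type AbstractBox{T} end

struct IntBox{T<:Integer} <: AbstractBox{T}
    x::T
end

struct Wrapper{B}
    box::B
end

# Only matches the exact parameter AbstractBox{T}; no Wrapper built from a
# concrete box ever carries that parameter, so this method is effectively unreachable.
describe(w::Wrapper{AbstractBox{T}}) where T = "abstract parameter, T = $T"

# Matches any Wrapper whose parameter is some subtype of AbstractBox{T}.
describe(w::Wrapper{B}) where {T, B<:AbstractBox{T}} = "concrete parameter, T = $T"

describe(Wrapper(IntBox(1)))  # "concrete parameter, T = Int64" on a 64-bit system
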
diff --git a/src/SimpleGraphs/simpleedgeiter.jl b/src/SimpleGraphs/simpleedgeiter.jl
index 4dbd808a9..e077ed66e 100644
--- a/src/SimpleGraphs/simpleedgeiter.jl
+++ b/src/SimpleGraphs/simpleedgeiter.jl
@@ -46,12 +46,12 @@ function edge_next(g::AbstractSimpleGraph,
     return e, SimpleEdgeIterState(zero(T), 1)
 end

-function iterate(eit::SimpleEdgeIter{AbstractSimpleGraph{T}}) where T
+function iterate(eit::SimpleEdgeIter{G}) where {G<:AbstractSimpleGraph}
     state = edge_start(eit.g)
     return iterate(eit, state)
 end

-function iterate(eit::SimpleEdgeIter{AbstractSimpleGraph{T}}, state::SimpleEdgeIterState{T}) where T
+function iterate(eit::SimpleEdgeIter{G}, state::SimpleEdgeIterState{T}) where {T,G<:AbstractSimpleGraph{T}}
     state.s == zero(T) && return nothing
     return edge_next(eit.g, state)
 end
diff --git a/src/SimpleGraphs/simplegraph.jl b/src/SimpleGraphs/simplegraph.jl
index 5b48dbf16..f779789d7 100644
--- a/src/SimpleGraphs/simplegraph.jl
+++ b/src/SimpleGraphs/simplegraph.jl
@@ -13,7 +13,7 @@ end
 eltype(x::SimpleGraph{T}) where T = T

 # Graph{UInt8}(6), Graph{Int16}(7), Graph{UInt8}()
-function SimpleGraph(n::Integer=0) where T <: Integer
+function SimpleGraph{T}(n::Integer=0) where T <: Integer
     fadjlist = [Vector{T}() for _ = one(T):n]
     return SimpleGraph{T}(0, fadjlist)
 end
@@ -28,7 +28,7 @@ SimpleGraph(n::T) where T <: Integer = SimpleGraph{T}(n)
 SimpleGraph(::Type{T}) where T <: Integer = SimpleGraph{T}(zero(T))

 # Graph{UInt8}(adjmx)
-function SimpleGraph(adjmx::AbstractMatrix) where T <: Integer
+function SimpleGraph{T}(adjmx::AbstractMatrix) where T <: Integer
     dima, dimb = size(adjmx)
     isequal(dima, dimb) || throw(ArgumentError("Adjacency / distance matrices must be square"))
     issymmetric(adjmx) || throw(ArgumentError("Adjacency / distance matrices must be symmetric"))
@@ -41,7 +41,7 @@ function SimpleGraph(adjmx::AbstractMatrix) where T <: Integer
 end

 # converts Graph{Int} to Graph{Int32}
-function SimpleGraph(g::SimpleGraph) where T <: Integer
+function SimpleGraph{T}(g::SimpleGraph) where T <: Integer
     h_fadj = [Vector{T}(x) for x in fadj(g)]
     return SimpleGraph(ne(g), h_fadj)
 end
diff --git a/test/simplegraphs/simpleedgeiter.jl b/test/simplegraphs/simpleedgeiter.jl
index 43a2ea1b8..7e273138e 100644
--- a/test/simplegraphs/simpleedgeiter.jl
+++ b/test/simplegraphs/simpleedgeiter.jl
@@ -2,7 +2,9 @@
     ga = @inferred(SimpleGraph(10, 20; seed=1))
     gb = @inferred(SimpleGraph(10, 20; seed=1))
     @test sprint(show, edges(ga)) == "SimpleEdgeIter 20"
-    @test sprint(show, iterate(edges(ga))[2]) == "SimpleEdgeIterState [1, 1]"
+    # note: we don't get the first iterator state,
+    #since iterate returns the state after taking the first value
+    @test sprint(show, iterate(edges(ga))[2]) == "SimpleEdgeIterState [1, 2]"

     @test length(collect(edges(Graph(0, 0)))) == 0

@@ -46,10 +48,11 @@ end
     eit = edges(ga)

-    es = @inferred(iterate(eit)[2])
+    # @inferred not valid for new interface anymore (return type is a Union)
+    es = iterate(eit)[2]

-    @test es.s == 2
-    @test es.di == 1
+    @test es.s == 3
+    @test es.di == 2

     @test [e for e in eit] == [Edge(2, 3), Edge(3, 10), Edge(5, 10)]

@@ -73,10 +76,10 @@ end
     eit = @inferred(edges(ga))

-    es = @inferred(iterate(eit)[2])
+    es = iterate(eit)[2]

     @test es.s == 3
-    @test es.di == 1
+    @test es.di == 2

     @test [e for e in eit] == [
         SimpleEdge(3, 2), SimpleEdge(3, 10),