diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..7f6e015
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,87 @@
+# Compiled source #
+###################
+*.com
+*.class
+*.dll
+*.exe
+*.o
+*.so
+
+# Packages #
+############
+# it's better to unpack these files and commit the raw source
+# git has its own built in compression methods
+*.7z
+*.dmg
+*.gz
+*.iso
+*.jar
+*.rar
+*.tar
+*.zip
+
+# Logs and databases #
+######################
+*.sql
+*.sqlite
+*.mat
+*.edges
+*.smat
+*.labels
+*.graphml
+*.gephi
+*.png
+*.eps
+*.jpg
+
+# OS generated files #
+######################
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+Icon?
+ehthumbs.db
+Thumbs.db
+.settings/
+.cproject
+.project
+.pydevproject
+data/
+exp/
+
+
+#Latex #
+##############
+*.acn
+*.acr
+*.alg
+*.aux
+*.bbl
+*.blg
+*.dvi
+*.fdb_latexmk
+*.glg
+*.glo
+*.gls
+*.idx
+*.ilg
+*.ind
+*.ist
+*.lof
+*.log
+*.lot
+*.maf
+*.mtc
+*.mtc0
+*.nav
+*.nlo
+*.out
+*.pdfsync
+*.ps
+*.snm
+*.synctex.gz
+*.toc
+*.vrb
+*.xdy
diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 0000000..4cd82cd
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,23 @@
+License
+-------
+**Parallel Maximum Clique (PMC) Library**,
+Copyright (C) 2012-2013: Ryan A. Rossi, All rights reserved.
+
+>This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+>This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+>You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+If used, please cite the following manuscript:
+
+ Ryan A. Rossi, David F. Gleich, Assefaw H. Gebremedhin, Md. Mostofa
+ Patwary, A Fast Parallel Maximum Clique Algorithm for Large Sparse Graphs
+ and Temporal Strong Components, arXiv 2013
diff --git a/Makefile b/Makefile
new file mode 100755
index 0000000..ea62e70
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,39 @@
+#
+# Makefile for PMC
+#
+# Ryan A. Rossi
+# Copyright, 2012-2013
+#
+
+.KEEP_STATE:
+
+all: pmc
+
+OPTFLAGS = -O3
+CFLAGS = $(OPTFLAGS)
+CXX = g++
+H_FILES = pmc.h
+
+.cpp.o:
+ $(CXX) $(CFLAGS) -c $<
+
+IO_SRC = pmc_utils.cpp \
+ pmc_graph.cpp \
+ pmc_clique_utils.cpp
+
+PMC_SRC = pmc_heu.cpp \
+ pmc_maxclique.cpp \
+ pmcx_maxclique.cpp \
+ pmcx_maxclique_basic.cpp
+
+BOUND_LIB_SRC = pmc_cores.cpp
+
+PMC_MAIN = pmc_driver.cpp
+
+OBJ_PMC = $(PMC_MAIN:%.cpp=%.o) $(IO_SRC) $(PMC_SRC) $(BOUND_LIB_SRC)
+$(OBJ_PMC): $(H_FILES) Makefile
+pmc: $(OBJ_PMC) $(H_FILES)
+ $(CXX) $(CFLAGS) -o pmc $(OBJ_PMC) -fopenmp
+
+clean:
+ rm -rf *.o pmc
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..5558601
--- /dev/null
+++ b/README.md
@@ -0,0 +1,189 @@
+Parallel Maximum Clique (PMC) Library
+=====================================
+
+In short, a parameterized high performance library for computing maximum cliques in large sparse graphs.
+
+Finding maximum cliques, k-cliques, and temporal strong components are in general NP-hard.
+Yet, these can be computed fast in most social and information networks.
+The PMC library is designed to be fast for solving these problems.
+Algorithms in the PMC library are easily adaptable for use with a variety of orderings, heuristic strategies, and bounds.
+
+* **Maximum clique:** Given a simple undirected graph G and a number k, output the clique of largest size.
+* **K-clique:** In k-clique, the problem is to find a clique of size k if one exists.
+* **Largest temporal-scc:** Given a temporal graph G, a temporal strong component is a set of vertices where all temporal paths exist between the vertices in that set. The Largest TSCC problem is to find the largest among all the temporal strong components.
+
+
+
+Features
+--------
+0. General framework for parallel maximum clique algorithms
+1. Optimized to be fast for large sparse graphs
+ + Algorithms tested on networks of 1.8 billion edges
+2. Set of fast heuristics shown to give accurate approximations
+3. Algorithms for computing Temporal Strongly Connected Components (TSCC) of large dynamic networks
+4. Parameterized for computing k-cliques as fast as possible
+5. Includes a variety of tight linear time bounds for the maximum clique problem
+6. Ordering of vertices for each algorithm can be selected at runtime
+7. Dynamically reduces the graph representation periodically as vertices are pruned or searched
+ + Lowers memory-requirements for massive graphs, increases speed, and has caching benefits
+
+
+Synopsis
+---------
+
+### Setup
+First, you'll need to compile the parallel maximum clique library.
+
+ $ cd path/to/pmc/
+ $ make
+
+Afterwards, the following should work:
+
+ # compute maximum clique using the full algorithm `-a 0`
+ ./pmc -f data/socfb-Texas84.mtx -a 0
+
+
+*PMC* has been tested on Ubuntu Linux (10.10 tested) and Mac OS X (Lion tested) with gcc-mp-4.7 and gcc-mp-4.5.4
+
+Please let me know if you run into any issues.
+
+
+
+### Input file format
++ Matrix Market Coordinate Format (symmetric)
+For details see: <http://math.nist.gov/MatrixMarket/formats.html>
+
+ %%MatrixMarket matrix coordinate pattern symmetric
+ 4 4 6
+ 2 1
+ 3 1
+ 3 2
+ 4 1
+ 4 2
+ 4 3
+
+
++ Edge list (symmetric and unweighted):
+ Codes for transforming the graph into the correct format are provided in the experiments directory.
+
+
+Overview
+---------
+
+The parallel maximum clique algorithms use tight bounds that are fast to compute.
+A few of those are listed below.
+
+* K-cores
+* Degree
+* Neighborhood cores
+* Greedy coloring
+
+All bounds are dynamically updated.
+
+Examples of the three main maximum clique algorithms are given below.
+Each essentially builds on the other.
+
+ # uses the four basic k-core pruning steps
+ ./pmc -f ../pmc/data/output/socfb-Stanford3.mtx -a 2
+
+ # k-core pruning and greedy coloring
+ ./pmc -f ../pmc/data/output/socfb-Stanford3.mtx -a 1
+
+ # neighborhood core pruning (and ordering for greedy coloring)
+ ./pmc -f ../pmc/data/output/socfb-Stanford3.mtx -a 0
+
+
+
+
+
+### Dynamic graph reduction
+
+The reduction wait parameter `-r` below is set to be 1 second (default = 4 seconds).
+
+ ./pmc -f data/sanr200-0-9.mtx -a 0 -t 2 -r 1
+
+In some cases, it may make sense to turn off the explicit graph reduction.
+This is done by setting the reduction wait time `-r` to be very large.
+
+ # Set the reduction wait parameter
+ ./pmc -f data/socfb-Stanford3.mtx -a 0 -t 2 -r 999
+
+
+
+
+
+
+### Orderings
+
+The PMC algorithms are easily adapted to use various ordering strategies.
+To prescribe a vertex ordering, use the -o option with one of the following:
++ `deg`
++ `kcore`
++ `dual_deg` orders vertices by the sum of degrees from neighbors
++ `dual_kcore` orders vertices by the sum of core numbers from neighbors
++ `kcore_deg` vertices are ordered by the weight k(v)d(v)
++ `rand` randomized ordering of vertices
+
+
+
+##### Direction of ordering
+
+Vertices are searched by default in increasing order, to search vertices in decreasing order, use the `d` option:
+
+ ./pmc -f data/p-hat700-2.mtx -a 0 -d
+
+
+
+
+### Heuristic
+The fast heuristic may also be customized to use various greedy selection strategies.
+This is done by using `-h` with one of the following:
+
++ `deg`
++ `kcore`
++ `kcore_deg` select vertex that maximizes k(v)d(v)
++ `rand` randomly select vertices
+
+
+#### Terminate after applying the heuristic
+Approximate the maximum clique using _ONLY_ the heuristic by not setting the exact algorithm via the `-a [num]` option.
+For example:
+
+ ./pmc -f data/sanr200-0-9.mtx -h deg
+
+#### Turning the heuristic off
+
+ # heuristic is turned off by setting `-h 0`.
+ ./pmc -f data/tscc_enron-only.mtx -h 0 -a 0
+
+
+
+### K-clique
+
+The parallel maximum clique algorithms have also been parameterized to find cliques of size k.
+This routine is useful for many tasks in network analysis such as mining graphs and community detection.
+
+ # Computes a clique of size 50 from the Stanford facebook network
+ ./pmc -f data/socfb-Stanford3.mtx -a 0 -k 50
+
+
+Use `-o rand` to find potentially different cliques of a certain size:
+
+ # Computes a clique of size 36 from sanr200-0-9
+ ./pmc -f data/sanr200-0-9.mtx -a 0 -k 36 -o rand
+
+
+
+Terms and conditions
+--------------------
+Please feel free to use these codes. We only ask that you cite:
+
+ Ryan A. Rossi, David F. Gleich, Assefaw H. Gebremedhin, Md. Mostofa Patwary,
+ A Fast Parallel Maximum Clique Algorithm for Large Sparse Graphs and Temporal
+ Strong Components, arXiv preprint 1302.6256, 2013.
+
+_These codes are research prototypes and may not work for you. No promises. But do email if you run into problems._
+
+
+Copyright 2011-2013, Ryan A. Rossi. All rights reserved.
+
\ No newline at end of file
diff --git a/pmc.h b/pmc.h
new file mode 100755
index 0000000..4ecdb26
--- /dev/null
+++ b/pmc.h
@@ -0,0 +1,32 @@
+/**
+ ============================================================================
+ Name : Parallel Maximum Clique (PMC) Library
+ Author : Ryan A. Rossi (rrossi@purdue.edu)
+ Description : A general high-performance parallel framework for computing
+ maximum cliques. The library is designed to be fast for large
+ sparse graphs.
+
+ Copyright (C) 2012-2013, Ryan A. Rossi, All rights reserved.
+
+ Please cite the following paper if used:
+ Ryan A. Rossi, David F. Gleich, Assefaw H. Gebremedhin, Md. Mostofa
+ Patwary, A Fast Parallel Maximum Clique Algorithm for Large Sparse Graphs
+ and Temporal Strong Components, arXiv preprint 1302.6256, 2013.
+
+ See http://ryanrossi.com/pmc for more information.
+ ============================================================================
+ */
+
+#ifndef __PMC_H__
+#define __PMC_H__
+
+#include "pmc_headers.h"
+#include "pmc_input.h"
+#include "pmc_utils.h"
+
+#include "pmc_heu.h"
+#include "pmc_maxclique.h"
+#include "pmcx_maxclique.h"
+#include "pmcx_maxclique_basic.h"
+
+#endif
diff --git a/pmc_clique_utils.cpp b/pmc_clique_utils.cpp
new file mode 100644
index 0000000..08d8218
--- /dev/null
+++ b/pmc_clique_utils.cpp
@@ -0,0 +1,201 @@
+/**
+ ============================================================================
+ Name : Parallel Maximum Clique (PMC) Library
+ Author : Ryan A. Rossi (rrossi@purdue.edu)
+ Description : A general high-performance parallel framework for computing
+ maximum cliques. The library is designed to be fast for large
+ sparse graphs.
+
+ Copyright (C) 2012-2013, Ryan A. Rossi, All rights reserved.
+
+ Please cite the following paper if used:
+ Ryan A. Rossi, David F. Gleich, Assefaw H. Gebremedhin, Md. Mostofa
+ Patwary, A Fast Parallel Maximum Clique Algorithm for Large Sparse Graphs
+ and Temporal Strong Components, arXiv preprint 1302.6256, 2013.
+
+ See http://ryanrossi.com/pmc for more information.
+ ============================================================================
+ */
+
+#include "pmc_graph.h"
+#include
+
+using namespace std;
+using namespace pmc;
+
+int pmc_graph::initial_pruning(pmc_graph& G, int* &pruned, int lb) {
+ int lb_idx = 0;
+ for (int i = G.num_vertices()-1; i >= 0; i--) {
+ if (kcore[kcore_order[i]] == lb) lb_idx = i;
+ if (kcore[kcore_order[i]] <= lb) pruned[kcore_order[i]] = 1;
+ }
+
+ double sec = get_time();
+ cout << "[pmc: initial k-core pruning] before pruning: |V| = " << G.num_vertices();
+ cout << ", |E| = " << G.num_edges() <= 0; i--) {
+ if (kcore[kcore_order[i]] == lb) lb_idx = i;
+ if (kcore[kcore_order[i]] <= lb) {
+ pruned[kcore_order[i]] = 1;
+ for (long long j = vertices[kcore_order[i]]; j < vertices[kcore_order[i] + 1]; j++) {
+ adj[kcore_order[i]][edges[j]] = false;
+ adj[edges[j]][kcore_order[i]] = false;
+ }
+ }
+ }
+
+ double sec = get_time();
+ cout << "[pmc: initial k-core pruning] before pruning: |V| = " << G.num_vertices() << ", |E| = " << G.num_edges() < &V, pmc_graph &G,
+ int &lb_idx, int &lb, string vertex_ordering, bool decr_order) {
+
+ srand (time(NULL));
+ int u = 0, val = 0;
+ for (int k = lb_idx; k < G.num_vertices(); k++) {
+ if (degree[kcore_order[k]] >= lb - 1) {
+ u = kcore_order[k];
+
+ if (vertex_ordering == "deg")
+ val = vertices[u + 1] - vertices[u];
+ else if (vertex_ordering == "kcore")
+ val = kcore[u];
+ else if (vertex_ordering == "kcore_deg")
+ val = degree[u] * kcore[u];
+ else if (vertex_ordering == "rand")
+ val = rand() % vertices.size();
+ // neighbor degrees
+ else if (vertex_ordering == "dual_deg") {
+ val = 0;
+ for (long long j = vertices[u]; j < vertices[u + 1]; j++) {
+ val = val + G.vertex_degree(edges[j]);
+ }
+ }
+ // neighbor degrees
+ else if (vertex_ordering == "dual_kcore") {
+ val = 0;
+ for (long long j = vertices[u]; j < vertices[u + 1]; j++) {
+ val = val + kcore[edges[j]];
+ }
+ }
+ else val = vertices[u + 1] - vertices[u];
+ V.push_back(Vertex(u,val));
+ }
+ }
+ if (decr_order)
+ std::sort(V.begin(), V.end(), decr_bound);
+ else
+ std::sort(V.begin(), V.end(), incr_bound);
+}
+
+
+/**
+ * Reduce the graph by removing the pruned vertices
+ * + Systematically speeds algorithm up by reducing the neighbors as more vertices are searched
+ *
+ * The algorithm below is for parallel maximum clique finders and has the following features:
+ * + Thread-safe, since local copy of vertices/edges are passed in..
+ * + Pruned is a shared variable, but it is safe, since only reads/writes can occur, no deletion
+ */
+void pmc_graph::reduce_graph(
+ vector& vs,
+ vector& es,
+ int* &pruned,
+ pmc_graph& G,
+ int id,
+ int& mc) {
+
+ int num_vs = vs.size();
+
+ vector V(num_vs,0);
+ vector E;
+ E.reserve(es.size());
+
+ int start = 0;
+ for (int i = 0; i < num_vs - 1; i++) {
+ start = E.size();
+ if (!pruned[i]) { //skip these V_local...
+ for (long long j = vs[i]; j < vs[i + 1]; j++ ) {
+ if (!pruned[es[j]])
+ E.push_back(es[j]);
+ }
+ }
+ V[i] = start;
+ V[i + 1] = E.size();
+ }
+ vs = V;
+ es = E;
+
+ // compute k-cores and share bounds: ensure operation completed by single process
+ #pragma omp single nowait
+ {
+ cout << ">>> [pmc: thread " << omp_get_thread_num() + 1 << "]" < &C_max, double &sec) {
+ cout << "*** [pmc: thread " << omp_get_thread_num() + 1;
+ cout << "] current max clique = " << C_max.size();
+ cout << ", time = " << get_time() - sec << " sec" < &C_max, double sec, double time_limit, bool &time_expired_msg) {
+ if ((get_time() - sec) > time_limit) {
+ if (time_expired_msg) {
+ cout << "\n### Time limit expired, terminating search. ###" <& V,
+ vector& E,
+ int* &pruned) {
+
+ long long n, d, i, j, start, num, md;
+ long long v, u, w, du, pu, pw, md_end;
+ n = vertices.size();
+
+ vector pos_tmp(n);
+ vector core_tmp(n);
+ vector order_tmp(n);
+
+ md = 0;
+ for(v=1; v md) md = core_tmp[v];
+ }
+
+ md_end = md+1;
+ vector < int > bin(md_end,0);
+
+ for (v=1; v < n; v++) bin[core_tmp[v]]++;
+
+ start = 1;
+ for (d=0; d < md_end; d++) {
+ num = bin[d];
+ bin[d] = start;
+ start = start + num;
+ }
+
+ for (v=1; v 1; d--) bin[d] = bin[d-1];
+ bin[0] = 1;
+
+ for (i = 1; i < n; i++) {
+ v=order_tmp[i];
+ for (j = V[v-1]; j < V[v]; j++) {
+ u = E[j] + 1;
+ if (core_tmp[u] > core_tmp[v]) {
+ du = core_tmp[u]; pu = pos_tmp[u];
+ pw = bin[du]; w = order_tmp[pw];
+ if (u != w) {
+ pos_tmp[u] = pw; order_tmp[pu] = w;
+ pos_tmp[w] = pu; order_tmp[pw] = u;
+ }
+ bin[du]++; core_tmp[u]--;
+ }
+ }
+ }
+
+ for (v=0; v pos(n);
+ if (kcore_order.size() > 0) {
+ vector tmp(n,0);
+ kcore = tmp;
+ kcore_order = tmp;
+ }
+ else {
+ kcore_order.resize(n);
+ kcore.resize(n);
+ }
+
+ md = 0;
+ for (v=1; v md) md = kcore[v];
+ }
+
+ md_end = md+1;
+ vector < int > bin(md_end,0);
+
+ for (v=1; v < n; v++) bin[kcore[v]]++;
+
+ start = 1;
+ for (d=0; d < md_end; d++) {
+ num = bin[d];
+ bin[d] = start;
+ start = start + num;
+ }
+
+ // bucket sort
+ for (v=1; v 1; d--) bin[d] = bin[d-1];
+ bin[0] = 1;
+
+ // kcores
+ for (i=1; i kcore[v]) {
+ du = kcore[u]; pu = pos[u];
+ pw = bin[du]; w = kcore_order[pw];
+ if (u != w) {
+ pos[u] = pw; kcore_order[pu] = w;
+ pos[w] = pu; kcore_order[pw] = u;
+ }
+ bin[du]++; kcore[u]--;
+ }
+ }
+ }
+
+ for (v = 0; v < n-1; v++) {
+ kcore[v] = kcore[v+1] + 1; // K + 1
+ kcore_order[v] = kcore_order[v+1]-1;
+ }
+ max_core = kcore[kcore_order[num_vertices()-1]] - 1;
+
+ bin.clear();
+ pos.clear();
+}
diff --git a/pmc_driver.cpp b/pmc_driver.cpp
new file mode 100644
index 0000000..43de714
--- /dev/null
+++ b/pmc_driver.cpp
@@ -0,0 +1,119 @@
+/**
+ ============================================================================
+ Name : Parallel Maximum Clique (PMC) Library
+ Author : Ryan A. Rossi (rrossi@purdue.edu)
+ Description : A general high-performance parallel framework for computing
+ maximum cliques. The library is designed to be fast for large
+ sparse graphs.
+
+ Copyright (C) 2012-2013, Ryan A. Rossi, All rights reserved.
+
+ Please cite the following paper if used:
+ Ryan A. Rossi, David F. Gleich, Assefaw H. Gebremedhin, Md. Mostofa
+ Patwary, A Fast Parallel Maximum Clique Algorithm for Large Sparse Graphs
+ and Temporal Strong Components, arXiv preprint 1302.6256, 2013.
+
+ See http://ryanrossi.com/pmc for more information.
+ ============================================================================
+ */
+
+#include "pmc.h"
+
+using namespace std;
+using namespace pmc;
+
+int main(int argc, char *argv[]) {
+
+ //! parse command args
+ input in(argc, argv);
+ if (in.help) {
+ usage(argv[0]);
+ return 0;
+ }
+
+ //! read graph
+ pmc_graph G(in.graph_stats,in.graph);
+ if (in.graph_stats) { G.bound_stats(in.algorithm, in.lb, G); }
+
+ //! ensure wait time is greater than the time to recompute the graph data structures
+ if (G.num_edges() > 1000000000 && in.remove_time < 120) in.remove_time = 120;
+ else if (G.num_edges() > 250000000 && in.remove_time < 10) in.remove_time = 10;
+ cout << "explicit reduce is set to " << in.remove_time << " seconds" < C;
+ if (in.lb == 0 && in.heu_strat != "0") { // skip if given as input
+ pmc_heu maxclique(G,in);
+ in.lb = maxclique.search(G, C);
+ cout << "Heuristic found clique of size " << in.lb;
+ cout << " in " << get_time() - seconds << " seconds" <= 0) {
+ switch(in.algorithm) {
+ case 0: {
+ //! k-core pruning, neigh-core pruning/ordering, dynamic coloring bounds/sort
+ if (G.num_vertices() < in.adj_limit) {
+ G.create_adj();
+ pmcx_maxclique finder(G,in);
+ finder.search_dense(G,C);
+ break;
+ }
+ else {
+ pmcx_maxclique finder(G,in);
+ finder.search(G,C);
+ break;
+ }
+ }
+ case 1: {
+ //! k-core pruning, dynamic coloring bounds/sort
+ if (G.num_vertices() < in.adj_limit) {
+ G.create_adj();
+ pmcx_maxclique_basic finder(G,in);
+ finder.search_dense(G,C);
+ break;
+ }
+ else {
+ pmcx_maxclique_basic finder(G,in);
+ finder.search(G,C);
+ break;
+ }
+ }
+ case 2: {
+ //! simple k-core pruning (four new pruning steps)
+ pmc_maxclique finder(G,in);
+ finder.search(G,C);
+ break;
+ }
+ default:
+ cout << "algorithm " << in.algorithm << " not found." < > vert_list;
+ int v = 0, u = 0, num_es = 0, self_edges = 0;
+
+ ifstream in_check (filename.c_str());
+ if (!in_check) { cout << filename << "File not found!" <> v >> u;
+ if (v == 0 || u == 0) {
+ fix_start_idx = false;
+ break;
+ }
+ }
+ }
+ ifstream in (filename.c_str());
+ if (!in) { cout << filename << "File not found!" <> v >> u;
+
+ if (fix_start_idx) {
+ v--;
+ u--;
+ }
+ if (v == u) self_edges++;
+ else {
+ vert_list[v].push_back(u);
+ vert_list[u].push_back(v);
+ }
+ }
+ }
+ vertices.push_back(edges.size());
+ for (int i=0; i < vert_list.size(); i++) {
+ edges.insert(edges.end(),vert_list[i].begin(),vert_list[i].end());
+ vertices.push_back(edges.size());
+ }
+ vert_list.clear();
+ vertex_degrees();
+ cout << "self-loops: " << self_edges < > v_map;
+ map > valueList;
+ int col=0, row=0, ridx=0, cidx=0;
+ int entry_counter = 0, num_of_entries = 0;
+ double value;
+
+ ifstream in (filename.c_str());
+ if(!in) {
+ cout<0&&line[0]=='%') getline(in,line);
+ in2.str(line);
+ in2 >> row >> col >> num_of_entries;
+
+ if(row!=col) {
+ cout<<"* ERROR: This is not a square matrix."<> ridx >> cidx >> value;
+ ridx--;
+ cidx--;
+
+ if (ridx < 0 || ridx >= row) cout << "sym-mtx error: " << ridx << " row " << row << endl;
+ if (cidx < 0 || cidx >= col) cout << "sym-mtx error: " << cidx << " col " << col << endl;
+ if (ridx == cidx) continue;
+
+ if (ridx > cidx) {
+ if (b_getValue) {
+ if(value > connStrength) {
+ v_map[ridx].push_back(cidx);
+ v_map[cidx].push_back(ridx);
+ if (is_gstats) {
+ e_v.push_back(ridx);
+ e_u.push_back(cidx);
+ }
+ }
+ } else {
+ v_map[ridx].push_back(cidx);
+ v_map[cidx].push_back(ridx);
+ if (is_gstats) {
+ e_v.push_back(ridx);
+ e_u.push_back(cidx);
+ }
+ }
+
+ if (b_getValue && value > connStrength) {
+ valueList[ridx].push_back(value);
+ valueList[cidx].push_back(value);
+ }
+ } else {
+ cout << "* WARNING: Found a nonzero in the upper triangular. ";
+ break;
+ }
+ }
+ }
+ vertices.push_back(edges.size());
+ for (int i=0;i < row; i++) {
+ edges.insert(edges.end(),v_map[i].begin(),v_map[i].end());
+ vertices.push_back(edges.size());
+ }
+ v_map.clear();
+ valueList.clear();
+ vertex_degrees();
+}
+
+void pmc_graph::read_metis(const string& filename) { return; };
+
+void pmc_graph::create_adj() {
+ double sec = get_time();
+
+ int size = num_vertices();
+ adj = new bool*[size];
+ for (int i = 0; i < size; i++) {
+ adj[i] = new bool[size];
+ memset(adj[i], 0, size * sizeof(bool));
+ }
+
+ for (int i = 0; i < num_vertices(); i++) {
+ for (long long j = vertices[i]; j < vertices[i + 1]; j++ )
+ adj[i][edges[j]] = true;
+ }
+ cout << "Created adjacency matrix in " << get_time() - sec << " seconds" < 0) {
+ if (max_degree < degree[v]) max_degree = degree[v];
+ p++;
+ }
+ }
+ avg_degree = (double)edges.size() / p;
+ return;
+}
+
+
+void pmc_graph::update_degrees(int* &pruned, int& mc) {
+ max_degree = -1;
+ min_degree = std::numeric_limits::max();
+ int p = 0;
+ for (long long v=0; v < num_vertices(); v++) {
+ degree[v] = vertices[v+1] - vertices[v];
+ if (degree[v] < mc) {
+ if (!pruned[v]) pruned[v] = 1;
+ p++;
+ }
+ else {
+ if (max_degree < degree[v]) max_degree = degree[v];
+ if (degree[v] < min_degree) min_degree = degree[v];
+ }
+ }
+ avg_degree = (double)edges.size() / p;
+ cout << ", pruned: " << p << endl;
+}
+
+
+void pmc_graph::update_kcores(int* &pruned) {
+
+ long long n, d, i, j, start, num, md;
+ long long v, u, w, du, pu, pw, md_end;
+ n = vertices.size();
+ kcore.resize(n);
+ fill(kcore.begin(), kcore.end(), 0);
+ vector pos_tmp(n);
+ vector order_tmp(n);
+
+ md = 0;
+ for(v=1; v md) md = kcore[v];
+ }
+ }
+
+ md_end = md+1;
+ vector < int > bin(md_end,0);
+
+ for (v=1; v < n; v++) bin[kcore[v]]++;
+
+ start = 1;
+ for (d=0; d < md_end; d++) {
+ num = bin[d];
+ bin[d] = start;
+ start = start + num;
+ }
+
+ for (v=1; v 1; d--) bin[d] = bin[d-1];
+ bin[0] = 1;
+
+ for (i = 1; i < n; i++) {
+ v=order_tmp[i];
+ if (!pruned[v-1]) {
+ for (j = vertices[v-1]; j < vertices[v]; j++) {
+ if (!pruned[edges[j]]) {
+ u = edges[j] + 1;
+ if (kcore[u] > kcore[v]) {
+ du = kcore[u]; pu = pos_tmp[u];
+ pw = bin[du]; w = order_tmp[pw];
+ if (u != w) {
+ pos_tmp[u] = pw; order_tmp[pu] = w;
+ pos_tmp[w] = pu; order_tmp[pw] = u;
+ }
+ bin[du]++; kcore[u]--;
+ }
+ }
+ }
+ }
+ }
+
+ max_core = 0;
+ for (v=0; v max_core) max_core = kcore[v];
+ }
+ else kcore[v] = 0;
+ }
+ cout << "[pmc: updated cores] K: " << max_core < V(vertices.size(),0);
+ vector E;
+ E.reserve(edges.size());
+
+ int start = 0;
+ for (int i = 0; i < num_vertices(); i++) {
+ start = E.size();
+ if (!pruned[i]) {
+ for (long long j = vertices[i]; j < vertices[i + 1]; j++ ) {
+ if (!pruned[edges[j]])
+ E.push_back(edges[j]);
+ }
+ }
+ V[i] = start;
+ V[i + 1] = E.size();
+ }
+ vertices = V;
+ edges = E;
+}
+
+
+void pmc_graph::reduce_graph(
+ vector& vs,
+ vector& es,
+ int* &pruned,
+ int id,
+ int& mc) {
+
+ int num_vs = vs.size();
+
+ vector V(num_vs,0);
+ vector E;
+ E.reserve(es.size());
+
+ int start = 0;
+ for (int i = 0; i < num_vs - 1; i++) {
+ start = E.size();
+ if (!pruned[i]) { //skip these V_local...
+ for (long long j = vs[i]; j < vs[i + 1]; j++ ) {
+ if (!pruned[es[j]])
+ E.push_back(es[j]);
+ }
+ }
+ V[i] = start;
+ V[i + 1] = E.size();
+ }
+ vs = V;
+ es = E;
+}
+
+
+void pmc_graph::bound_stats(int alg, int lb, pmc_graph& G) {
+ cout << "graph: " << fn <& bound, vector& order) {
+ long long n, d, start, num, md;
+ long long v, md_end;
+
+ n = bound.size();
+ order.reserve(n);
+ vector < long long > pos(n);
+
+ md = 0;
+ for(v=1; v md) md = bound[v];
+
+ md_end = md+1;
+ vector < long long > bin(md_end,0);
+
+ for (v=1; v < n; v++) bin[bound[v]]++;
+
+ start = 1;
+ for (d=0; d < md_end; d++) {
+ num = bin[d];
+ bin[d] = start;
+ start = start + num;
+ }
+
+ for (v=1; v 1; d--) bin[d] = bin[d-1];
+ bin[0] = 1;
+
+ for (v=0; v tmp_edges;
+ tmp_edges.reserve(edges.size());
+
+ for (v = 0; v < num_vertices(); v++) {
+
+ n = vertices[v+1] - vertices[v] + 1;
+ vector vert(n);
+ vector pos(n);
+ vector deg(n);
+
+ md = 0;
+ for(u=1; u md)
+ md = deg[u];
+ }
+
+ md_end = md+1;
+ vector < int > bin(md_end,0);
+
+ for (u=1; u < n; u++) bin[deg[u]]++;
+
+ start = 1;
+ for (d=0; d < md_end; d++) {
+ num = bin[d];
+ bin[d] = start;
+ start = start + num;
+ }
+
+ for (u=1; u C) {
+ int u = 0;
+ vector ind(G.num_vertices(),0);
+ for (size_t i = 0; i < C.size(); i++) ind[C[i]] = 1;
+
+
+ // ensure each vertex in C has |C|-1 edges between each other
+ for (size_t i = 0; i < C.size(); i++) {
+ u = C[i];
+ int sz = 0;
+ for (long long j = G.vertices[u]; j < G.vertices[u+1]; j++)
+ if (ind[G.edges[j]]) sz++;
+
+ // check if connected to |C|-1 vertices
+ if (sz != C.size()-1)
+ return false;
+ }
+ return true;
+}
diff --git a/pmc_graph.h b/pmc_graph.h
new file mode 100644
index 0000000..1787bcb
--- /dev/null
+++ b/pmc_graph.h
@@ -0,0 +1,149 @@
+/**
+ ============================================================================
+ Name : Parallel Maximum Clique (PMC) Library
+ Author : Ryan A. Rossi (rrossi@purdue.edu)
+ Description : A general high-performance parallel framework for computing
+ maximum cliques. The library is designed to be fast for large
+ sparse graphs.
+
+ Copyright (C) 2012-2013, Ryan A. Rossi, All rights reserved.
+
+ Please cite the following paper if used:
+ Ryan A. Rossi, David F. Gleich, Assefaw H. Gebremedhin, Md. Mostofa
+ Patwary, A Fast Parallel Maximum Clique Algorithm for Large Sparse Graphs
+ and Temporal Strong Components, arXiv preprint 1302.6256, 2013.
+
+ See http://ryanrossi.com/pmc for more information.
+ ============================================================================
+ */
+
+#ifndef PMC_GRAPH_H_
+#define PMC_GRAPH_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "math.h"
+#include "pmc_headers.h"
+#include "pmc_utils.h"
+#include "pmc_vertex.h"
+
+
+namespace pmc {
+ class pmc_graph {
+ private:
+ // helper functions
+ void read_mtx(const string& filename);
+ void read_edges(const string& filename);
+ void read_metis(const string& filename);
+
+ public:
+ vector edges;
+ vector vertices;
+ vector degree;
+ int min_degree;
+ int max_degree;
+ int avg_degree;
+ bool is_gstats;
+ string fn;
+ bool** adj;
+
+ // constructor
+ pmc_graph(const string& filename);
+ pmc_graph(bool graph_stats, const string& filename);
+ pmc_graph(const string& filename, bool make_adj);
+ pmc_graph(vector vs, vector es) {
+ edges = es;
+ vertices = vs;
+ vertex_degrees();
+ }
+ // destructor
+ ~pmc_graph();
+
+ void read_graph(const string& filename);
+ void create_adj();
+ void reduce_graph(int* &pruned);
+ void reduce_graph(
+ vector& vs,
+ vector& es,
+ int* &pruned,
+ int id,
+ int& mc);
+
+ int num_vertices() { return vertices.size() - 1; }
+ int num_edges() { return edges.size()/2; }
+ vector * get_vertices(){ return &vertices; }
+ vector* get_edges(){ return &edges; }
+ vector* get_degree(){ return °ree; }
+ vector get_edges_array() { return edges; }
+ vector get_vertices_array() { return vertices; };
+ vector e_v, e_u, eid;
+
+ int vertex_degree(int v) { return vertices[v] - vertices[v+1]; }
+ long long first_neigh(int v) { return vertices[v]; }
+ long long last_neigh(int v) { return vertices[v+1]; }
+
+ void sum_vertex_degrees();
+ void vertex_degrees();
+ void update_degrees();
+ void update_degrees(bool flag);
+ void update_degrees(int* &pruned, int& mc);
+ double density() { return (double)num_edges() / (num_vertices() * (num_vertices() - 1.0) / 2.0); }
+ int get_max_degree() { return max_degree; }
+ int get_min_degree() { return min_degree; }
+ double get_avg_degree() { return avg_degree; }
+
+ void initialize();
+ string get_file_extension(const string& filename);
+ void basic_stats(double sec);
+ void bound_stats(int alg, int lb, pmc_graph& G);
+
+ // vertex sorter
+ void compute_ordering(vector& bound, vector& order);
+ void compute_ordering(string degree, vector& order);
+ // edge sorters
+ void degree_bucket_sort();
+ void degree_bucket_sort(bool desc);
+
+ int max_core;
+ vector kcore;
+ vector kcore_order;
+ vector* get_kcores() { return &kcore; }
+ vector* get_kcore_ordering() { return &kcore_order; }
+ int get_max_core() { return max_core; }
+ void update_kcores(int* &pruned);
+
+ void compute_cores();
+ void induced_cores_ordering(
+ vector& V,
+ vector& E,
+ int* &pruned);
+
+ // clique utils
+ int initial_pruning(pmc_graph& G, int* &pruned, int lb);
+ int initial_pruning(pmc_graph& G, int* &pruned, int lb, bool** &adj);
+ void order_vertices(vector &V, pmc_graph &G,
+ int &lb_idx, int &lb, string vertex_ordering, bool decr_order);
+
+ void print_info(vector &C_max, double &sec);
+ void print_break();
+ bool time_left(vector &C_max, double sec,
+ double time_limit, bool &time_expired_msg);
+ void graph_stats(pmc_graph& G, int& mc, int id, double &sec);
+
+ void reduce_graph(
+ vector& vs,
+ vector& es,
+ int* &pruned,
+ pmc_graph& G,
+ int id,
+ int& mc);
+
+ bool clique_test(pmc_graph& G, vector C);
+ };
+
+}
+#endif
diff --git a/pmc_headers.h b/pmc_headers.h
new file mode 100644
index 0000000..7f328d2
--- /dev/null
+++ b/pmc_headers.h
@@ -0,0 +1,41 @@
+/**
+ ============================================================================
+ Name : Parallel Maximum Clique (PMC) Library
+ Author : Ryan A. Rossi (rrossi@purdue.edu)
+ Description : A general high-performance parallel framework for computing
+ maximum cliques. The library is designed to be fast for large
+ sparse graphs.
+
+ Copyright (C) 2012-2013, Ryan A. Rossi, All rights reserved.
+
+ Please cite the following paper if used:
+ Ryan A. Rossi, David F. Gleich, Assefaw H. Gebremedhin, Md. Mostofa
+ Patwary, A Fast Parallel Maximum Clique Algorithm for Large Sparse Graphs
+ and Temporal Strong Components, arXiv preprint 1302.6256, 2013.
+
+ See http://ryanrossi.com/pmc for more information.
+ ============================================================================
+ */
+
+#ifndef PMC_HEADERS_H_
+#define PMC_HEADERS_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include