Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix misc build issues on Blue Waters. #1418

Merged
merged 3 commits into from Jul 28, 2016
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
12 changes: 8 additions & 4 deletions configure
Expand Up @@ -36,7 +36,7 @@ fi

# `git archive` (man gitattributes(5)) fills this in, otherwise we do it manually...
COMMIT='$Format:%H$'
if [ "$(substr "$COMMIT" 1 1)" = '$' ]; then
if [ "$(expr substr "$COMMIT" 1 1)" = '$' ]; then
if [ "$GIT" = y ]; then
COMMIT="$(git rev-parse HEAD)"
else
Expand All @@ -54,7 +54,7 @@ fi

# The date is the commit date of the commit, or now if the working tree is dirty.
DATE='$Format:%ci$'
if [ "$(substr "$DATE" 1 1)" = '$' ]; then
if [ "$(expr substr "$DATE" 1 1)" = '$' ]; then
if [ "$GIT" = y ]; then
DATE="$(git log -1 --pretty=format:%ci HEAD)"
else
Expand All @@ -72,7 +72,7 @@ elif [ -n "$CCTOOLS_BRANCH" ]; then
BRANCH="$CCTOOLS_BRANCH"
else
BRANCH='$Format:%d$'
if [ "$(substr "$BRANCH" 1 1)" = '$' ]; then
if [ "$(expr substr "$BRANCH" 1 1)" = '$' ]; then
if [ "$GIT" = y ]; then
BRANCH="$(git rev-parse --abbrev-ref HEAD)"
else
Expand All @@ -95,7 +95,7 @@ else
fi
fi

SOURCE="${BRANCH}:$(substr "$COMMIT" 1 8)${DIRTY}"
SOURCE="${BRANCH}:$(expr substr "$COMMIT" 1 8)${DIRTY}"
VERSION="$MAJOR.$MINOR.$MICRO [${SOURCE}]"

# Bourne shell is tricky when you use backtick command substitution backslash
Expand Down Expand Up @@ -732,6 +732,10 @@ then
then
ccflags="${ccflags} -DHAS_MYSQL_MYSQL_H"
fi

# When MySQL is enabled, we also need SSL
config_openssl_path=auto

else
if [ $config_mysql_path = yes ]
then
Expand Down
2 changes: 1 addition & 1 deletion dttools/src/path_disk_size_info.c
Expand Up @@ -55,7 +55,7 @@ int path_disk_size_info_get_r(const char *path, int64_t max_secs, struct path_di

if((here->dir = opendir(path))) {
here->name = xxstrdup(path);
s->current_dirs = list_create(0);
s->current_dirs = list_create();
s->size_so_far = 0;
s->count_so_far = 1; /* count the root directory */
list_push_tail(s->current_dirs, here);
Expand Down
2 changes: 1 addition & 1 deletion dttools/src/rmonitor_poll.c
Expand Up @@ -454,7 +454,7 @@ int rmonitor_get_mmaps_usage(pid_t pid, struct hash_table *maps)
/* add the info to a sorted list per map, by start. Overlapping maps will be merged later. */
struct list *infos = hash_table_lookup(maps, info->map_name);
if(!infos) {
infos = list_create(0);
infos = list_create();
hash_table_insert(maps, info->map_name, infos);
}

Expand Down
2 changes: 1 addition & 1 deletion dttools/src/rmsummary.c
Expand Up @@ -684,7 +684,7 @@ struct list *rmsummary_parse_file_multiple(const char *filename)
struct jx_parser *p = jx_parser_create(0);
jx_parser_read_stream(p, stream);

struct list *lst = list_create(0);
struct list *lst = list_create();
struct rmsummary *s;

do
Expand Down
2 changes: 1 addition & 1 deletion makeflow/src/dag.c
Expand Up @@ -164,7 +164,7 @@ struct list *dag_input_files(struct dag *d)
char *filename;
struct list *il;

il = list_create(0);
il = list_create();

hash_table_firstkey(d->files);
while((hash_table_nextkey(d->files, &filename, (void **) &f)))
Expand Down
4 changes: 2 additions & 2 deletions makeflow/src/dag_node.c
Expand Up @@ -30,8 +30,8 @@ struct dag_node *dag_node_create(struct dag *d, int linenum)
n->nodeid = d->nodeid_counter++;
n->variables = hash_table_create(0, 0);

n->source_files = list_create(0);
n->target_files = list_create(0);
n->source_files = list_create();
n->target_files = list_create();

n->remote_names = itable_create(0);
n->remote_names_inv = hash_table_create(0, 0);
Expand Down
2 changes: 1 addition & 1 deletion makeflow/src/dag_visitors.c
Expand Up @@ -171,7 +171,7 @@ int dag_to_file_categories(const struct dag *d, FILE * dag_stream, char *(*renam
name = n->category->name;
ns = hash_table_lookup(nodes_of_category, name);
if(!ns) {
ns = list_create(0);
ns = list_create();
hash_table_insert(nodes_of_category, name, (void *) ns);
}
list_push_tail(ns, n);
Expand Down
4 changes: 2 additions & 2 deletions makeflow/src/lexer.c
Expand Up @@ -1243,7 +1243,7 @@ struct lexer *lexer_create(int type, void *data, int line_number, int column_num

lx->line_number = line_number;
lx->column_number = column_number;
lx->column_numbers = list_create(0);
lx->column_numbers = list_create();

lx->stream = NULL;
lx->buffer = NULL;
Expand All @@ -1255,7 +1255,7 @@ struct lexer *lexer_create(int type, void *data, int line_number, int column_num
lx->lexeme_size = 0;
lx->lexeme_max = BUFFER_CHUNK_SIZE;

lx->token_queue = list_create(0);
lx->token_queue = list_create();

lx->buffer = calloc(2 * BUFFER_CHUNK_SIZE, sizeof(char));
if(!lx->buffer)
Expand Down
4 changes: 2 additions & 2 deletions makeflow/src/makeflow_wrapper.c
Expand Up @@ -20,8 +20,8 @@ struct makeflow_wrapper * makeflow_wrapper_create()
struct makeflow_wrapper *w = malloc(sizeof(*w));
w->command = NULL;

w->input_files = list_create(0);
w->output_files = list_create(0);
w->input_files = list_create();
w->output_files = list_create();

w->remote_names = itable_create(0);
w->remote_names_inv = hash_table_create(0, 0);
Expand Down
4 changes: 2 additions & 2 deletions resource_monitor/src/resource_monitor.c
Expand Up @@ -772,7 +772,7 @@ int64_t peak_cores(int64_t wall_time, int64_t cpu_time) {
int64_t max_separation = 60 + 2*interval; /* at least one minute and a complete */

if(!samples) {
samples = list_create(0);
samples = list_create();

struct peak_cores_sample *zero = malloc(sizeof(struct peak_cores_sample));
zero->wall_time = 0;
Expand Down Expand Up @@ -1822,7 +1822,7 @@ int main(int argc, char **argv) {

total_bytes_rx = 0;
total_bytes_tx = 0;
tx_rx_sizes = list_create(0);
tx_rx_sizes = list_create();

rmsummary_read_env_vars(resources_limits);

Expand Down
10 changes: 5 additions & 5 deletions resource_monitor/src/resource_monitor_cluster.c
Expand Up @@ -351,7 +351,7 @@ void cluster_collect_summaries_recursive(struct cluster *c, struct list *accum)

struct list *cluster_collect_summaries(struct cluster *c)
{
struct list *summaries = list_create(0);
struct list *summaries = list_create();

cluster_collect_summaries_recursive(c, summaries);

Expand All @@ -378,7 +378,7 @@ struct cluster *nearest_neighbor_clustering(struct list *initial_clusters, doubl
if(list_size(initial_clusters) < 2)
return top;

stack = list_create(0);
stack = list_create();
list_push_head(stack, top);

/* Add all of the initial clusters as active clusters. */
Expand Down Expand Up @@ -472,7 +472,7 @@ struct list *collect_final_clusters(struct cluster *final, int max_clusters)
struct cluster *c, *cmax;
double dmax;

clusters = list_create(0);
clusters = list_create();
list_push_head(clusters, final);

/* At each count, we split the cluster with the maximal
Expand All @@ -481,7 +481,7 @@ struct list *collect_final_clusters(struct cluster *final, int max_clusters)
* clusters can be split. */
for(count = 1; count < max_clusters && count == list_size(clusters); count++)
{
clusters_next = list_create(0);
clusters_next = list_create();
list_first_item(clusters);

cmax = NULL;
Expand Down Expand Up @@ -658,7 +658,7 @@ void report_clusters_rules(FILE *freport, struct list *clusters)
struct list *create_initial_clusters(struct list *summaries)
{
struct rmDsummary *s;
struct list *initial_clusters = list_create(0);
struct list *initial_clusters = list_create();

list_first_item(summaries);
while( (s = list_next_item(summaries)) )
Expand Down