Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
81 changes: 81 additions & 0 deletions .github/workflows/build-container.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
name: build container

on:
  pull_request:
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review
  push:
    branches:
      - main
    tags:
      - 'v*'

env:
  REGISTRY: ghcr.io
  # PATH that gets baked into the final docker image (via
  # `docker import --change` below). Named CONTAINER_PATH rather than
  # PATH so it does not clobber the runner's own PATH for the
  # workflow's `run` steps.
  CONTAINER_PATH: /opt/env/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/cuda-11.8/bin

jobs:
  build:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@main
        with:
          tool-cache: false

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: recursive
          fetch-depth: 0

      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Build the singularity image as a sandbox directory inside a
      # docker container that has singularity installed (take a big
      # breath). Then tar that directory so that we can import it into
      # docker. Doing everything in one fell swoop because of
      # permissions discrepancies inside and outside the container.
      - name: Build singularity image
        run: |
          docker run \
            --rm \
            -v ${{ github.workspace }}:/opt/buoy \
            --workdir /opt/buoy \
            --privileged \
            --entrypoint /bin/bash \
            quay.io/singularity/singularity:v3.8.1 \
            -c 'singularity build --sandbox /opt/buoy/sandbox apptainer.def && tar -czf /opt/buoy/app.tar.gz -C /opt/buoy/sandbox .'

      # Now copy the fs contents into an empty container and push it to
      # the registry. The tag is lowercased because Docker requires
      # lowercase repository names, while github.repository preserves
      # the repo's original casing.
      - name: Build and push docker image
        # only run on pushes so that we aren't
        # building containers for PRs
        if: ${{ github.event_name == 'push' || startsWith(github.ref, 'refs/tags/') }}
        env:
          tag: ${{ env.REGISTRY }}/${{ github.repository }}/buoy:${{ github.ref_name }}
        run: |
          # ${tag,,} is bash lowercase expansion
          export TAG_LC=${tag,,}
          docker import --change "ENV PATH=${{ env.CONTAINER_PATH }}" app.tar.gz $TAG_LC
          docker push $TAG_LC



2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
outdir
dist
ml4gw_buoy.egg-info
build

**/__pycache__
52 changes: 52 additions & 0 deletions apptainer.def
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
Bootstrap: docker
From: mambaorg/micromamba:1.5
Stage: build

%files
    ./conda-lock.yml /opt/buoy/conda-lock.yml
    ./pyproject.toml /opt/buoy/pyproject.toml
    ./uv.lock /opt/buoy/uv.lock
    ./README.md /opt/buoy/README.md
    ./buoy /opt/buoy/buoy

%post
    # create mount points for cluster filesystems
    mkdir -p /cvmfs /hdfs /gpfs /ceph /hadoop

    # install compilers/toolchain for any packages
    # that pip must build from source
    apt-get update
    apt-get install -y build-essential
    apt-get clean

    # activate micromamba and create environment from lockfile
    # NOTE(review): this runs .bashrc as a script in a subshell,
    # not `source` — presumably intended to trigger micromamba's
    # shell init; confirm it has the desired effect
    /bin/bash /root/.bashrc
    micromamba create -p /opt/env -f /opt/buoy/conda-lock.yml

    # export the locked dependency set (including all extras)
    # to a requirements file that uv pip can install from
    cd /opt/buoy
    micromamba run -p /opt/env \
        uv export --frozen --all-extras -o requirements.txt

    # Needs to be pip sync to discover conda env
    micromamba run -p /opt/env \
        uv pip install -r requirements.txt

    # initialize our shell so that we can execute
    # commands in our environment at run time
    micromamba shell init --shell=bash --root-prefix=~/micromamba

    # set path, and add it to /etc/profile
    # so that it will be set if a login shell is invoked
    export PATH=/opt/env/bin:$PATH
    echo export PATH=$PATH >> /etc/profile

%runscript
    #!/bin/bash
    eval "$(micromamba shell hook --shell bash)"
    micromamba activate /opt/env
    exec "$@"
11 changes: 0 additions & 11 deletions buoy/cli.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,8 @@
import logging
import sys

import jsonargparse
from buoy.main import main


def cli(args=None):
log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(
format=log_format,
level=logging.INFO,
stream=sys.stdout,
)
logging.getLogger("bilby").setLevel(logging.WARNING)

parser = jsonargparse.ArgumentParser()
parser.add_function_arguments(main, fail_untyped=False, sub_configs=True)
parser.add_argument("--config", action="config")
Expand Down
33 changes: 31 additions & 2 deletions buoy/main.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,20 @@
import logging
import sys
import warnings
from pathlib import Path
from typing import List, Optional, Union
import warnings

import numpy as np
import torch

from .models import Aframe, Amplfi
from .utils.data import get_data
from .utils.html import generate_html
from .utils.plotting import plot_aframe_response, plot_amplfi_result
from .utils.plotting import (
plot_aframe_response,
plot_amplfi_result,
q_plots,
)


def main(
Expand All @@ -29,6 +34,7 @@ def main(
device: Optional[str] = None,
to_html: bool = False,
seed: Optional[int] = None,
verbose: bool = False,
):
"""
Main function to run Aframe and AMPLFI on the given events
Expand Down Expand Up @@ -76,7 +82,19 @@ def main(
If True, generate an HTML summary page.
seed:
Random seed for reproducibility of AMPLFI results.
verbose:
If True, log at the DEBUG level. Else, log at INFO level.
"""
log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(
format=log_format,
level=logging.DEBUG if verbose else logging.INFO,
stream=sys.stdout,
)
logging.getLogger("bilby").setLevel(logging.WARNING)
logging.getLogger("gwdatafind").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)

if seed is not None:
torch.manual_seed(seed)

Expand Down Expand Up @@ -132,6 +150,7 @@ def main(
data, ifos, t0, event_time = get_data(
event=event,
sample_rate=aframe.sample_rate,
psd_length=aframe.psd_length,
datadir=datadir,
)
data = torch.Tensor(data).double()
Expand Down Expand Up @@ -171,6 +190,16 @@ def main(
whitened_data = np.concatenate([whitened_times[None], whitened])
np.save(datadir / "whitened_data.npy", whitened_data)

logging.info("Creating Q-plots")
q_plots(
data=data.squeeze().cpu().numpy(),
t0=t0,
plotdir=plotdir,
gpstime=event_time,
sample_rate=amplfi.sample_rate,
amplfi_highpass=amplfi.highpass,
)

logging.info("Plotting Aframe response")
plot_aframe_response(
times=times,
Expand Down
4 changes: 3 additions & 1 deletion buoy/models/amplfi.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,9 @@ def __init__(
)

parser = ArgumentParser()
parser.add_class_arguments(AmplfiConfig, fail_untyped=False)
parser.add_class_arguments(
AmplfiConfig, fail_untyped=False, sub_configs=True
)
parser.link_arguments(
"inference_params",
"architecture.init_args.num_params",
Expand Down
50 changes: 39 additions & 11 deletions buoy/utils/data.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import logging
import warnings
from pathlib import Path

import gwosc
Expand All @@ -10,6 +11,12 @@
from huggingface_hub.errors import EntryNotFoundError
from ligo.gracedb.rest import GraceDb

STRAIN_CHANNELS = {
"H1": "H1:GDS-CALIB_STRAIN_CLEAN",
"L1": "L1:GDS-CALIB_STRAIN_CLEAN",
"V1": "V1:Hrec_hoft_16384Hz",
}


def get_local_or_hf(
filename: Path,
Expand Down Expand Up @@ -97,6 +104,7 @@ def slice_amplfi_data(
def get_data(
event: str,
sample_rate: float,
psd_length: float,
datadir: Path,
):
if event.startswith("GW"):
Expand All @@ -120,26 +128,44 @@ def get_data(
"(e.g. G123456 or S123456)."
)

# Make sure things start at an integer time for consistency.
# Take data from psd_length * (-1.5, 0.5) around the event
# time to make sure there's enough for analysis. This isn't
# totally robust, but should be good for most use cases.
offset = event_time % 1
start = event_time - 96 - offset
end = event_time + 32 - offset

if ifos not in [["H1", "L1"], ["H1", "L1", "V1"]]:
raise ValueError(
f"Event {event} does not have the required detectors. "
f"Expected ['H1', 'L1'] or ['H1', 'L1', 'V1'], got {ifos}"
)
start = event_time - 1.5 * psd_length - offset
end = event_time + 0.5 * psd_length - offset

datafile = datadir / f"{event}.hdf5"
if not datafile.exists():
logging.info(
"Fetching open data from GWOSC between GPS times "
"Fetching data from between GPS times "
f"{start} and {end} for {ifos}"
)

ts_dict = TimeSeriesDict()
for ifo in ifos:
ts_dict[ifo] = TimeSeries.fetch_open_data(ifo, start, end)
if start < 1269363618:
ts_dict[ifo] = TimeSeries.fetch_open_data(ifo, start, end)
else:
ts_dict[ifo] = TimeSeries.get(STRAIN_CHANNELS[ifo], start, end)

span = ts_dict[ifo].span
if span.end - span.start < 128:
ts_dict.pop(ifo)
warnings.warn(
f"Detector {ifo} did not have sufficient data surrounding "
"the event time, removing it from the dataset",
stacklevel=2,
)

ifos = list(ts_dict.keys())
logging.info(f"Fetched data for detectors {ifos}")
if ifos not in [["H1", "L1"], ["H1", "L1", "V1"]]:
raise ValueError(
f"Event {event} does not have the required detectors. "
f"Expected ['H1', 'L1'] or ['H1', 'L1', 'V1'], got {ifos}"
)
ts_dict = ts_dict.resample(sample_rate)

logging.info(f"Saving data to file {datafile}")
Expand All @@ -154,10 +180,12 @@ def get_data(
data = np.stack([ts_dict[ifo].value for ifo in ifos])[None]

else:
logging.info(f"Loading {ifos} data from file for event {event}")
logging.info(f"Loading data from file for event {event}")
with h5py.File(datafile, "r") as f:
ifos = list(f.keys())
data = np.stack([f[ifo][:] for ifo in ifos])[None]
event_time = f.attrs["tc"]
t0 = f.attrs["t0"]
logging.info(f"Loaded data for detectors {ifos}")

return data, ifos, t0, event_time
2 changes: 1 addition & 1 deletion buoy/utils/html.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ def generate_html(
label (str): Title for the HTML page.
"""
html_content = html_header(label)
for image_path in plotdir.glob("*.png"):
for image_path in sorted(plotdir.glob("*.png")):
caption = image_path.stem
caption = caption.replace("_", " ")
caption = (
Expand Down
Loading