Add faceted dim csv (#2)
* Add faceted dim csv

* Bump coverage
maxwelllevin committed Sep 26, 2023
1 parent 40dd3ec commit c5e1b8f
Showing 8 changed files with 284 additions and 40 deletions.
2 changes: 2 additions & 0 deletions .coveragerc
@@ -4,6 +4,8 @@ source =
     ./src
 omit =
     ./env/*
+    _version.py
+    __main__.py

 [report]
 exclude_lines =
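Note: omitting _version.py (typically an auto-generated version file) and __main__.py (a thin CLI entry point) from coverage measurement keeps boilerplate out of the reported percentage, which is presumably what the "Bump coverage" item in the commit message refers to.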
18 changes: 9 additions & 9 deletions Makefile
@@ -1,15 +1,15 @@
 .PHONY: build
 build:
-    rm -rf dist/
-    python -m build
-    pip install dist/*.whl
+    rm -rf dist/ \
+    && python -m build \
+    && pip install dist/*.whl

 coverage:
-    coverage run -m pytest
-    coverage html
-    open htmlcov/index.html
+    coverage run -m pytest \
+    && coverage html \
+    && open htmlcov/index.html

 format:
-    ruff . --fix --ignore E501 --per-file-ignores="__init__.py:F401"
-    isort .
-    black .
+    ruff . --fix --ignore E501 --per-file-ignores="__init__.py:F401" \
+    && isort . \
+    && black .
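Note: each line of a make recipe normally runs in its own shell. Joining the commands with backslash continuations and && executes each target's steps in a single shell invocation and stops the chain at the first failing command.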
3 changes: 2 additions & 1 deletion src/ncconvert/cli.py
@@ -21,7 +21,7 @@
     )
     sys.exit(1)

-from .csv import to_csv, to_csv_collection
+from .csv import to_csv, to_csv_collection, to_faceted_dim_csv
 from .parquet import to_parquet, to_parquet_collection


@@ -39,6 +39,7 @@ def __call__(
 # Register to_* methods as options. For now this is a manual process
 AVAILABLE_METHODS: Dict[str, Converter] = {
     to_csv.__name__: to_csv,
+    to_faceted_dim_csv.__name__: to_faceted_dim_csv,
     to_csv_collection.__name__: to_csv_collection,
     to_parquet.__name__: to_parquet,
     to_parquet_collection.__name__: to_parquet_collection,
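With the registration above, the new converter becomes selectable by name wherever the CLI dispatches on AVAILABLE_METHODS. A minimal lookup sketch, assuming the optional CLI dependencies are installed (the input file and output prefix are hypothetical; only the AVAILABLE_METHODS mapping itself appears in this diff):

import xarray as xr

from ncconvert.cli import AVAILABLE_METHODS

# Look up the newly registered converter by name, as the CLI would.
converter = AVAILABLE_METHODS["to_faceted_dim_csv"]

ds = xr.open_dataset("test.20220405.000000.nc")  # hypothetical input file
csv_path, metadata_path = converter(ds, "outputs/test.20220405.000000")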
25 changes: 24 additions & 1 deletion src/ncconvert/csv.py
@@ -5,7 +5,12 @@

 import xarray as xr

-from .utils import _dump_metadata, _to_dataframe, _to_dataframe_collection
+from .utils import (
+    _dump_metadata,
+    _to_dataframe,
+    _to_dataframe_collection,
+    _to_faceted_dim_dataframe,
+)


 def to_csv(
@@ -89,3 +94,21 @@ def to_csv_collection(
     metadata_path = _dump_metadata(dataset, filepath) if metadata else None

     return tuple(filepaths), metadata_path
+
+
+def to_faceted_dim_csv(
+    dataset: xr.Dataset,
+    filepath: str | Path,
+    metadata: bool = True,
+    **kwargs: Any,
+) -> tuple[Path, Path | None]:
+    to_csv_kwargs = kwargs.get("to_csv_kwargs", {})
+
+    Path(filepath).parent.mkdir(parents=True, exist_ok=True)
+
+    filepath, df = _to_faceted_dim_dataframe(dataset, filepath, ".csv")
+    df.to_csv(filepath, **to_csv_kwargs)  # type: ignore
+
+    metadata_path = _dump_metadata(dataset, filepath) if metadata else None
+
+    return Path(filepath), metadata_path
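A usage sketch for the new public function (the dataset below is invented for illustration; the faceted column naming comes from _flatten_dataset in utils.py):

import numpy as np
import pandas as pd
import xarray as xr

from ncconvert.csv import to_faceted_dim_csv

# A small dataset with a 2D temperature(time, height) variable.
ds = xr.Dataset(
    coords={
        "time": ("time", pd.date_range("2022-04-05", periods=3, freq="h")),
        "height": ("height", [0, 10, 20, 30], {"units": "m"}),
    },
    data_vars={
        "temperature": (
            ("time", "height"),
            np.arange(12.0).reshape(3, 4),
            {"units": "degF"},
        ),
    },
)

csv_path, metadata_path = to_faceted_dim_csv(ds, "outputs/example")
# csv_path -> outputs/example.csv, one row per time, with faceted columns
# temperature_0m, temperature_10m, temperature_20m, temperature_30m.
# metadata_path points to a JSON metadata dump (None when metadata=False).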
102 changes: 98 additions & 4 deletions src/ncconvert/utils.py
@@ -1,12 +1,15 @@
 from __future__ import annotations

 import json
+import logging
 from collections import defaultdict
 from pathlib import Path

 import pandas as pd
 import xarray as xr

+logger = logging.getLogger(__name__)
+

 def _dump_metadata(dataset: xr.Dataset, filepath: str | Path) -> Path:
     metadata = dataset.to_dict(data=False, encoding=True)
@@ -19,8 +22,7 @@ def _dump_metadata(dataset: xr.Dataset, filepath: str | Path) -> Path:
 def _to_dataframe(
     dataset: xr.Dataset, filepath: str | Path, extension: str
 ) -> tuple[Path, pd.DataFrame]:
-    if not extension.startswith("."):
-        extension = "." + extension
+    extension = extension if extension.startswith(".") else "." + extension

     df = dataset.to_dataframe(dim_order=list(dataset.dims))
@@ -32,8 +34,7 @@ def _to_dataframe_collection(
 ) -> tuple[tuple[Path, pd.DataFrame], ...]:
     outputs: list[tuple[Path, pd.DataFrame]] = []

-    if extension.startswith("."):
-        extension = extension[1:]
+    extension = extension[1:] if extension.startswith(".") else extension

     # Get variable dimension groupings
     dimension_groups: dict[tuple[str, ...], list[str]] = defaultdict(list)
@@ -56,3 +57,96 @@ def _to_dataframe_collection(
         outputs.append((dim_group_path, df))

     return tuple(outputs)
+
+
+def _to_faceted_dim_dataframe(
+    dataset: xr.Dataset, filepath: str | Path, extension: str
+) -> tuple[Path, pd.DataFrame]:
+    extension = extension if extension.startswith(".") else "." + extension
+
+    # Get variable dimension groupings
+    dimension_groups: dict[tuple[str, ...], list[str]] = defaultdict(list)
+    for var_name, data_var in dataset.data_vars.items():
+        dims = tuple(str(d) for d in data_var.dims)
+        if len(dims) > 2:
+            logger.error(
+                (
+                    "Variable %s has more than 2 dimensions and is not supported."
+                    " Dims: %s"
+                ),
+                var_name,
+                dims,
+            )
+            continue
+        elif len(dims) == 2 and "time" not in dims:
+            logger.error(
+                (
+                    "2D variables are only supported when 'time' is one of their"
+                    " dimensions. Found variable %s with dimensions: %s."
+                ),
+                var_name,
+                dims,
+            )
+            continue
+        dimension_groups[dims].append(var_name)
+
+    ds = dataset[["time"]].copy()
+    for dims, var_list in dimension_groups.items():
+        # Simple case: variables already indexed by time only
+        if dims == ("time",):
+            ds.update(dataset[var_list])
+            continue
+
+        shape = dataset[var_list[0]].shape
+
+        # If scalar, expand to make time the first dimension
+        if not shape:
+            _tmp = dataset[var_list].expand_dims({"time": dataset["time"]})
+            ds.update(_tmp[var_list])
+            continue
+
+        _tmp = dataset[var_list]
+
+        # If 1D, expand to make time a dimension (2D)
+        if len(shape) == 1:
+            _tmp = _tmp.expand_dims({"time": dataset["time"]})
+
+        # For 2D, make time the first dimension and flatten the second
+        new_dims = ("time", [d for d in dims if d != "time"][0])
+        _tmp = _tmp.transpose(*new_dims)
+        _tmp = _flatten_dataset(_tmp, new_dims[1])
+        ds = ds.merge(_tmp)
+
+    df = ds.to_dataframe()
+
+    return Path(filepath).with_suffix(extension), df
+
+
+def _flatten_dataset(ds: xr.Dataset, second_dim: str) -> xr.Dataset:
+    """Transforms a 2D dataset into a 1D dataset by adding variables for each
+    value of the second dimension. The first dimension must be 'time'.
+
+    Args:
+        ds (xr.Dataset): The dataset to flatten. Must only contain two
+            dimensions/coords and only the variables to flatten.
+        second_dim (str): The name of the dimension to flatten into per-value
+            variables.
+
+    Returns:
+        xr.Dataset: The flattened dataset. Preserves attributes.
+    """
+
+    output = ds[["time"]]
+
+    dim_values = ds[second_dim].values
+
+    dim_units = ds[second_dim].attrs.get("units")
+    if not dim_units or dim_units == "1":
+        dim_units = ""
+
+    dim_suffixes = [f"{dim_val}{dim_units}" for dim_val in dim_values]
+
+    for var_name, data in ds.data_vars.items():
+        for i, suffix in enumerate(dim_suffixes):
+            output[f"{var_name}_{suffix}"] = data[:, i]
+
+    output = output.drop_vars(second_dim)  # remove from coords
+    return output
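To make the faceting behavior concrete, here is a small sketch of what the internal _flatten_dataset helper produces (invented data; suffixes are built from the flattened dimension's values plus its units attribute, with missing or "1" units contributing no suffix text):

import pandas as pd
import xarray as xr

from ncconvert.utils import _flatten_dataset

ds = xr.Dataset(
    coords={
        "time": ("time", pd.date_range("2022-04-05", periods=2, freq="h")),
        "height": ("height", [0, 10], {"units": "m"}),
    },
    data_vars={
        "temperature": (("time", "height"), [[70.0, 65.0], [71.0, 66.0]]),
    },
)

flat = _flatten_dataset(ds, "height")
print(list(flat.data_vars))  # ['temperature_0m', 'temperature_10m']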
73 changes: 72 additions & 1 deletion test/conftest.py
@@ -19,7 +19,7 @@ def dataset() -> xr.Dataset:
         ),
         "height": (
             "height",
-            [0.0, 10.0, 20.0, 30.0],
+            [0, 10, 20, 30],
             {"units": "m", "long_name": "Height AGL"},
         ),
     },
@@ -54,3 +54,74 @@ def dataset() -> xr.Dataset:
         "data_level": "c1",
     },
 )
+
+
+@pytest.fixture(autouse=True, scope="module")
+def bad_dataset() -> xr.Dataset:
+    return xr.Dataset(
+        coords={
+            "time": (
+                "time",
+                pd.date_range(
+                    "2022-04-05",
+                    "2022-04-06",
+                    periods=3 + 1,
+                    inclusive="left",
+                ),  # type: ignore
+                {"units": "Seconds since 1970-01-01 00:00:00"},
+            ),
+            "range": (
+                "range",
+                [1, 2, 3],
+                {},
+            ),
+            "height": (
+                "height",
+                [0, 10, 20, 30],
+                {"units": "m", "long_name": "Height AGL"},
+            ),
+        },
+        data_vars={
+            "temperature": (
+                ("range", "height"),
+                [
+                    [88, 80, 75, 70],
+                    [89, 81, 76, 71],
+                    [88.5, 81.5, 75.5, 69.5],
+                ],
+                {"units": "degF", "_FillValue": -9999.0},
+            ),
+            "too_large": (
+                ("time", "range", "height"),
+                [
+                    [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
+                    [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
+                    [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
+                ],
+                {"units": "1", "_FillValue": -9999.0},
+            ),
+            "humidity": (
+                "range",
+                [60.5, 65.5, 63],
+                {"units": "%", "_FillValue": -9999.0},
+            ),
+            "other": (
+                "height",
+                [1, 2, 3, 4],
+                {"units": "1", "_FillValue": -9999.0},
+            ),
+            "time_var": (
+                "time",
+                [1, 2, 3],
+                {"units": "1", "_FillValue": -9999.0},
+            ),
+        },
+        attrs={
+            "datastream": "bad.buoy.c1",
+            "title": "title",
+            "description": "description",
+            "location_id": "bad",
+            "dataset_name": "buoy",
+            "data_level": "c1",
+        },
+    )
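This fixture appears designed to exercise the new paths in _to_faceted_dim_dataframe: too_large is 3D and should be skipped with a logged error, temperature here is 2D without a time dimension and should likewise be skipped, while humidity and other (1D, non-time) should be expanded over time and flattened, and time_var passes through unchanged.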
53 changes: 29 additions & 24 deletions test/test_cli.py
@@ -37,30 +37,35 @@ def test_convert_cli(dataset: xr.Dataset):

     runner = CliRunner()

-    with runner.isolated_filesystem():
-        dataset.to_netcdf("test.20220405.000000.nc")
-        dataset.to_netcdf("test.20220405.001200.nc")
-        dataset.to_netcdf("test.20220406.000000.nc")
-        dataset.to_netcdf("test.20220406.001200.nc")
-        dataset.to_netcdf("test.20220410.000000.nc")
-        dataset.to_netcdf("test.20220410.001200.nc")
-        dataset.to_netcdf("test.20220420.000000.nc")
-        dataset.close()
+    for verbosity in ["--verbose", "--no-verbose"]:
+        with runner.isolated_filesystem():
+            dataset.to_netcdf("test.20220405.000000.nc")
+            dataset.to_netcdf("test.20220405.001200.nc")
+            dataset.to_netcdf("test.20220406.000000.nc")
+            dataset.to_netcdf("test.20220406.001200.nc")
+            dataset.to_netcdf("test.20220410.000000.nc")
+            dataset.to_netcdf("test.20220410.001200.nc")
+            dataset.to_netcdf("test.20220420.000000.nc")
+            dataset.close()

-        result = runner.invoke(
-            app,
-            args=(
-                "to_csv",
-                "test.2022040*.nc",
-                "test.2022041*.nc",
-                "test.20220420.000000.nc",
-                "--output-dir",
-                "outputs",
-                "--verbose",
-            ),
-        )
+            result = runner.invoke(
+                app,
+                args=(
+                    "to_csv",
+                    "test.2022040*.nc",
+                    "test.2022041*.nc",
+                    "test.20220420.000000.nc",
+                    "--output-dir",
+                    "outputs",
+                    verbosity,
+                ),
+            )

-        assert result.exit_code == 0
+            assert result.exit_code == 0
+            if verbosity == "--no-verbose":
+                assert result.stdout == ""
+            else:
+                assert result.stdout != ""

-        assert len(list(Path("./outputs").glob("*.csv"))) == 7
-        assert len(list(Path("./outputs").glob("*.json"))) == 7
+            assert len(list(Path("./outputs").glob("*.csv"))) == 7
+            assert len(list(Path("./outputs").glob("*.json"))) == 7
