BUG: fixup POSIX-only path handling that can easily be made cross-platform
neutrinoceros committed Feb 14, 2022
1 parent 03c41c0 commit 35ce91c
Showing 5 changed files with 36 additions and 35 deletions.
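Every hunk in this commit replaces the same POSIX-only idiom: paths assembled by concatenating "/" break on Windows, while os.path.join inserts the platform separator (os.sep). A minimal sketch of the pattern, with illustrative names rather than yt's own:

    import glob
    import os

    output_dir = os.path.abspath("run0001")  # illustrative directory name

    # POSIX-only: hard-codes "/" as the separator
    header_posix = output_dir + "/particles/Header"

    # cross-platform: os.path.join uses os.sep ("\\" on Windows, "/" elsewhere)
    header_portable = os.path.join(output_dir, "particles", "Header")

    # glob patterns are built the same way
    siblings = glob.glob(os.path.join(os.path.dirname(header_portable), "*"))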
12 changes: 9 additions & 3 deletions yt/frontends/art/data_structures.py
@@ -188,7 +188,9 @@ def _find_files(self, file_amr):
         """
         base_prefix, base_suffix = filename_pattern["amr"]
         numericstr = file_amr.rsplit("_", 1)[1].replace(base_suffix, "")
-        possibles = glob.glob(os.path.dirname(os.path.abspath(file_amr)) + "/*")
+        possibles = glob.glob(
+            os.path.join(os.path.dirname(os.path.abspath(file_amr)), "*")
+        )
         for filetype, (prefix, suffix) in filename_pattern.items():
             # if this attribute is already set skip it
             if getattr(self, "_file_" + filetype, None) is not None:
@@ -468,7 +470,9 @@ def _find_files(self, file_particle):
         """
         base_prefix, base_suffix = filename_pattern["particle_data"]
         aexpstr = file_particle.rsplit("s0", 1)[1].replace(base_suffix, "")
-        possibles = glob.glob(os.path.dirname(os.path.abspath(file_particle)) + "/*")
+        possibles = glob.glob(
+            os.path.join(os.path.dirname(os.path.abspath(file_particle)), "*")
+        )
         for filetype, (prefix, suffix) in filename_pattern.items():
             # if this attribute is already set skip it
             if getattr(self, "_file_" + filetype, None) is not None:
@@ -682,7 +686,9 @@ def _is_valid(cls, filename, *args, **kwargs):
         with open(f, "rb") as fh:
             try:
                 amr_prefix, amr_suffix = filename_pattern["amr"]
-                possibles = glob.glob(os.path.dirname(os.path.abspath(f)) + "/*")
+                possibles = glob.glob(
+                    os.path.join(os.path.dirname(os.path.abspath(f)), "*")
+                )
                 for possible in possibles:
                     if possible.endswith(amr_suffix):
                         if os.path.basename(possible).startswith(amr_prefix):
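All three hunks in this file share one idiom: glob every file sitting next to a known file, then filter by prefix and suffix. A hedged sketch of that idiom, with a hypothetical filename_pattern table standing in for the frontend's real one:

    import glob
    import os

    # hypothetical (filetype -> (prefix, suffix)) table, not yt's actual values
    filename_pattern = {"amr": ("10MpcBox_", ".d")}

    def find_file(filetype, known_file):
        prefix, suffix = filename_pattern[filetype]
        directory = os.path.dirname(os.path.abspath(known_file))
        for possible in glob.glob(os.path.join(directory, "*")):
            if possible.endswith(suffix) and os.path.basename(possible).startswith(prefix):
                return possible
        return None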
18 changes: 9 additions & 9 deletions yt/frontends/boxlib/data_structures.py
@@ -576,7 +576,7 @@ def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.dataset)

     def _determine_particle_output_type(self, directory_name):
-        header_filename = self.ds.output_dir + "/" + directory_name + "/Header"
+        header_filename = os.path.join(self.ds.output_dir, directory_name, "Header")
         with open(header_filename) as f:
             version_string = f.readline().strip()
             if version_string.startswith("Version_Two"):
@@ -1305,7 +1305,7 @@ def _guess_pcast(vals):


 def _read_raw_field_names(raw_file):
-    header_files = glob.glob(raw_file + "*_H")
+    header_files = glob.glob(os.path.join(raw_file, "*_H"))
     return [hf.split(os.sep)[-1][:-2] for hf in header_files]


@@ -1325,15 +1325,15 @@ def _get_active_dimensions(box):


 def _read_header(raw_file, field):
-    level_files = glob.glob(raw_file + "Level_*")
+    level_files = glob.glob(os.path.join(raw_file, "Level_*"))
     level_files.sort()

     all_boxes = []
     all_file_names = []
     all_offsets = []

     for level_file in level_files:
-        header_file = level_file + "/" + field + "_H"
+        header_file = os.path.join(level_file, field + "_H")
         with open(header_file) as f:

             f.readline()  # version
@@ -1437,7 +1437,7 @@ def __init__(self, ds, dataset_type="boxlib_native"):
             self._read_particles(ptype, is_checkpoint)

         # Additional WarpX particle information (used to set up species)
-        self.warpx_header = WarpXHeader(self.ds.output_dir + "/WarpXHeader")
+        self.warpx_header = WarpXHeader(os.path.join(self.ds.output_dir, "WarpXHeader"))

         for key, val in self.warpx_header.data.items():
             if key.startswith("species_"):
@@ -1451,8 +1451,8 @@ def _detect_output_fields(self):
         super()._detect_output_fields()

         # now detect the optional, non-cell-centered fields
-        self.raw_file = self.ds.output_dir + "/raw_fields/"
-        self.raw_fields = _read_raw_field_names(self.raw_file + "Level_0/")
+        self.raw_file = os.path.join(self.ds.output_dir, "raw_fields")
+        self.raw_fields = _read_raw_field_names(os.path.join(self.raw_file, "Level_0"))
         self.field_list += [("raw", f) for f in self.raw_fields]
         self.raw_field_map = {}
         self.ds.nodal_flags = {}
@@ -1531,7 +1531,7 @@ def _parse_parameter_file(self):
             pass
         self._periodicity = tuple(periodicity)

-        particle_types = glob.glob(self.output_dir + "/*/Header")
+        particle_types = glob.glob(os.path.join(self.output_dir, "*", "Header"))
         particle_types = [cpt.split(os.sep)[-2] for cpt in particle_types]
         if len(particle_types) > 0:
             self.parameters["particles"] = 1
@@ -1590,7 +1590,7 @@ def __init__(

     def _parse_parameter_file(self):
         super()._parse_parameter_file()
-        particle_types = glob.glob(self.output_dir + "/*/Header")
+        particle_types = glob.glob(os.path.join(self.output_dir, "*", "Header"))
         particle_types = [cpt.split(os.sep)[-2] for cpt in particle_types]
         if len(particle_types) > 0:
             self.parameters["particles"] = 1
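A subtlety in the particle-type detection above: the glob pattern now spans two directory levels ("*", then "Header"), and matches are split on os.sep rather than a literal "/" to recover the subdirectory name. A small sketch under an assumed plotfile layout:

    import glob
    import os

    output_dir = "plt00000"  # assumed BoxLib-style plotfile directory

    # matches plt00000/Tracer/Header on POSIX and plt00000\Tracer\Header on Windows
    header_paths = glob.glob(os.path.join(output_dir, "*", "Header"))

    # the second-to-last path component is the particle-type directory name
    particle_types = [path.split(os.sep)[-2] for path in header_paths]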
6 changes: 3 additions & 3 deletions yt/frontends/boxlib/io.py
@@ -69,7 +69,7 @@ def _read_raw_field(self, grid, field):
         offset_list = self.ds.index.raw_field_map[field_name][2]

         lev = grid.Level
-        filename = base_dir + "Level_%d/" % lev + fn_list[grid.id]
+        filename = os.path.join(base_dir, f"Level_{lev}", fn_list[grid.id])
         offset = offset_list[grid.id]
         box = box_list[grid.id]

@@ -196,7 +196,7 @@ class IOHandlerOrion(IOHandlerBoxlib, IOHandlerParticlesBoxlibMixin):

     @property
     def particle_filename(self):
-        fn = self.ds.output_dir + "/StarParticles"
+        fn = os.path.join(self.ds.output_dir, "StarParticles")
         if not os.path.exists(fn):
-            fn = self.ds.output_dir + "/SinkParticles"
+            fn = os.path.join(self.ds.output_dir, "SinkParticles")
         return fn
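The _read_raw_field change also trades "Level_%d/" % lev for an f-string component, since os.path.join components carry no trailing slash. A sketch of composing numbered subdirectories this way (names are illustrative):

    import os

    base_dir = os.path.join("plt00000", "raw_fields")  # illustrative
    fn_list = {17: "Cell_D_0001"}  # hypothetical grid-id -> data-file map
    lev, grid_id = 2, 17

    # "Level_2" is joined without any hand-written separator
    filename = os.path.join(base_dir, f"Level_{lev}", fn_list[grid_id])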
2 changes: 1 addition & 1 deletion yt/frontends/owls/fields.py
@@ -343,7 +343,7 @@ def _get_owls_ion_data_dir(self):

         if not os.path.exists(owls_ion_path):
             mylog.info(txt, data_url, data_dir)
-            fname = data_dir + "/" + data_file
+            fname = os.path.join(data_dir, data_file)
             download_file(os.path.join(data_url, data_file), fname)

             cmnd = f"cd {data_dir}; tar xf {data_file}"
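One neighbouring line worth noting: download_file(os.path.join(data_url, data_file), fname) joins a URL, not a filesystem path, and on Windows os.path.join would splice in a backslash. URLs are usually assembled with posixpath.join instead; a hedged aside with illustrative values, not something this commit changes:

    import os
    import posixpath

    data_url = "http://example.org/data"  # illustrative URL
    data_file = "ion_tables.tar.gz"       # illustrative archive name

    local_path = os.path.join("owls_ion_data", data_file)  # platform separator: right for files
    remote_url = posixpath.join(data_url, data_file)       # always "/": right for URLs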
33 changes: 14 additions & 19 deletions yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -1,3 +1,4 @@
+import os
 from glob import glob

 import numpy as np
@@ -24,7 +25,7 @@ def __init__(self, basename, outname=None, source_dir=None, field_conversions=No
         self.ddn = int(name[1])
         if source_dir is None:
             source_dir = "./"
-        self.source_dir = source_dir + "/"
+        self.source_dir = source_dir
         self.basename = name[0]
         if outname is None:
             outname = self.basename + ".%04i" % self.ddn + ".gdf"
@@ -82,7 +83,7 @@ def write_gdf_field(self, fn, grid_number, field, data):

     def read_and_write_index(self, basename, ddn, gdf_name):
         """Read Athena legacy vtk file from multiple cpus"""
-        proc_names = glob(self.source_dir + "id*")
+        proc_names = glob(os.path.join(self.source_dir, "id*"))
         # print('Reading a dataset from %i Processor Files' % len(proc_names))
         N = len(proc_names)
         grid_dims = np.empty([N, 3], dtype="int64")
@@ -94,15 +95,12 @@ def read_and_write_index(self, basename, ddn, gdf_name):

         for i in range(N):
             if i == 0:
-                fn = self.source_dir + "id%i/" % i + basename + ".%04i" % ddn + ".vtk"
+                fn = os.path.join(
+                    self.source_dir, f"id{i}", basename + f".{ddn:04d}.vtk"
+                )
             else:
-                fn = (
-                    self.source_dir
-                    + "id%i/" % i
-                    + basename
-                    + "-id%i" % i
-                    + ".%04i" % ddn
-                    + ".vtk"
-                )
+                fn = os.path.join(
+                    self.source_dir, f"id{i}", basename + f"-id{i}.{ddn:04d}.vtk"
+                )

             print(f"Reading file {fn}")
@@ -205,20 +203,17 @@ def read_and_write_index(self, basename, ddn, gdf_name):
         # f.close()

     def read_and_write_data(self, basename, ddn, gdf_name):
-        proc_names = glob(self.source_dir + "id*")
+        proc_names = glob(os.path.join(self.source_dir, "id*"))
         # print('Reading a dataset from %i Processor Files' % len(proc_names))
         N = len(proc_names)
         for i in range(N):
             if i == 0:
-                fn = self.source_dir + "id%i/" % i + basename + ".%04i" % ddn + ".vtk"
+                fn = os.path.join(
+                    self.source_dir, f"id{i}", basename + f".{ddn:04d}.vtk"
+                )
             else:
-                fn = (
-                    self.source_dir
-                    + "id%i/" % i
-                    + basename
-                    + "-id%i" % i
-                    + ".%04i" % ddn
-                    + ".vtk"
-                )
+                fn = os.path.join(
+                    self.source_dir, f"id{i}", basename + f"-id{i}.{ddn:04d}.vtk"
+                )
             f = open(fn, "rb")
             # print('Reading data from %s' % fn)
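Dropping the trailing slash from self.source_dir is safe because every consumer now goes through os.path.join, which is indifferent to it; the "%04i" % ddn formatting likewise folds into the f-string as {ddn:04d}. A short sketch of the per-processor filename logic under assumed inputs:

    import os

    source_dir = "athena_run"  # assumed dump directory
    basename, ddn = "Blast", 42

    for i in range(3):
        # processor 0 writes Blast.0042.vtk; the others write Blast-id<i>.0042.vtk
        if i == 0:
            fn = os.path.join(source_dir, f"id{i}", basename + f".{ddn:04d}.vtk")
        else:
            fn = os.path.join(source_dir, f"id{i}", basename + f"-id{i}.{ddn:04d}.vtk")
        print(fn)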
