diff --git a/src/pypromice/process/get_l2.py b/src/pypromice/process/get_l2.py
index 5466fd19..9570c930 100644
--- a/src/pypromice/process/get_l2.py
+++ b/src/pypromice/process/get_l2.py
@@ -3,6 +3,8 @@
 from argparse import ArgumentParser
 import pypromice
 from pypromice.process.aws import AWS
+from pypromice.process.write import prepare_and_write
+from pypromice.process.load import getVars, getMeta
 
 def parse_arguments_l2():
     parser = ArgumentParser(description="AWS L2 processor")
@@ -55,8 +57,10 @@
 
     # Write out level 2
     if args.outpath is not None:
-        aws.writeL2(args.outpath)
-
+        if aws.L2.attrs['format'] == 'raw':
+            prepare_and_write(aws.L2, args.outpath, getVars(), getMeta(), '10min')
+        prepare_and_write(aws.L2, args.outpath, getVars(), getMeta(), '60min')
+
 if __name__ == "__main__":
     get_l2()
diff --git a/src/pypromice/process/get_l2tol3.py b/src/pypromice/process/get_l2tol3.py
index 8e666b1f..7e501404 100644
--- a/src/pypromice/process/get_l2tol3.py
+++ b/src/pypromice/process/get_l2tol3.py
@@ -44,6 +44,11 @@
     # Define Level 2 dataset from file
     l2 = xr.open_dataset(args.inpath)
 
+    if 'bedrock' in l2.attrs.keys():
+        l2.attrs['bedrock'] = l2.attrs['bedrock'] == 'True'
+    if 'number_of_booms' in l2.attrs.keys():
+        l2.attrs['number_of_booms'] = int(l2.attrs['number_of_booms'])
+
     # Perform Level 3 processing
     l3 = toL3(l2)
 
diff --git a/src/pypromice/process/join_levels.py b/src/pypromice/process/join_levels.py
index 9f03e810..4beab4e8 100644
--- a/src/pypromice/process/join_levels.py
+++ b/src/pypromice/process/join_levels.py
@@ -24,18 +24,23 @@
     return args
 
 def loadArr(infile):
-    if infile.split('.')[-1].lower() in 'csv':
+    print(infile)
+    if infile.split('.')[-1].lower() == 'csv':
         df = pd.read_csv(infile, index_col=0, parse_dates=True)
         ds = xr.Dataset.from_dataframe(df)
-
-    elif infile.split('.')[-1].lower() in 'nc':
+    elif infile.split('.')[-1].lower() == 'nc':
         ds = xr.open_dataset(infile)
-
+
     try:
-        name = ds.attrs['station_name']
+        name = ds.attrs['station_id']
     except:
         name = infile.split('/')[-1].split('.')[0].split('_hour')[0].split('_10min')[0]
-
+    ds.attrs['station_id'] = name
+    if 'bedrock' in ds.attrs.keys():
+        ds.attrs['bedrock'] = ds.attrs['bedrock'] == 'True'
+    if 'number_of_booms' in ds.attrs.keys():
+        ds.attrs['number_of_booms'] = int(ds.attrs['number_of_booms'])
+
     print(f'{name} array loaded from {infile}')
     return ds, name
 
@@ -88,16 +93,13 @@
     else:
         print(f'Invalid files {args.file1}, {args.file2}')
         exit()
-
-    # Define output directory subfolder
-    out = os.path.join(args.outpath, name)
-
+
+
     # Resample to hourly, daily and monthly datasets and write to file
-    prepare_and_write(all_ds, out, v, m, '60min')
-    prepare_and_write(all_ds, out, v, m, '1D')
-    prepare_and_write(all_ds, out, v, m, 'M')
-
-    print(f'Files saved to {os.path.join(out, name)}...')
+    prepare_and_write(all_ds, args.outpath, v, m, '60min')
+    # prepare_and_write(all_ds, out, v, m, '1D')
+    # prepare_and_write(all_ds, out, v, m, 'M')
+    print(f'Files saved to {os.path.join(args.outpath, name)}...')
 
 if __name__ == "__main__":
     join_levels()