From 5d6b5ede35f9ca415466c8a5df7589a34f0f6152 Mon Sep 17 00:00:00 2001
From: Glenn Sharman
Date: Thu, 28 Apr 2022 10:21:07 -0500
Subject: [PATCH] Updated file paths

---
 01_Make_models.ipynb | 61 +++++++++-------
 02_Make_predictions.ipynb | 145 +++++++++++++++++++-------------------
 2 files changed, 108 insertions(+), 98 deletions(-)

diff --git a/01_Make_models.ipynb b/01_Make_models.ipynb
index 198b4d5..6ba328d 100755
--- a/01_Make_models.ipynb
+++ b/01_Make_models.ipynb
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "03d50317",
+   "id": "successful-responsibility",
    "metadata": {},
    "source": [
     "## Code accompaniment to \"Machine learning applied to a modern-Pleistocene petrographic dataset: The global prediction of sand modal composition (GloPrSM) model\"\n",
@@ -13,7 +13,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "61b900a0",
+   "id": "stone-headset",
    "metadata": {},
    "source": [
     "## Step 1: Load sand modal composition data and make random forests models"
@@ -21,7 +21,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "8fd11344",
+   "id": "differential-exposure",
    "metadata": {},
    "source": [
     "### Import required modules"
@@ -30,7 +30,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "1163c19f",
+   "id": "tender-heather",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -44,12 +44,13 @@
     "from sklearn.metrics import *\n",
     "import time\n",
     "import pickle\n",
-    "import pathlib"
+    "import pathlib\n",
+    "import os"
    ]
   },
   {
    "cell_type": "markdown",
-   "id": "ca3bdcdc",
+   "id": "hollow-contribution",
    "metadata": {},
    "source": [
     "### Load dependent variable data"
@@ -58,7 +59,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "86b7b6be",
+   "id": "inclusive-moral",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -68,7 +69,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "9fde66b0",
+   "id": "prescription-furniture",
    "metadata": {},
    "source": [
     "### Feature selection"
@@ -77,7 +78,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "bf001f1e",
+   "id": "pointed-blink",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -89,7 +90,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "4219c186",
+   "id": "suffering-black",
    "metadata": {},
    "source": [
     "### Feature correlation (optional)"
@@ -98,7 +99,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "408ed37d",
+   "id": "voluntary-accuracy",
    "metadata": {
     "scrolled": true
    },
    "outputs": [],
@@ -114,7 +115,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "d2edafd3",
+   "id": "selective-major",
    "metadata": {},
    "source": [
     "### Model training\n",
@@ -124,16 +125,16 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "bed73d58",
+   "id": "spectacular-formation",
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Specify output folder\n",
-    "base_path = r'Z:\Sharman\GloPrSM_git\v1.0'\n",
+    "# Specify output folder (current directory used by default)\n",
+    "base_path = os.getcwd() + '/v1.0'\n",
     "\n",
-    "model_path = base_path + '\\\\' + 'models'\n",
-    "val_path = base_path + '\\\\' + 'validation'\n",
-    "test_path = base_path + '\\\\' + 'test_labels'\n",
+    "model_path = base_path + '/' + 'models'\n",
+    "val_path = base_path + '/' + 'validation'\n",
+    "test_path = base_path + '/' + 'test_labels'\n",
     "\n",
     "# Recursively creates the directory and does not raise an exception if the directory already exists\n",
     "pathlib.Path(model_path).mkdir(parents=True, exist_ok=True)\n",
@@ -144,13 +145,13 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "e727d4dd",
+   "id": "technological-villa",
    "metadata": {},
    "outputs": [],
    "source": [
     "labels = ['FQ_QFL_IJ', 'LQ_QFL_IJ', 'QmQch_QmQpQch_IJ', 'QpQch_QmQpQch_IJ', 'FkFp_FpFk_IJ', 'LsLv_LvLsLm_IJ', 'LmLv_LvLsLm_IJ']\n",
     "\n",
     "splits = 10 # Note, 100 splits are used in the article\n",
     "rs = ShuffleSplit(n_splits=splits, test_size=.2, random_state=0)\n",
     "stats = np.zeros(shape=(splits,len(labels)))\n",
     "\n",
@@ -184,27 +185,35 @@
     "    tst_df.loc[:,'{}_label_{}'.format(label, i)] = test_labels\n",
     "\n",
     "    # Export the validation results\n",
-    "    val_df.to_csv(val_path+'\\\\'+'{}_validation_rlf.csv'.format(label),index=False)\n",
-    "    tst_df.to_csv(test_path+'\\\\'+'{}_label_rlf.csv'.format(label),index=False)\n",
+    "    val_df.to_csv(val_path+'/'+'{}_validation_rlf.csv'.format(label),index=False)\n",
+    "    tst_df.to_csv(test_path+'/'+'{}_label_rlf.csv'.format(label),index=False)\n",
     "    \n",
     "    # Save the model\n",
     "    model_filename = 'model_'+str(i)+'.sav'\n",
-    "    model_filepath = model_path+'\\\\'+str(label)\n",
+    "    model_filepath = model_path+'/'+str(label)\n",
     "    pathlib.Path(model_filepath).mkdir(parents=True, exist_ok=True) # Recursively creates the directory and does not raise an exception if the directory already exists\n",
-    "    pickle.dump(model, open(model_filepath+'\\\\'+model_filename, 'wb'))\n",
+    "    pickle.dump(model, open(model_filepath+'/'+model_filename, 'wb'))\n",
     "    \n",
     "    print(i, 'R2: {}, {} sec'.format(round(r2,6), round(time.time()-start,1))) \n",
     "    i += 1\n",
     "    print()\n",
     "\n",
     "r2_df = pd.DataFrame(stats, columns=[x+'_R2' for x in labels])\n",
-    "r2_df.to_csv(base_path+'\\\\'+'R2_stats_rlf.csv',index=False)\n",
+    "r2_df.to_csv(base_path+'/'+'R2_stats_rlf.csv',index=False)\n",
     "\n",
     "# Save the list of features used in the models, so you know what is going on\n",
     "features = pd.DataFrame()\n",
     "features['Inputs'] = feature_list\n",
-    "features.to_csv(base_path+'\\\\'+'feature_list.csv', index=False)"
+    "features.to_csv(base_path+'/'+'feature_list.csv', index=False)"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "referenced-friend",
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
diff --git a/02_Make_predictions.ipynb b/02_Make_predictions.ipynb
index 60391c6..70e2a7e 100755
--- a/02_Make_predictions.ipynb
+++ b/02_Make_predictions.ipynb
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "e2465bac",
+   "id": "quiet-angel",
    "metadata": {},
    "source": [
     "## Code accompaniment to \"Machine learning applied to a modern-Pleistocene petrographic dataset: The global prediction of sand modal composition (GloPrSM) model\"\n",
@@ -13,7 +13,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "bbe79294",
+   "id": "attractive-bridge",
    "metadata": {},
    "source": [
     "## Step 2: Load previously saved random forest models and generate predictions"
@@ -21,7 +21,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "00ef43af",
+   "id": "employed-phrase",
    "metadata": {},
    "source": [
     "### Import required modules"
@@ -30,7 +30,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "c168df01",
+   "id": "literary-picking",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -49,7 +49,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "59dfb597",
+   "id": "devoted-universe",
    "metadata": {},
    "source": [
     "### Function to compute inverse log-ratio transformation"
@@ -58,7 +58,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "94843609",
+   "id": "departmental-jumping",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -94,7 +94,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "4990a54f",
+   "id": "collected-delicious",
    "metadata": {},
    "source": [
     "### 
Provide all inputs needed to run the code\n", @@ -104,21 +104,21 @@ { "cell_type": "code", "execution_count": null, - "id": "9bf11cd0", + "id": "optimum-williams", "metadata": {}, "outputs": [], "source": [ - "# Specify output folder used in Step 1\n", - "base_path = r'Z:\\Sharman\\GloPrSM_git\\v1.0'\n", + "# Specify output folder used in Step 1 (current directory used by default)\n", + "base_path = os.getcwd() + '/v1.0'\n", "\n", "# Base directory of where to save results (recommended same as base_path)\n", - "output_dir = r'Z:\\Sharman\\GloPrSM_git\\v1.0'\n", + "output_dir = os.getcwd() + '/v1.0'\n", "\n", "# CSV of independent variables in upstream mapped drainages\n", "wtrshd_ind_var = 'Watershed_output_lv8_ML_input.csv'\n", "\n", "# Feature list exported when saving the models in Step 1\n", - "feature_list_dir = base_path + '\\\\' + 'feature_list.csv'\n", + "feature_list_dir = base_path + '/' + 'feature_list.csv'\n", "\n", "# The labels of the log ratios you would like to predict\n", "labels = ['FQ_QFL_IJ', 'LQ_QFL_IJ', 'QmQch_QmQpQch_IJ', 'QpQch_QmQpQch_IJ', 'FkFp_FpFk_IJ', 'LsLv_LvLsLm_IJ', 'LmLv_LvLsLm_IJ']\n", @@ -136,7 +136,7 @@ }, { "cell_type": "markdown", - "id": "847a860b", + "id": "destroyed-hungarian", "metadata": {}, "source": [ "### Load necessary files" @@ -145,7 +145,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5a1df20c", + "id": "complex-disposal", "metadata": {}, "outputs": [], "source": [ @@ -162,12 +162,12 @@ "print('There are',len(input_data),'watersheds to be predicted')\n", "\n", "# Export the results\n", - "input_data.to_csv(output_dir+'\\\\' + 'input_data.csv') # Save the input data to CSV" + "input_data.to_csv(output_dir +'/' + 'input_data.csv') # Save the input data to CSV" ] }, { "cell_type": "markdown", - "id": "3bd7b345", + "id": "persistent-float", "metadata": {}, "source": [ "### Load models and generate predictions\n", @@ -177,7 +177,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a4364ceb", + "id": "sound-sewing", "metadata": { "tags": [] }, @@ -185,12 +185,12 @@ "source": [ "for label in labels:\n", " models_to_import = []\n", - " os.chdir(base_path+'\\\\'+'models'+'\\\\'+label)\n", + " os.chdir(base_path+'/'+'models'+'/'+label)\n", "\n", " var_df = pd.DataFrame()\n", "\n", " # Get the model file paths\n", - " for root, dirs, files in os.walk(base_path+'\\\\'+'models'+'\\\\'+label):\n", + " for root, dirs, files in os.walk(base_path+'/'+'models'+'/'+label):\n", " for file in glob.glob(\"*.sav\"):\n", " models_to_import.append(os.path.join(root,file))\n", " \n", @@ -205,14 +205,14 @@ " print('Finished:',models_to_import[i], 'Time: {} sec'.format(round(time.time()-start,1)))\n", "\n", " # Recursively creates the directory and does not raise an exception if the directory already exists\n", - " pathlib.Path(output_dir+'\\\\'+'variance_raw').mkdir(parents=True, exist_ok=True)\n", + " pathlib.Path(output_dir+'/'+'variance_raw').mkdir(parents=True, exist_ok=True)\n", " \n", - " var_df.to_csv(output_dir+'\\\\'+'variance_raw'+'\\{}_variance_raw_rlf.csv'.format(label),index=False)" + " var_df.to_csv(output_dir+'/'+'variance_raw'+'/{}_variance_raw_rlf.csv'.format(label),index=False)" ] }, { "cell_type": "markdown", - "id": "180a5fa4", + "id": "connected-literature", "metadata": { "tags": [] }, @@ -223,16 +223,16 @@ { "cell_type": "code", "execution_count": null, - "id": "bef24a6a", + "id": "interstate-instrument", "metadata": { "tags": [] }, "outputs": [], "source": [ - "os.chdir(output_dir+'\\\\'+'variance_raw')\n", + 
"os.chdir(output_dir+'/'+'variance_raw')\n", "files_to_import = []\n", "\n", - "for root, dirs, files in os.walk(output_dir+'\\\\'+'variance_raw'):\n", + "for root, dirs, files in os.walk(output_dir+'/'+'variance_raw'):\n", " for file in glob.glob(\"*.csv\"):\n", " files_to_import.append(os.path.join(root,file))\n", "\n", @@ -242,7 +242,7 @@ }, { "cell_type": "markdown", - "id": "6895bbb1", + "id": "missing-slope", "metadata": {}, "source": [ "### Calculate log statistics" @@ -251,7 +251,7 @@ { "cell_type": "code", "execution_count": null, - "id": "eb6269f9", + "id": "headed-regard", "metadata": {}, "outputs": [], "source": [ @@ -261,8 +261,8 @@ " \n", " var_df = pd.DataFrame()\n", " var_data = pd.read_csv(file)\n", - " #ratio = file.split('/')[-1].split('_v')[0] # For Mac\n", - " ratio = file.split('\\\\')[-1].split('_v')[0] # For PC\n", + " ratio = file.split('/')[-1].split('_v')[0] # For Mac\n", + " #ratio = file.split('\\\\')[-1].split('_v')[0] # For PC\n", " print('Currently calculating',ratio)\n", " \n", " var_df['HYBAS_ID'] = wtrshd_data['Wtrshd_ID']\n", @@ -301,15 +301,15 @@ " var_df.loc[:,'{}_p975'.format(ratio)] = var_p975\n", " \n", " # Recursively creates the directory and does not raise an exception if the directory already exists\n", - " pathlib.Path(output_dir+'\\\\'+'log_ratio_stats').mkdir(parents=True, exist_ok=True)\n", + " pathlib.Path(output_dir+'/'+'log_ratio_stats').mkdir(parents=True, exist_ok=True)\n", " \n", - " var_df.to_csv(output_dir+'\\\\'+'log_ratio_stats'+'\\{}_stats.csv'.format(ratio),index=False)\n", + " var_df.to_csv(output_dir+'/'+'log_ratio_stats'+'/{}_stats.csv'.format(ratio),index=False)\n", "print(time.time()-start)" ] }, { "cell_type": "markdown", - "id": "c17ed721", + "id": "periodic-yorkshire", "metadata": {}, "source": [ "### Group log ratio stats by individual statistics" @@ -318,14 +318,14 @@ { "cell_type": "code", "execution_count": null, - "id": "f400c34f", + "id": "urban-rates", "metadata": {}, "outputs": [], "source": [ - "os.chdir(output_dir+'\\\\'+'log_ratio_stats')\n", + "os.chdir(output_dir+'/'+'log_ratio_stats')\n", "files_to_import = []\n", "\n", - "for root, dirs, files in os.walk(output_dir+'\\\\'+'log_ratio_stats'):\n", + "for root, dirs, files in os.walk(output_dir+'/'+'log_ratio_stats'):\n", " for file in glob.glob(\"*.csv\"):\n", " files_to_import.append(os.path.join(root,file))\n", "\n", @@ -336,7 +336,7 @@ { "cell_type": "code", "execution_count": null, - "id": "70d7e657", + "id": "rental-julian", "metadata": {}, "outputs": [], "source": [ @@ -349,12 +349,13 @@ { "cell_type": "code", "execution_count": null, - "id": "ab0740a6", + "id": "existing-vampire", "metadata": {}, "outputs": [], "source": [ "stats = ['avg','med','std','skew','min','max','p025','p25','p75','p975']\n", - "ratios = [file.split('\\\\')[-1].split('stats\\\\')[0] for file in files_to_import] # PC\n", + "ratios = [file.split('/')[-1].split('stats/')[0] for file in files_to_import] # Mac\n", + "#ratios = [file.split('\\\\')[-1].split('stats\\\\')[0] for file in files_to_import] # PC\n", "ratios = [x[:(len(x)-10)] for x in ratios]\n", "\n", "var_data_dict = {'LR_data' : ratio_csvs}\n", @@ -373,14 +374,14 @@ " print(stat, np.array(method_df).shape)\n", " \n", " # Recursively creates the directory and does not raise an exception if the directory already exists\n", - " pathlib.Path(output_dir+'\\\\'+'log_ratio_by_stat_type').mkdir(parents=True, exist_ok=True) \n", + " pathlib.Path(output_dir+'/'+'log_ratio_by_stat_type').mkdir(parents=True, exist_ok=True) \n", " 
\n", - " method_df.to_csv(output_dir+'\\\\'+'log_ratio_by_stat_type'+'\\GloPrSM_LR_{}.csv'.format(stat),index=False)" + " method_df.to_csv(output_dir+'/'+'log_ratio_by_stat_type'+'/GloPrSM_LR_{}.csv'.format(stat),index=False)" ] }, { "cell_type": "markdown", - "id": "33d1670e", + "id": "interior-payment", "metadata": {}, "source": [ "### Inverse transform to ternary values" @@ -389,14 +390,14 @@ { "cell_type": "code", "execution_count": null, - "id": "083e52a8", + "id": "administrative-gabriel", "metadata": {}, "outputs": [], "source": [ - "os.chdir(output_dir+'\\\\'+'log_ratio_by_stat_type')\n", + "os.chdir(output_dir+'/'+'log_ratio_by_stat_type')\n", "files_to_import = []\n", "\n", - "for root, dirs, files in os.walk(output_dir+'\\\\'+'log_ratio_by_stat_type'):\n", + "for root, dirs, files in os.walk(output_dir+'/'+'log_ratio_by_stat_type'):\n", " for file in glob.glob(\"*.csv\"):\n", " files_to_import.append(os.path.join(root,file))\n", "\n", @@ -407,7 +408,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5c579164", + "id": "behind-difficulty", "metadata": {}, "outputs": [], "source": [ @@ -422,8 +423,8 @@ "oct_tax_norm = np.zeros(shape=(len(wtrshd_data),11))\n", "\n", "# Recursively creates the directory and does not raise an exception if the directory already exists\n", - "pathlib.Path(output_dir+'\\\\'+'IJ_QFL_relative_vals').mkdir(parents=True, exist_ok=True) \n", - "pathlib.Path(output_dir+'\\\\'+'IJ_QFL_normalized_vals').mkdir(parents=True, exist_ok=True) \n", + "pathlib.Path(output_dir+'/'+'IJ_QFL_relative_vals').mkdir(parents=True, exist_ok=True) \n", + "pathlib.Path(output_dir+'/'+'IJ_QFL_normalized_vals').mkdir(parents=True, exist_ok=True) \n", "\n", "for i, file in enumerate(files_to_import[:]):\n", " data = pd.read_csv(file)\n", @@ -456,22 +457,22 @@ " var_df_norm = pd.DataFrame(oct_tax_norm, columns=[col+'_'+method for col in final_columns])\n", " var_df['HYBAS_ID'], var_df_norm['HYBAS_ID'] = data['HYBAS_ID'], data['HYBAS_ID']\n", " \n", - " var_df.to_csv(output_dir+'\\\\'+'IJ_QFL_relative_vals'+'\\octonary_var_{}.csv'.format(method),index=False)\n", - " var_df_norm.to_csv(output_dir+'\\\\'+'IJ_QFL_normalized_vals'+'\\octonary_var_{}.csv'.format(method),index=False)\n", + " var_df.to_csv(output_dir+'/'+'IJ_QFL_relative_vals'+'/octonary_var_{}.csv'.format(method),index=False)\n", + " var_df_norm.to_csv(output_dir+'/'+'IJ_QFL_normalized_vals'+'/octonary_var_{}.csv'.format(method),index=False)\n", " print()" ] }, { "cell_type": "code", "execution_count": null, - "id": "11ca7d02", + "id": "colonial-service", "metadata": {}, "outputs": [], "source": [ - "p025 = pd.read_csv(output_dir+'\\\\'+'IJ_QFL_normalized_vals'+'\\octonary_var_p025.csv')\n", - "p25 = pd.read_csv(output_dir+'\\\\'+'IJ_QFL_normalized_vals'+'\\octonary_var_p25.csv')\n", - "p75 = pd.read_csv(output_dir+'\\\\'+'IJ_QFL_normalized_vals'+'\\octonary_var_p75.csv')\n", - "p975 = pd.read_csv(output_dir+'\\\\'+'IJ_QFL_normalized_vals'+'\\octonary_var_p975.csv')\n", + "p025 = pd.read_csv(output_dir+'/'+'IJ_QFL_normalized_vals'+'/octonary_var_p025.csv')\n", + "p25 = pd.read_csv(output_dir+'/'+'IJ_QFL_normalized_vals'+'/octonary_var_p25.csv')\n", + "p75 = pd.read_csv(output_dir+'/'+'IJ_QFL_normalized_vals'+'/octonary_var_p75.csv')\n", + "p975 = pd.read_csv(output_dir+'/'+'IJ_QFL_normalized_vals'+'/octonary_var_p975.csv')\n", "inner_50 = pd.DataFrame()\n", "inner_95 = pd.DataFrame()\n", "\n", @@ -482,13 +483,13 @@ "\n", "inner_50['HYBAS_ID'] = data['HYBAS_ID']\n", "inner_95['HYBAS_ID'] = data['HYBAS_ID']\n", - 
"inner_50.to_csv(output_dir+'\\\\'+'IJ_QFL_normalized_vals'+'\\octonary_var_i50.csv',index=False)\n", - "inner_95.to_csv(output_dir+'\\\\'+'IJ_QFL_normalized_vals'+'\\octonary_var_i95.csv',index=False)" + "inner_50.to_csv(output_dir+'/'+'IJ_QFL_normalized_vals'+'/octonary_var_i50.csv',index=False)\n", + "inner_95.to_csv(output_dir+'/'+'IJ_QFL_normalized_vals'+'/octonary_var_i95.csv',index=False)" ] }, { "cell_type": "markdown", - "id": "2abafbe9", + "id": "twelve-douglas", "metadata": {}, "source": [ "### Export results as a CSV" @@ -497,14 +498,14 @@ { "cell_type": "code", "execution_count": null, - "id": "22df8367", + "id": "elder-guess", "metadata": {}, "outputs": [], "source": [ - "os.chdir(output_dir+'\\\\'+'IJ_QFL_normalized_vals')\n", + "os.chdir(output_dir+'/'+'IJ_QFL_normalized_vals')\n", "files_to_import = []\n", "\n", - "for root, dirs, files in os.walk(output_dir+'\\\\'+'IJ_QFL_normalized_vals'):\n", + "for root, dirs, files in os.walk(output_dir+'/'+'IJ_QFL_normalized_vals'):\n", " for file in glob.glob(\"*.csv\"):\n", " files_to_import.append(os.path.join(root,file))\n", "\n", @@ -515,7 +516,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9561d61a", + "id": "independent-selection", "metadata": {}, "outputs": [], "source": [ @@ -529,7 +530,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fdea26d9", + "id": "specific-shield", "metadata": {}, "outputs": [], "source": [ @@ -539,7 +540,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3603cebb", + "id": "ruled-circus", "metadata": {}, "outputs": [], "source": [ @@ -551,7 +552,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3496572a", + "id": "successful-vermont", "metadata": {}, "outputs": [], "source": [ @@ -561,16 +562,16 @@ { "cell_type": "code", "execution_count": null, - "id": "a83aee84", + "id": "standard-tackle", "metadata": {}, "outputs": [], "source": [ - "wtrshd_merge.to_csv(output_dir+'\\\\'+'wtrshd_data_wQFL.csv')" + "wtrshd_merge.to_csv(output_dir+'/'+'wtrshd_data_wQFL.csv')" ] }, { "cell_type": "markdown", - "id": "292b49f0", + "id": "finnish-sydney", "metadata": {}, "source": [ "### Join results with the BasinATLAS level 08 shapefile and export as shapefile\n", @@ -580,18 +581,18 @@ { "cell_type": "code", "execution_count": null, - "id": "85806b9c", + "id": "deluxe-creativity", "metadata": {}, "outputs": [], "source": [ "# Need to download the BasinATLAS (v10) level 8 shapefile and specify filepath below\n", - "wtrshd_shp = gpd.read_file('Z:\\Sharman\\GloPrSM_git\\BasinATLAS_v10_lev08.shp')" + "wtrshd_shp = gpd.read_file('BasinATLAS_v10_lev08.shp')" ] }, { "cell_type": "code", "execution_count": null, - "id": "876408b0", + "id": "absent-airfare", "metadata": {}, "outputs": [], "source": [ @@ -601,11 +602,11 @@ { "cell_type": "code", "execution_count": null, - "id": "88e02cc3", + "id": "funded-handling", "metadata": {}, "outputs": [], "source": [ - "wtrshd_shp_wQFL.to_file(output_dir+'\\\\'+'wtrshd_shp_wQFL.shp')" + "wtrshd_shp_wQFL.to_file(output_dir+'/'+'wtrshd_shp_wQFL.shp')" ] } ],