Commit
Merge pull request #36 from intel-isl/windows-bugfix
Windows bugfix
thias15 committed Sep 17, 2020
2 parents 4e56062 + 5b93218 commit 64b0865
Showing 2 changed files with 69 additions and 72 deletions.
2 changes: 1 addition & 1 deletion policy/dataloader.py
@@ -21,7 +21,7 @@ def load_labels(self):
with open(os.path.join(self.data_dir, dataset, folder,"sensor_data","matched_frame_ctrl_cmd_processed.txt")) as f_input:
header = f_input.readline() #discard header
data = f_input.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
lines = data.replace(","," ").replace("\\","/").replace("\r","").replace("\t"," ").split("\n")
data = [[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
#Tuples containing id: framepath and label: left,right,cmd
data = [(l[1],l[2:]) for l in data if len(l)>1]
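This one-line change is the heart of the Windows fix: logs recorded on Windows contain backslash path separators and CRLF line endings, which the old parsing left in place. A minimal sketch of the difference, using a made-up log line (the field layout is illustrative only, not the exact sensor format):

```python
# Made-up log line as it might appear when recorded on Windows:
# a backslash in the frame path and a CRLF line ending.
raw = "1600000000000,images\\frame_000001.jpeg\t0.5,0.8,1\r\n"

# Old parsing: the trailing "\r" survives and the backslash stays in the path.
old = raw.replace(",", " ").replace("\t", " ").split("\n")

# New parsing: separators are normalized to "/" and carriage returns stripped.
new = (raw.replace(",", " ")
          .replace("\\", "/")
          .replace("\r", "")
          .replace("\t", " ")
          .split("\n"))

print(old)  # ['1600000000000 images\\frame_000001.jpeg 0.5 0.8 1\r', '']
print(new)  # ['1600000000000 images/frame_000001.jpeg 0.5 0.8 1', '']
```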
139 changes: 68 additions & 71 deletions policy/policy_learning.ipynb
@@ -34,7 +34,6 @@
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import time\n",
"#import glob\n",
"import os\n",
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' \n",
"# 0 = all messages are logged (default behavior)\n",
@@ -95,6 +94,13 @@
"tf.__version__"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data processing"
]
},
{
"cell_type": "markdown",
"metadata": {
@@ -115,8 +121,8 @@
},
"outputs": [],
"source": [
"base_dir = \"./dataset\"\n",
"dataset_name = 'openbot'"
"base_dir = \"dataset\"\n",
"dataset_name = \"my_openbot\""
]
},
{
@@ -125,7 +131,7 @@
"metadata": {},
"outputs": [],
"source": [
"train_data_dir = os.path.join(base_dir, \"train_data\")\n",
"train_data_dir = os.path.join(base_dir,\"train_data\")\n",
"train_datasets = [\"my_openbot_1\", \"my_openbot_2\"]\n",
"\n",
"test_data_dir = os.path.join(base_dir, \"test_data\")\n",
@@ -146,7 +152,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Generate frame control pairs"
"Running this for the first time will take some time. This code will match image frames to the controls (labels) and indicator signals (commands). By default, data samples where the vehicle was stationary will be removed. If this is not desired, you need to pass `remove_zeros=False`. If you have made any changes to the sensor files, changed `remove_zeros` or moved your dataset to a new directory, you need to pass `redo_matching=True`. "
]
},
{
@@ -157,8 +163,16 @@
"source": [
"import associate_frames\n",
"max_offset = 1e3 #1ms\n",
"train_frames = associate_frames.match_frame_ctrl_cmd(train_data_dir, train_datasets, max_offset)\n",
"test_frames = associate_frames.match_frame_ctrl_cmd(test_data_dir, test_datasets, max_offset)"
"train_frames = associate_frames.match_frame_ctrl_cmd(train_data_dir, \n",
" train_datasets, \n",
" max_offset, \n",
" redo_matching=False, \n",
" remove_zeros=True)\n",
"test_frames = associate_frames.match_frame_ctrl_cmd(test_data_dir, \n",
" test_datasets, \n",
" max_offset, \n",
" redo_matching=False, \n",
" remove_zeros=True)"
]
},
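For intuition about what the matching does, here is a toy nearest-timestamp matcher; the real logic lives in `associate_frames.py` and pairs frames with controls and commands together, so this sketch only illustrates the pairing idea and the `max_offset` cutoff (all names and values are invented):

```python
def nearest_match(frame_ts, ctrl_ts, max_offset):
    """Pair each frame timestamp with the closest control timestamp,
    dropping pairs that are further apart than max_offset."""
    pairs = []
    for ft in frame_ts:
        ct = min(ctrl_ts, key=lambda t: abs(t - ft))
        if abs(ct - ft) <= max_offset:
            pairs.append((ft, ct))
    return pairs

# Illustrative values only.
print(nearest_match([100, 205, 330], [101, 210, 500], max_offset=10))
# -> [(100, 101), (205, 210)]  (the frame at 330 has no control within 10)
```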
{
@@ -167,34 +181,54 @@
"metadata": {},
"outputs": [],
"source": [
"# train_frames = list(data_dir.glob('train/*/images/*.jpeg'))\n",
"# test_frames = list(data_dir.glob('test/*/images/*.jpeg'))"
"image_count_train = len(train_frames) \n",
"image_count_test = len(test_frames) \n",
"print(\"There are %d train images and %d test images\" %(image_count_train, image_count_test))"
]
},
{
"cell_type": "code",
"execution_count": null,
"cell_type": "markdown",
"metadata": {},
"outputs": [],
"source": [
"image_count_train = len(train_frames) \n",
"image_count_test = len(test_frames) \n",
"print(\"There are %d train images and %d test images\" %(image_count_train, image_count_test))"
"## Hyperparameters"
]
},
{
"cell_type": "code",
"execution_count": null,
"cell_type": "markdown",
"metadata": {
"colab": {},
"colab_type": "code",
"id": "1zf695or-Flq"
},
"source": [
"You may have to tune the learning rate and batch size depending on your available compute resources and dataset. As a general rule of thumb, if you increase the batch size by a factor of n, you can increase the learning rate by a factor of sqrt(n). For debugging and hyperparamter tuning, you can set the number of epochs to a small value like 10. If you want to train a model which will achieve good performance, you should set it to 50 or more. In our paper we used 100."
]
},
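The rule of thumb above is worth a quick check against the values below: scaling the batch size from 16 to 128 (a factor of 8) suggests scaling the learning rate by sqrt(8) ≈ 2.8, and indeed 0.0001 × 2.8 ≈ 0.0003, matching the commented-out defaults. A small helper sketch (the function is ours, for illustration; it is not part of the repository):

```python
import math

BASE_BATCH_SIZE = 16
BASE_LR = 0.0001

def scaled_lr(batch_size, base_batch=BASE_BATCH_SIZE, base_lr=BASE_LR):
    """Scale the learning rate by sqrt(batch_size / base_batch)."""
    return base_lr * math.sqrt(batch_size / base_batch)

print(scaled_lr(128))  # ~0.00028, close to the commented default of 0.0003
```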
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TRAIN_BATCH_SIZE = 16 #128\n",
"TEST_BATCH_SIZE = 16 #128\n",
"\n",
"LR = 0.0001 #0.0003\n",
"NUM_EPOCHS = 10 #100"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Don't change these unless you know what you are doing"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Input dimensions\n",
"IMG_HEIGHT = 720\n",
"IMG_WIDTH = 1280\n",
Expand All @@ -210,7 +244,8 @@
"\n",
"STEPS_PER_EPOCH = np.ceil(image_count_train/TRAIN_BATCH_SIZE)\n",
"\n",
"FLIP_AUG = True\n",
"BN = True\n",
"FLIP_AUG = False\n",
"CMD_AUG = False"
]
},
@@ -242,28 +277,7 @@
"id": "IIG5CPaULegg"
},
"source": [
"To load the files as a `tf.data.Dataset` first create a dataset of the file paths:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "lAkQp5uxoINu"
},
"outputs": [],
"source": [
"# list_train_ds = tf.data.Dataset.list_files(str(data_dir/'train/*/images/*'))\n",
"# list_test_ds = tf.data.Dataset.list_files(str(data_dir/'test/*/images/*'))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Build list: This may take some time"
"To load the files as a `tf.data.Dataset` first create a dataset of the file paths. Depending on dataset size, this may take some time. If you encounter issues, you can use the commented lines instead. However, this will take **much** longer."
]
},
{
@@ -272,8 +286,10 @@
"metadata": {},
"outputs": [],
"source": [
"list_train_ds = tf.data.Dataset.list_files(train_frames)\n",
"list_test_ds = tf.data.Dataset.list_files(test_frames)"
"# list_train_ds = tf.data.Dataset.list_files(train_frames)\n",
"# list_test_ds = tf.data.Dataset.list_files(test_frames)\n",
"list_train_ds = tf.data.Dataset.list_files(str(train_data_dir+'/*/*/images/*'))\n",
"list_test_ds = tf.data.Dataset.list_files(str(test_data_dir+'/*/*/images/*'))"
]
},
{
@@ -437,7 +453,7 @@
"id": "91CPfUUJ_8SZ"
},
"source": [
"Write a short pure-tensorflow function that converts a file paths to an (image_data, label) pair:"
"Short pure-tensorflow function that converts a file path to an (image_data, label) pair:"
]
},
{
@@ -451,7 +467,7 @@
"outputs": [],
"source": [
"def process_train_path(file_path):\n",
" cmd, label = train_data.get_label(file_path)\n",
" cmd, label = train_data.get_label(tf.strings.regex_replace(file_path,\"[/\\\\\\\\]\",\"/\"))\n",
" # load the raw data from the file as a string\n",
" img = tf.io.read_file(file_path)\n",
" img = decode_img(img)\n",
@@ -470,7 +486,7 @@
"outputs": [],
"source": [
"def process_test_path(file_path):\n",
" cmd, label = test_data.get_label(file_path)\n",
" cmd, label = test_data.get_label(tf.strings.regex_replace(file_path,\"[/\\\\\\\\]\",\"/\"))\n",
" # load the raw data from the file as a string\n",
" img = tf.io.read_file(file_path)\n",
" img = decode_img(img)\n",
@@ -498,8 +514,7 @@
"outputs": [],
"source": [
"# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.\n",
"#labeled_ds = list_train_ds.map(process_train_path, num_parallel_calls=4)\n",
"labeled_ds = list_train_ds.map(process_train_path)"
"labeled_ds = list_train_ds.map(process_train_path, num_parallel_calls=4)"
]
},
{
@@ -578,9 +593,7 @@
"import metrics\n",
"import callbacks\n",
"\n",
"LR = 0.0003\n",
"BN = True\n",
"model = models.cil_mobile(NETWORK_IMG_WIDTH,NETWORK_IMG_HEIGHT,BN)\n",
"model = models.pilot_net(NETWORK_IMG_WIDTH,NETWORK_IMG_HEIGHT,BN)\n",
"loss_fn = losses.sq_weighted_mse_angle \n",
"metric_list = ['MeanAbsoluteError', metrics.direction_metric, metrics.angle_metric]\n",
"\n",
@@ -618,7 +631,7 @@
"outputs": [],
"source": [
"history = model.fit(train_ds, \n",
" epochs=10, \n",
" epochs=NUM_EPOCHS, \n",
" steps_per_epoch=STEPS_PER_EPOCH, \n",
" validation_data=test_ds, \n",
" callbacks=[callbacks.checkpoint_cb(checkpoint_path),\n",
@@ -789,16 +802,7 @@
"metadata": {},
"outputs": [],
"source": [
"time.sleep(10)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"utils.save_notebook()"
"#time.sleep(30)"
]
},
{
@@ -807,18 +811,11 @@
"metadata": {},
"outputs": [],
"source": [
"time.sleep(10)\n",
"utils.save_notebook()\n",
"current_file = 'policy_learning.ipynb'\n",
"output_file = os.path.join(log_path,'notebook.html')\n",
"utils.output_HTML(current_file, output_file)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -849,7 +846,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
"version": "3.7.9"
}
},
"nbformat": 4,
