Commit: update broken link
mrdbourke committed Mar 17, 2021
1 parent 2fcbdd4 commit 6aafee7
Showing 1 changed file with 26 additions and 24 deletions.
50 changes: 26 additions & 24 deletions 06_transfer_learning_in_tensorflow_part_3_scaling_up.ipynb
@@ -7,7 +7,7 @@
"provenance": [],
"collapsed_sections": [],
"mount_file_id": "1WbPqk3XTXrHdu181_3KgdtU5EY3CteNM",
"authorship_tag": "ABX9TyMmn3XTjMHhMIwHwabFw4e1",
"authorship_tag": "ABX9TyNVpZ5Eh30iAE3gqa9S2vMi",
"include_colab_link": true
},
"kernelspec": {
@@ -35,7 +35,7 @@
"source": [
"# Transfer Learning with TensorFlow Part 3: Scaling up (🍔👁 Food Vision mini)\n",
"\n",
"In the previous two notebooks ([transfer learning part 1: feature extraction](https://github.com/mrdbourke/tensorflow-deep-learning/blob/main/04_transfer_learning_in_tensorflow_part_1_feature_extraction.ipynb) and [part 2: fine-tuning](https://github.com/mrdbourke/tensorflow-deep-learning/blob/main/04_transfer_learning_in_tensorflow_part_1_feature_extraction.ipynb)) we've seen the power of transfer learning.\n",
"In the previous two notebooks ([transfer learning part 1: feature extraction](https://github.com/mrdbourke/tensorflow-deep-learning/blob/main/04_transfer_learning_in_tensorflow_part_1_feature_extraction.ipynb) and [part 2: fine-tuning](https://github.com/mrdbourke/tensorflow-deep-learning/blob/main/05_transfer_learning_in_tensorflow_part_2_fine_tuning.ipynb)) we've seen the power of transfer learning.\n",
"\n",
"Now we know our smaller modelling experiments are working, it's time to step things up a notch with more data.\n",
"\n",
@@ -72,7 +72,9 @@
"\n",
"You don't have to write the text descriptions but writing the code yourself is a great way to get hands-on experience.\n",
"\n",
"Don't worry if you make mistakes, we all do. The way to get better and make less mistakes is to **write more code**."
"Don't worry if you make mistakes, we all do. The way to get better and make less mistakes is to **write more code**.\n",
"\n",
"> 📖 **Resource:** See the full set of course materials on GitHub: https://github.com/mrdbourke/tensorflow-deep-learning"
]
},
{
@@ -88,7 +90,7 @@
"# Are we using a GPU?\n",
"!nvidia-smi"
],
"execution_count": 1,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
Expand Down Expand Up @@ -142,7 +144,7 @@
"# Get helper functions file\n",
"!wget https://raw.githubusercontent.com/mrdbourke/tensorflow-deep-learning/main/extras/helper_functions.py"
],
"execution_count": 2,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -172,7 +174,7 @@
"# Import series of helper functions for the notebook (we've created/used these in previous notebooks)\n",
"from helper_functions import create_tensorboard_callback, plot_loss_curves, unzip_data, compare_historys, walk_through_dir"
],
"execution_count": 3,
"execution_count": null,
"outputs": []
},
{
@@ -223,7 +225,7 @@
"train_dir = \"101_food_classes_10_percent/train/\"\n",
"test_dir = \"101_food_classes_10_percent/test/\""
],
"execution_count": 4,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -257,7 +259,7 @@
"# How many images/classes are there?\n",
"walk_through_dir(\"101_food_classes_10_percent\")"
],
"execution_count": 5,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -531,7 +533,7 @@
" image_size=IMG_SIZE,\n",
" shuffle=False) # don't shuffle test data for prediction analysis"
],
"execution_count": 6,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -593,7 +595,7 @@
" monitor=\"val_accuracy\", # save the model weights which score the best validation accuracy\n",
" save_best_only=True) # only keep the best model weights on file (delete the rest)"
],
"execution_count": 7,
"execution_count": null,
"outputs": []
},
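> 🔑 **Note:** The opening of the checkpoint cell is cut off. A sketch of the complete callback; checkpoint_path and save_weights_only=True are assumptions consistent with the series:

import tensorflow as tf

checkpoint_path = "101_classes_10_percent_data_model_checkpoint/checkpoint.ckpt"  # assumed path

checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    checkpoint_path,
    save_weights_only=True,   # save only the weights, not the whole model (assumed)
    monitor="val_accuracy",   # save the model weights which score the best validation accuracy
    save_best_only=True)      # only keep the best model weights on file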
{
@@ -626,7 +628,7 @@
" # preprocessing.Rescaling(1./255) # keep for models like ResNet50V2, remove for EfficientNet\n",
"], name=\"data_augmentation\")"
],
"execution_count": 8,
"execution_count": null,
"outputs": []
},
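> 🔑 **Note:** Only the closing lines of the augmentation block are visible. A sketch of the whole Sequential using the TF 2.4-era experimental preprocessing layers; the specific augmentations chosen are assumptions:

import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing

data_augmentation = tf.keras.Sequential([
    preprocessing.RandomFlip("horizontal"),  # randomly flip images horizontally
    preprocessing.RandomRotation(0.2),       # randomly rotate images
    preprocessing.RandomZoom(0.2),           # randomly zoom in on images
    preprocessing.RandomHeight(0.2),         # randomly adjust image height
    preprocessing.RandomWidth(0.2),          # randomly adjust image width
    # preprocessing.Rescaling(1./255)        # keep for models like ResNet50V2, remove for EfficientNet
], name="data_augmentation")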
{
@@ -664,7 +666,7 @@
"outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation=\"softmax\", name=\"output_layer\")(x) # same number of outputs as classes\n",
"model = tf.keras.Model(inputs, outputs)"
],
"execution_count": 9,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -701,7 +703,7 @@
"# Get a summary of our model\n",
"model.summary()"
],
"execution_count": 10,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -764,7 +766,7 @@
" validation_steps=int(0.15 * len(test_data)), # evaluate on smaller portion of test data\n",
" callbacks=[checkpoint_callback]) # save best model weights to file"
],
"execution_count": 11,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -807,7 +809,7 @@
"results_feature_extraction_model = model.evaluate(test_data)\n",
"results_feature_extraction_model"
],
"execution_count": 12,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -854,7 +856,7 @@
"source": [
"plot_loss_curves(history_all_classes_10_percent)"
],
"execution_count": 13,
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
@@ -923,7 +925,7 @@
"for layer in base_model.layers[:-5]:\n",
" layer.trainable = False"
],
"execution_count": 14,
"execution_count": null,
"outputs": []
},
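> 🔑 **Note:** The visible loop re-freezes everything except the last five layers; for that to fine-tune anything, the base model must first be unfrozen above the hunk. The complete step presumably looks like:

# Unfreeze the whole base model, then re-freeze all but the last 5 layers
base_model.trainable = True
for layer in base_model.layers[:-5]:
    layer.trainable = False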
{
@@ -953,7 +955,7 @@
" optimizer=tf.keras.optimizers.Adam(1e-4), # 10x lower learning rate than default\n",
" metrics=['accuracy'])"
],
"execution_count": 15,
"execution_count": null,
"outputs": []
},
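> 🔑 **Note:** After flipping trainable flags the model must be recompiled. The visible tail shows the lowered learning rate; the loss is assumed to be categorical_crossentropy to match the one-hot labels:

import tensorflow as tf

model.compile(loss="categorical_crossentropy",              # assumed loss
              optimizer=tf.keras.optimizers.Adam(1e-4),     # 10x lower learning rate than the Adam default (1e-3)
              metrics=["accuracy"])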
{
@@ -979,7 +981,7 @@
"for layer in model.layers:\n",
" print(layer.name, layer.trainable)"
],
"execution_count": 16,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -1008,7 +1010,7 @@
"for layer_number, layer in enumerate(base_model.layers):\n",
" print(layer_number, layer.name, layer.trainable)"
],
"execution_count": 17,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -1287,7 +1289,7 @@
" validation_steps=int(0.15 * len(test_data)), # validate on 15% of the test data\n",
" initial_epoch=history_all_classes_10_percent.epoch[-1]) # start from previous last epoch"
],
"execution_count": 18,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -1332,7 +1334,7 @@
"results_all_classes_10_percent_fine_tune = model.evaluate(test_data)\n",
"results_all_classes_10_percent_fine_tune"
],
"execution_count": 19,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -1381,7 +1383,7 @@
" new_history=history_all_classes_10_percent_fine_tune,\n",
" initial_epochs=5)"
],
"execution_count": 20,
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
@@ -1442,7 +1444,7 @@
"id": "BmVtMOwMgrxh"
},
"source": [
"## Evaluting the performance of the big dog model across all different classes\n",
"## Evaluating the performance of the big dog model across all different classes\n",
"\n",
"We've got a trained and saved model which according to the evaluation metrics we've used is performing fairly well.\n",
"\n",