Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing 13 changed files with 660 additions and 53 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,6 @@ | ||
{ | ||
"cells": [], | ||
"metadata": {}, | ||
"nbformat": 4, | ||
"nbformat_minor": 2 | ||
} |
Large diffs are not rendered by default.
Oops, something went wrong.
Binary file not shown.
Binary file not shown.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,230 @@ | ||
{ | ||
"cells": [ | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 23, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"Predicted: [('n02123045', 'tabby', 0.84942955), ('n02123159', 'tiger_cat', 0.072093554), ('n02124075', 'Egyptian_cat', 0.069290839)]\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"from keras.applications.resnet50 import ResNet50\n", | ||
"from keras.preprocessing import image\n", | ||
"from keras.applications.resnet50 import preprocess_input, decode_predictions\n", | ||
"import numpy as np\n", | ||
"\n", | ||
"model = ResNet50(weights='imagenet')\n", | ||
"\n", | ||
"img_path = 'outfile4.jpg'\n", | ||
"img = image.load_img(img_path, target_size=(224, 224))\n", | ||
"x = image.img_to_array(img)\n", | ||
"x = np.expand_dims(x, axis=0)\n", | ||
"x = preprocess_input(x)\n", | ||
"\n", | ||
"preds = model.predict(x)\n", | ||
"# decode the results into a list of tuples (class, description, probability)\n", | ||
"# (one such list for each sample in the batch)\n", | ||
"print('Predicted:', decode_predictions(preds, top=3)[0])" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 26, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"import keras\n", | ||
"model=keras.applications.inception_v3.InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 28, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"Predicted: [('n02747177', 'ashcan', 0.99999988), ('n03873416', 'paddle', 6.22831e-08), ('n03047690', 'clog', 4.9466548e-10)]\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"preds = model.predict(x)\n", | ||
"# decode the results into a list of tuples (class, description, probability)\n", | ||
"# (one such list for each sample in the batch)\n", | ||
"print('Predicted:', decode_predictions(preds, top=3)[0])" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 31, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"model=keras.applications.inception_resnet_v2.InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 32, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"Predicted: [('n06596364', 'comic_book', 1.0), ('n04517823', 'vacuum', 2.6264047e-12), ('n02791124', 'barber_chair', 5.7342921e-29)]\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"preds = model.predict(x)\n", | ||
"# decode the results into a list of tuples (class, description, probability)\n", | ||
"# (one such list for each sample in the batch)\n", | ||
"print('Predicted:', decode_predictions(preds, top=3)[0])" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 33, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels.h5\n", | ||
"91889664/91884032 [==============================] - 34s 0us/step\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"model=keras.applications.xception.Xception(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 34, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"Predicted: [('n03814906', 'necklace', 0.81945795), ('n02834397', 'bib', 0.17957036), ('n03594945', 'jeep', 0.00075779692)]\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"preds = model.predict(x)\n", | ||
"# decode the results into a list of tuples (class, description, probability)\n", | ||
"# (one such list for each sample in the batch)\n", | ||
"print('Predicted:', decode_predictions(preds, top=3)[0])" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 35, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels.h5\n", | ||
"574717952/574710816 [==============================] - 158s 0us/step\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"model=keras.applications.vgg19.VGG19(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 36, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"Predicted: [('n02123045', 'tabby', 0.5318045), ('n02123159', 'tiger_cat', 0.30907613), ('n02124075', 'Egyptian_cat', 0.132202)]\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"preds = model.predict(x)\n", | ||
"# decode the results into a list of tuples (class, description, probability)\n", | ||
"# (one such list for each sample in the batch)\n", | ||
"print('Predicted:', decode_predictions(preds, top=3)[0])" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5\n", | ||
"433479680/553467096 [======================>.......] - ETA: 45s" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"model=keras.applications.vgg16.VGG16(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"preds = model.predict(x)\n", | ||
"# decode the results into a list of tuples (class, description, probability)\n", | ||
"# (one such list for each sample in the batch)\n", | ||
"print('Predicted:', decode_predictions(preds, top=3)[0])" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [] | ||
} | ||
], | ||
"metadata": { | ||
"kernelspec": { | ||
"display_name": "Python 3", | ||
"language": "python", | ||
"name": "python3" | ||
}, | ||
"language_info": { | ||
"codemirror_mode": { | ||
"name": "ipython", | ||
"version": 3 | ||
}, | ||
"file_extension": ".py", | ||
"mimetype": "text/x-python", | ||
"name": "python", | ||
"nbconvert_exporter": "python", | ||
"pygments_lexer": "ipython3", | ||
"version": "3.5.2" | ||
} | ||
}, | ||
"nbformat": 4, | ||
"nbformat_minor": 2 | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,48 @@ | ||
import numpy as np | ||
import json | ||
|
||
from keras.utils.data_utils import get_file | ||
from keras import backend as K | ||
|
||
CLASS_INDEX = None | ||
CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json' | ||
|
||
|
||
def preprocess_input(x, dim_ordering='default'):
    """Zero-center an image batch by the ImageNet mean and flip RGB->BGR.

    Subtracts the per-channel ImageNet mean pixel (103.939, 116.779,
    123.68 for B, G, R) and reverses the channel axis from RGB to BGR,
    matching the preprocessing the Caffe-ported ImageNet weights expect.

    Args:
        x: 4D numpy float array of images, batch axis first.
            NOTE: the channel subtraction mutates `x` in place; callers
            who need the original values must pass a copy.
        dim_ordering: 'th' (channels first), 'tf' (channels last), or
            'default' to read the ordering from the Keras backend.

    Returns:
        The preprocessed batch (a view of the same underlying array).

    Raises:
        ValueError: if `dim_ordering` is not 'tf' or 'th'.
    """
    if dim_ordering == 'default':
        dim_ordering = K.image_dim_ordering()
    if dim_ordering not in {'tf', 'th'}:
        # Raise instead of `assert`: asserts are stripped under `python -O`,
        # which would silently let a bad value fall through to the wrong branch.
        raise ValueError('`dim_ordering` must be "tf" or "th", '
                         'got: ' + str(dim_ordering))

    if dim_ordering == 'th':
        # Channels-first layout: (batch, channel, height, width).
        x[:, 0, :, :] -= 103.939
        x[:, 1, :, :] -= 116.779
        x[:, 2, :, :] -= 123.68
        # 'RGB'->'BGR'
        x = x[:, ::-1, :, :]
    else:
        # Channels-last layout: (batch, height, width, channel).
        x[:, :, :, 0] -= 103.939
        x[:, :, :, 1] -= 116.779
        x[:, :, :, 2] -= 123.68
        # 'RGB'->'BGR'
        x = x[:, :, :, ::-1]
    return x
|
||
|
||
def decode_predictions(preds, top=5):
    """Decode ImageNet predictions into human-readable class tuples.

    Args:
        preds: 2D numpy array of shape (samples, 1000) holding the
            class probabilities produced by an ImageNet classifier.
        top: number of highest-scoring classes to return per sample.

    Returns:
        A list (one entry per sample) of lists of
        (wordnet_id, class_name, score) tuples, sorted by descending
        score.

    Raises:
        ValueError: if `preds` is not a 2D array with 1000 columns.
    """
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        # Lazily download and cache the class-index mapping on first use.
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        # Use a context manager so the file handle is closed promptly;
        # the original `json.load(open(fpath))` leaked the handle.
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    results = []
    for pred in preds:
        # Indices of the `top` largest scores, highest first.
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Oops, something went wrong.