# GTSRB traffic-sign recognition (notebook export): HoG feature extraction + Random Forest classifier.
# Helper functions: dataset reading and small image utilities.

def readTrafficSigns(rootpath):
    '''Read traffic-sign data for the German Traffic Sign Recognition Benchmark.

    Arguments:
        rootpath: path to the traffic-sign data, for example
            '../dataset/GTSRB/train/Final_Training/Images/'
    Returns:
        (images, labels): list of images, list of corresponding labels
        (labels are the raw csv strings, not ints)
    '''
    images = []  # images
    labels = []  # corresponding labels
    # loop over all 43 classes (ids 0..42)
    for c in range(0, 43):
        prefix = rootpath + '/' + format(c, '05d') + '/'  # subdirectory for class
        # per-class annotations file; `with` guarantees it is closed
        with open(prefix + 'GT-' + format(c, '05d') + '.csv') as gtFile:
            gtReader = csv.reader(gtFile, delimiter=';')  # csv parser for annotations file
            next(gtReader)  # skip header (Python 3; was gtReader.next() in Python 2)
            # loop over all images in the current annotations file
            for row in gtReader:
                images.append(plt.imread(prefix + row[0]))  # the 1st column is the filename
                labels.append(row[7])  # the 8th column is the label
    return images, labels


def rgb2gray(rgb):
    '''Convert an (H, W, 3) RGB image to grayscale using ITU-R 601-2 luma weights.'''
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
    return gray


def get_csv(path):
    '''Return full paths of all .csv files directly inside *path*.'''
    return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.csv')]
def showimg_n_hog(grayimg, hogImage):
    '''Display a grayscale image side by side with its HoG visualization.

    Arguments:
        grayimg: 2-D grayscale input image
        hogImage: HoG visualization image to show next to it
    '''
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)
    ax1.axis('off')
    ax1.imshow(grayimg)
    ax1.set_title('Input image')
    # 'box' is the modern spelling; 'box-forced' was removed from matplotlib
    ax1.set_adjustable('box')
    ax2.axis('off')
    ax2.imshow(hogImage, cmap=plt.cm.gray)
    ax2.set_title('Histogram of Oriented Gradients')
    # bug fix: the original adjusted ax1 a second time instead of ax2
    ax2.set_adjustable('box')
    plt.show()
# Functions for test images
testpath = "../dataset/GTSRB/test/Final_Test/Images/"


def loadtestimages_from_path(testpath):
    '''Read all GTSRB test images listed in the directory's annotation csv.

    Also caches the filenames and decoded images to .npy files
    (Image_n_Labels/) so later runs can skip the slow directory read.
    Returns: (timg, testimg) — list of file paths, list of decoded images.
    '''
    print("[INFO] reading all test images from directory\n")
    gtFile = get_csv(testpath)
    filename = gtFile[0]  # the single GT-final_test annotation csv
    timg = []     # image file paths
    testimg = []  # decoded images
    # `with` guarantees the annotation file is closed (original left it open)
    with open(filename, 'rt') as raw_data:
        reader = csv.reader(raw_data, delimiter=';', quoting=csv.QUOTE_NONE)
        next(reader)  # skip header (Python 3; was reader.next())
        for i in reader:
            fname = os.path.join(testpath, i[0])  # first column is the filename
            timg.append(fname)
            testimg.append(plt.imread(fname))
    np.save("Image_n_Labels/testimagenames.npy", timg)
    np.save("Image_n_Labels/testimages.npy", testimg)
    return timg, testimg


def loadtestimages_from_npy():
    '''Load previously cached test image names and images from .npy files.'''
    print("[INFO] loading from .npy\n")
    timg = np.load("Image_n_Labels/testimagenames.npy")
    testimg = np.load("Image_n_Labels/testimages.npy")
    print("[INFO] DONE!loaded from .npy\n")
    return timg, testimg
load training images
# Load training images and labels, preferring the cached .npy files.
# `and` replaces the original bitwise `&` (short-circuits, idiomatic for booleans).
if os.path.isfile("Image_n_Labels/trainImages.npy") and os.path.isfile("Image_n_Labels/trainLabels.npy"):
    X = np.load("Image_n_Labels/trainImages.npy")
    y = np.load("Image_n_Labels/trainLabels.npy")
    print("[INFO] Training images and labels are loaded in variables ==> X,y")
    print("[INFO] Number of training Images {} \nNumber of Labels {}".format(len(X), len(y)))
else:
    # read training images and labels from the dataset, then cache them
    trainImages, trainLabels = readTrafficSigns('../dataset/GTSRB/train/Final_Training/Images/')
    np.save("Image_n_Labels/trainImages.npy", trainImages)
    np.save("Image_n_Labels/trainLabels.npy", trainLabels)
    # bug fix: X and y were never assigned on this path, so every later
    # cell failed with NameError on a first (uncached) run
    X, y = trainImages, trainLabels
    print("[INFO] training images and labels are read from the dataset directory")
    print("[INFO] training images saved to Image_n_Labels/trainingImages.npy for further use")
    print("[INFO] training labels saved to Image_n_Labels/trainingLabels.npy for further use")
[INFO] Training images and labels are loaded in variables ==> X,y
[INFO] Number of training Images 39209
Number of Labels 39209
load test images
# Load test images, preferring the cached .npy files.
# `and` replaces the original bitwise `&` (short-circuits, idiomatic for booleans).
if os.path.isfile("Image_n_Labels/testimagenames.npy") and os.path.isfile("Image_n_Labels/testimages.npy"):
    timg, testimg = loadtestimages_from_npy()
else:
    timg, testimg = loadtestimages_from_path(testpath)
[INFO] loading from .npy
[INFO] DONE!loaded from .npy
# Report the number of test images and preview the first one.
print("[INFO] There are total {} test images available".format(len(timg)))  # fixed "availabe" typo
# last entry instead of the hard-coded index 12629 (works for any dataset size)
print("[INFO] Example {}".format(timg[-1]))
plt.imshow(plt.imread(timg[0]))
[INFO] There are total 12630 test images availabe
[INFO] Example ../dataset/GTSRB/test/Final_Test/Images/12629.ppm
<matplotlib.image.AxesImage at 0x7fcb466331d0>
# Cast string labels to floats for notebook display.
# NOTE(review): astype returns a copy and the result is discarded here,
# so `y` itself still holds strings — assign the result if that matters.
y.astype("float")
array([ 0., 0., 0., ..., 42., 42., 42.])
# Sanity check: compute and display HoG features for one random training image.
n = np.random.randint(0, len(X))
i1 = X[n]
grayim = rgb2gray(i1)
# normalize to a fixed 40x40 input before feature extraction
gI1 = transform.resize(grayim, (40, 40))
# gI2 = cv2.resize(grayim, (40, 40), interpolation = cv2.INTER_CUBIC)
(H, hogImage) = feature.hog(gI1, orientations=9, pixels_per_cell=(8, 8),
                            cells_per_block=(2, 2), transform_sqrt=True,
                            visualise=True)
# stretch the visualization to the full 0..255 uint8 range for display
hogImage = exposure.rescale_intensity(hogImage, out_range=(0, 255)).astype("uint8")
showimg_n_hog(gI1, hogImage)
print(len(H))
576
Extract HoG features over all training images
# Extract HoG features over all training images, caching the results.
# `and` replaces the original bitwise `&`.
if os.path.isfile("HoGFeatures/HoGfeatures.npy") and os.path.isfile("HoGFeatures/HoGvisualize.npy"):
    print("[INFO] loading from file ... ")
    hogfeat = np.load("HoGFeatures/HoGfeatures.npy")
    hogviz = np.load("HoGFeatures/HoGvisualize.npy")
    print("HoG features are loaded from HoGfeatures.npy to variable ==> hogfeat")
    print("HoG visualizations are loaded from HoGvisualize.npy to variable ==> hogviz")
else:
    print("[INFO] HoGfeatures.npy not found")  # fixed broken "does not found" message
    Hviz = []
    Hfeat = []
    for i in range(0, len(X)):
        # show an update every 1,000 images
        if i > 0 and i % 1000 == 0:
            print("[INFO] processed {}/{}".format(i, len(X)))
        I = X[i]
        grayim = rgb2gray(I)
        grayim = transform.resize(grayim, (40, 40))
        (H_4x4, hogImage) = feature.hog(grayim, orientations=9, pixels_per_cell=(4, 4),
                                        cells_per_block=(2, 2), transform_sqrt=True,
                                        visualise=True)
        hogImage = exposure.rescale_intensity(hogImage, out_range=(0, 255)).astype("uint8")
        Hviz.append(hogImage)
        Hfeat.append(H_4x4)
    # save with numpy .npy — about 4x smaller on disk than pickle here
    np.save("HoGFeatures/HoGfeatures.npy", Hfeat)
    np.save("HoGFeatures/HoGvisualize.npy", Hviz)
    print("[INFO] HoGfeatures.npy are saved")
    print("[INFO] HoGvisualize.npy are saved")
[INFO] loading from file ...
HoG features are loaded from HoGfeatures.npy to variable ==> hogfeat
HoG visualizations are loaded from HoGvisualize.npy to variable ==> hogviz
Extract HoG features over all testing images
# Extract HoG features over all testing images, caching the results.
# `and` replaces the original bitwise `&`.
if os.path.isfile("HoGFeatures/HoGfeatures_test.npy") and os.path.isfile("HoGFeatures/HoGvisualize_test.npy"):
    hogfeat_test = np.load("HoGFeatures/HoGfeatures_test.npy")
    hogviz_test = np.load("HoGFeatures/HoGvisualize_test.npy")
    print("HoG features are loaded from HoGfeatures_test.npy to variable ==> hogfeat_test")
    print("HoG visualizations are loaded from HoGvisualize_test.npy to variable ==> hogviz_test")
else:
    print("HoGfeatures_test.npy not found")  # fixed broken "does not found" message
    Hviz = []
    Hfeat = []
    # bug fix: the original iterated the TRAINING images (len(X) / X[i]);
    # this branch must process the test images
    for i in range(0, len(testimg)):
        # show an update every 1,000 images
        if i > 0 and i % 1000 == 0:
            print("[INFO] processed {}/{}".format(i, len(testimg)))
        I = testimg[i]
        grayim = rgb2gray(I)
        grayim = transform.resize(grayim, (40, 40))
        (H_4x4, hogImage) = feature.hog(grayim, orientations=9, pixels_per_cell=(4, 4),
                                        cells_per_block=(2, 2), transform_sqrt=True,
                                        visualise=True)
        hogImage = exposure.rescale_intensity(hogImage, out_range=(0, 255)).astype("uint8")
        Hviz.append(hogImage)
        Hfeat.append(H_4x4)
    # save with numpy .npy — about 4x smaller on disk than pickle here
    np.save("HoGFeatures/HoGfeatures_test.npy", Hfeat)
    np.save("HoGFeatures/HoGvisualize_test.npy", Hviz)
    # bug fix: keep the variables defined on this path too, matching the cached path
    hogfeat_test = Hfeat
    hogviz_test = Hviz
    print("HoGfeatures_test.npy are saved")
    print("HoGvisualize_test.npy are saved")
HoG features are loaded from HoGfeatures_test.npy to variable ==> hogfeat_test
HoG visualizations are loaded from HoGvisualize_test.npy to variable ==> hogviz_test
# NOTE(review): Xhog and X_t appear to come from an earlier cell not shown
# here — confirm they hold the training HoG features and test HoG features.
features = Xhog
labels = y
Xtest = X_t
# take the data and construct the training/testing split:
# 75% of the data for training, 25% for testing
(trainData, testData, trainLabels, testLabels) = train_test_split(features,
    labels, test_size=0.25, random_state=42)
# now, let's take 10% of the training data and use that for validation
(trainData, valData, trainLabels, valLabels) = train_test_split(trainData, trainLabels,
    test_size=0.1, random_state=84)
# show the sizes of each data split
print("training data points: {}".format(len(trainLabels)))
print("validation data points: {}".format(len(valLabels)))
print("testing data points: {}".format(len(testLabels)))
training data points: 26465
validation data points: 2941
testing data points: 9803
# Random Forest classifier on HoG features: load the cached model or train one.
if os.path.isfile("clf/clf_rf_hog.pkl"):
    print("[INFO] loading classifier: Random Forest trained on HoG features...")
    # bug fix: the model was loaded into `svc`, but every later cell uses `rf`,
    # so any run with a cached model crashed with NameError
    rf = joblib.load("clf/clf_rf_hog.pkl")
    print("[INFO] Classifier is loaded as instance ::rf::")
else:
    print("[INFO] pre-trained classifier not found. \n Training Classifier Random Forest")
    rf = RandomForestClassifier()
    rf.fit(trainData, trainLabels)  # duplicate rf.fit(...) call removed
    print("[INFO] Successfully trained the classifier. \n Saving the classifier for further use")
    joblib.dump(rf, 'clf/clf_rf_hog.pkl')
    print("[INFO] Classifier Saved")
[INFO] pre-trained classifier not found.
Training Classifier Random Forest
[INFO] Succefully trained the classsifier.
Saving the classifier for further use
[INFO] Classifier Saved
# Accuracy on the data the forest was fit on — expected near 1.0 and
# not indicative of generalization; compare with the test/validation scores below.
print("accuracy on training data: {}".format(rf.score(trainData,trainLabels)))
accuracy on training data: 0.999319856414
# Accuracy on the held-out 25% test split from train_test_split.
print("accuracy on test data: {}".format(rf.score(testData,testLabels)))
accuracy on test data: 0.881056819341
# Accuracy on the 10% validation split.
print("accuracy on validation data: {}".format(rf.score(valData,valLabels)))
# Final evaluation: per-class precision/recall/F1 on the test split.
predictions = rf.predict(testData)
print("EVALUATION ON TESTING DATA")
print(classification_report(testLabels, predictions))