# saliconEvalDemo.py -- SALICON saliency evaluation demo (notebook export)
# coding: utf-8
# Demo driver for the SALICON saliency-evaluation toolkit.  This file is an
# IPython-notebook export; the commented-out get_ipython() magics below are
# the original notebook cell setup and are intentionally left disabled.
# In[9]:
#get_ipython().magic(u'reload_ext autoreload')
#get_ipython().magic(u'autoreload 2')
#get_ipython().magic(u'matplotlib inline')
from salicon.salicon import SALICON
from saliconeval.eval import SALICONEval
import matplotlib.pyplot as plt
import skimage.io as io
import pylab
# Default figure size (inches) for all plots produced below.
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
# In[10]:
import json
from json import encoder
# Render floats with 3 decimal places when dumping results to JSON.
# NOTE(review): FLOAT_REPR is a CPython-2-era hook; Python 3's C encoder
# ignores it, so on Python 3 this line has no effect -- confirm if ported.
encoder.FLOAT_REPR = lambda o: format(o, '.3f')
# In[11]:
# Locations of the demo data set and of the fake algorithm's output files.
dataDir = '.'
dataType = 'train2014examples'
algName = 'fake'
# Ground-truth fixation annotations for this data split.
annFile = '%s/annotations/fixations_%s.json' % (dataDir, dataType)
# One file per artifact kind: raw results, per-image evals, summary scores.
subtypes = ['results', 'evalImgs', 'eval']
resFile, evalImgsFile, evalFile = (
    '%s/results/fixations_%s_%s_%s.json' % (dataDir, dataType, algName, st)
    for st in subtypes
)
# In[12]:
# create coco object and cocoRes object
# SALICON indexes the COCO-style annotation file; loadRes attaches the
# algorithm's predictions to the same index so ground truth and results
# can be compared image by image.
salicon = SALICON(annFile)
saliconRes = salicon.loadRes(resFile)
# In[13]:
# create cocoEval object by taking coco and cocoRes
saliconEval = SALICONEval(salicon, saliconRes)
# evaluate on a subset of images by setting
# cocoEval.params['image_id'] = cocoRes.getImgIds()
# Restrict scoring to the images the result file actually covers.
saliconEval.params['image_id'] = saliconRes.getImgIds()
# evaluate results
# Populates saliconEval.eval (metric -> score) and saliconEval.evalImgs
# (per-image entries), both consumed by the cells below.
saliconEval.evaluate()
# print output evaluation scores
# Fix: the original used Python-2 print *statements*, which are a syntax
# error under Python 3 (the scrape had also lost the loop-body indent).
# Single-argument parenthesized print produces identical output on both
# Python 2 and Python 3.
print("Final Result for each Metric:")
for metric, score in saliconEval.eval.items():
    print('%s: %.3f' % (metric, score))
# In[6]:
# plot score histogram of the shuffled-AUC metric over all evaluated images
sauc_per_image = [img_eval['SAUC'] for img_eval in saliconEval.evalImgs]
label_size = 20
plt.hist(sauc_per_image)
plt.title('Histogram of SAUC Scores', fontsize=label_size)
plt.xlabel('SAUC score', fontsize=label_size)
plt.ylabel('result counts', fontsize=label_size)
plt.show()
# In[14]:
# save evaluation results to ./results folder
# Fix: the original passed bare open() handles straight to json.dump and
# never closed them, leaking file descriptors and risking unflushed output.
# Context managers guarantee the files are flushed and closed even if
# serialization raises.
with open(evalImgsFile, 'w') as out_file:
    json.dump(saliconEval.evalImgs, out_file)
with open(evalFile, 'w') as out_file:
    json.dump(saliconEval.eval, out_file)
# In[ ]: