/
cidereval.py
52 lines (39 loc) · 1.32 KB
/
cidereval.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
# coding: utf-8
# In[1]:
# demo script for running CIDEr
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from pydataformat.loadData import LoadData
from pyciderevalcap.eval import CIDErEvalCap as ciderEval
# Load the evaluation parameters from params.json.
# Fix: use a context manager so the config file handle is closed promptly
# (the original `json.loads(open(...).read())` leaked the handle), and
# json.load() instead of manually reading the whole file into a string.
with open('params.json', 'r') as config_file:
    config = json.load(config_file)

pathToData = config['pathToData']     # directory containing ref/candidate JSON files
refName = config['refName']           # reference (ground-truth) captions file
candName = config['candName']         # candidate captions file
resultFile = config['resultFile']     # where to write the metric scores
df_mode = config['idf']               # document-frequency mode for CIDEr

# Echo the parameters so each run is easy to audit from the console log.
print("Running CIDEr with the following settings")
print("*****************************")
print("Reference File:%s" % (refName))
print("Candidate File:%s" % (candName))
print("Result File:%s" % (resultFile))
print("IDF:%s" % (df_mode))
print("*****************************")

# In[2]:
# Load reference and candidate sentences from the data directory.
loadDat = LoadData(pathToData)
gts, res = loadDat.readJson(refName, candName)

# In[3]:
# Calculate CIDEr scores over the whole corpus.
# scores: dict of list with key = metric and value = score given to each
# candidate
scorer = ciderEval(gts, res, 'corpus')
scores = scorer.evaluate()

# In[7]:
# scores['CIDEr'] contains CIDEr scores in a list for each candidate
# scores['CIDErD'] contains CIDEr-D scores in a list for each candidate
with open(resultFile, 'w') as outfile:
    json.dump(scores, outfile)