# italian.prop
trainFileList = /u/nlp/data/ner/italian/2021-09-15/it_fbk.train.io,/u/nlp/data/ner/italian/2021-09-15/it_fbk.dev.io
testFile = /u/nlp/data/ner/italian/2021-09-15/it_fbk.test.io
serializeTo = italian.crf.ser.gz
type=crf
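# A sketch of how this file is typically used for training (the classpath and jar name
# below are illustrative assumptions, not specified by this file):
#   java -cp stanford-corenlp.jar edu.stanford.nlp.ie.crf.CRFClassifier -prop italian.prop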
# distSimLexicon = /u/nlp/data/german/ner/hgc_175m_600
# distSimLexicon = /u/nlp/data/german/ner/2016/hgc-175M-600
# right options for new hgc_175m_600
useDistSim = false
# Now using stripped 2-column files so we can add extra datasets!
map = word=0,answer=1
encoding = utf-8
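# With map = word=0,answer=1, each non-blank data line is expected to carry a token and its
# label in two whitespace-separated columns, with blank lines separating sentences.
# Hypothetical example (the actual FBK label set may differ):
#   Roma    LOC
#   è       O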
# saveFeatureIndexToDisk = true # now buggy but unnecessary
mergeTags = false
useTitle = false
cleanGazette = true
useGazettes = true
gazette = it-LOC-wikipedia.txt,it-PER-wikipedia.txt,it-ORG-wikipedia.txt
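# In the usual CoreNLP gazette format, each line starts with the class label followed by the
# phrase. Hypothetical example line (not taken from the actual it-*-wikipedia.txt files):
#   LOC Monte Bianco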
useClassFeature=true
useWord=true
useNGrams=true
noMidNGrams=true
# Having no maxNGramLeng limit seemed to work marginally better, but a limit is kept here for efficiency
maxNGramLeng=6
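# The n-gram flags above add character n-gram features of the current word, restricted by
# noMidNGrams to substrings touching the start or end of the word, up to length 6.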
usePrev=true
useNext=true
useLongSequences=true
useSequences=true
usePrevSequences=true
useTypeSeqs=true
useTypeSeqs2=true
useTypeySequences=true
# Including useOccurrencePatterns increased scores only very marginally (the gain could even disappear now that we use weaker regularization)
useOccurrencePatterns=true
useLastRealWord=true
useNextRealWord=true
normalize=true
# Using chris4 instead hurt in the most recent experiment; an earlier experiment had seemed to show the opposite.
wordShape=chris2useLC
useDisjunctive=true
# Width 5 works a little better than 4
disjunctionWidth=5
maxLeft=1
readerAndWriter=edu.stanford.nlp.sequences.ColumnDocumentReaderAndWriter
useObservedSequencesOnly=true
useQN = true
QNsize = 15
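# useQN selects quasi-Newton (L-BFGS-style) optimization; QNsize is the history size kept by
# the optimizer.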
# sigma 20 works better than sigma 5, which is MUCH better than sigma 1; that was the limit of hyperparameter optimization
# On the basic CoNLL dataset (no distsim, no extra data), sigma=50 is a bit better still (by 0.13 F1)
sigma = 20
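# sigma is the standard deviation of the Gaussian prior on feature weights, so larger values
# mean weaker regularization.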
# For speed (fewer features); changing this to 0.025 doesn't improve performance
featureDiffThresh=0.05
# evaluateIOB=true
# Other notes
# Even though useTaggySequences would use distsim rather than POS sequences, turning it on didn't help
# Adding useWordPairs doesn't seem to help. (We get them anyway in an edge feature.)
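# Once trained, the serialized model can be applied to plain text along these lines
# (classpath and file names are illustrative):
#   java -cp stanford-corenlp.jar edu.stanford.nlp.ie.crf.CRFClassifier \
#     -loadClassifier italian.crf.ser.gz -textFile sample.txt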