Auto-formatted code to conform with Google style guide for Java.
jganitkevitch committed Apr 15, 2012
1 parent 21990ad commit 42f4404
Showing 223 changed files with 65,045 additions and 65,911 deletions.
281 changes: 281 additions & 0 deletions .settings/org.eclipse.jdt.core.prefs

Large diffs are not rendered by default.

3 changes: 3 additions & 0 deletions .settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,3 @@
+eclipse.preferences.version=1
+formatter_profile=_GoogleStyle
+formatter_settings_version=12
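These Eclipse settings pin the formatter to a Google-style profile, which is what produced the whitespace-only churn in the file diffs below. For reference (an illustrative snippet, not code from this commit), Google Java Style formats code with two-space indents, braces on the same line as the declaration, and one statement per line:

// Illustrative only: the shape of code produced by a Google-style profile.
public final class Example {
  private final int value;

  public Example(int value) {
    this.value = value;
  }

  public int valuePlus(int delta) {
    return value + delta; // 2-space indents, cuddled braces, one statement per line
  }
}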
386 changes: 193 additions & 193 deletions examples/ZMERT/README_ZMERT.txt

Large diffs are not rendered by default.

60 changes: 30 additions & 30 deletions examples/ZMERT/ZMERT_config_ex2.txt
@@ -1,30 +1,30 @@
-### Commonly used parameters
--dir ZMERT_example # working directory (i.e. location of relevant files)
-#-s src.txt # source sentences file name
--r ref # target sentences file name (in this case, file name prefix)
--rps 4 # references per sentence
--p params.txt # parameter file
--m BLEU 4 closest # evaluation metric and its options
--maxIt 2 # maximum MERT iterations
--ipi 20 # number of intermediate initial points per iteration
--cmd ./decoder_command_ex2 # file containing commands to run decoder
--decOut nbest_ex2.out # file produced by decoder
--dcfg config_ex2.txt # decoder config file
--N 300 # size of N-best list generated each iteration
--v 1 # verbosity level (0-2; higher value => more verbose)
--seed 12341234 # random number generator seed
-# Notice that comments are allowed
-### Other parameters (run "ZMERT -h" for default values)
-#-txtNrm # text normalization method
-#-fin # output file for final values
-#-prevIt # previous MERT iterations from which to consider candidates (in addition to the current iteration)
-#-minIt # minimum MERT iterations before considering an early exit
-#-stopIt # number of consecutive iterations an early exit criterion must be satisfied before actually exiting
-#-stopSig # value over which a weight change is "significant" (for early exit purposes)
-#-save # should MERT save intermediate config files or decoder output files? (or both? or neither?)
-#-opi # should MERT modify at most one parameter per iteration?
-#-rand # should first initial point (of first iteration) be initialized randomly?
-#-decExit # return value by decoder indicating success
-#-decV # should decoder output be printed?
+### Commonly used parameters
+-dir ZMERT_example # working directory (i.e. location of relevant files)
+#-s src.txt # source sentences file name
+-r ref # target sentences file name (in this case, file name prefix)
+-rps 4 # references per sentence
+-p params.txt # parameter file
+-m BLEU 4 closest # evaluation metric and its options
+-maxIt 2 # maximum MERT iterations
+-ipi 20 # number of intermediate initial points per iteration
+-cmd ./decoder_command_ex2 # file containing commands to run decoder
+-decOut nbest_ex2.out # file produced by decoder
+-dcfg config_ex2.txt # decoder config file
+-N 300 # size of N-best list generated each iteration
+-v 1 # verbosity level (0-2; higher value => more verbose)
+-seed 12341234 # random number generator seed
+
+# Notice that comments are allowed
+
+### Other parameters (run "ZMERT -h" for default values)
+#-txtNrm # text normalization method
+#-fin # output file for final values
+#-prevIt # previous MERT iterations from which to consider candidates (in addition to the current iteration)
+#-minIt # minimum MERT iterations before considering an early exit
+#-stopIt # number of consecutive iterations an early exit criterion must be satisfied before actually exiting
+#-stopSig # value over which a weight change is "significant" (for early exit purposes)
+#-save # should MERT save intermediate config files or decoder output files? (or both? or neither?)
+#-opi # should MERT modify at most one parameter per iteration?
+#-rand # should first initial point (of first iteration) be initialized randomly?
+#-decExit # return value by decoder indicating success
+#-decV # should decoder output be printed?
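The -cmd / -decOut pair above defines Z-MERT's contract with the decoder: each iteration it runs the command file, waits for a success exit code (cf. -decExit), and reads the N-best list from the named output file. A minimal Java sketch of that outer loop, assuming bash is available to run the command file (the class name and printed diagnostics are mine, not Z-MERT's):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;

public class MertLoopSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    int maxIt = 2;                            // -maxIt
    String cmdFile = "./decoder_command_ex2"; // -cmd: script that runs the decoder
    String decOut = "nbest_ex2.out";          // -decOut: N-best list the decoder writes
    for (int it = 1; it <= maxIt; it++) {
      // Run the decoder command file and wait for it to finish.
      Process p = new ProcessBuilder("bash", cmdFile).inheritIO().start();
      if (p.waitFor() != 0) {
        throw new IOException("decoder signaled failure (cf. -decExit)");
      }
      // Read the candidates produced this iteration.
      List<String> nbest = Files.readAllLines(Paths.get(decOut));
      System.out.println("iteration " + it + ": read " + nbest.size() + " N-best lines");
      // ... here Z-MERT would score candidates with the metric (-m BLEU 4 closest),
      // optimize the weights, rewrite the decoder config (-dcfg), and repeat.
    }
  }
}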
140 changes: 70 additions & 70 deletions examples/ZMERT/config_ex2.txt
@@ -1,70 +1,70 @@
-lm_file=example2/example2.4gram.lm.gz
-tm_file=example2/example2.hiero.tm.gz
-tm_format=hiero
-glue_file=grammars/hiero.glue
-glue_format=hiero
-#lm config
-use_srilm=true
-lm_ceiling_cost=100
-use_left_equivalent_state=false
-use_right_equivalent_state=false
-order=4
-#tm config
-span_limit=10
-phrase_owner=pt
-mono_owner=mono
-begin_mono_owner=begin_mono
-default_non_terminal=X
-goalSymbol=S
-#pruning config
-fuzz1=0.1
-fuzz2=0.1
-max_n_items=30
-relative_threshold=10.0
-max_n_rules=50
-rule_relative_threshold=10.0
-#nbest config
-use_unique_nbest=true
-use_tree_nbest=false
-add_combined_cost=true
-top_n=300
-#remote lm server config; we should first prepare remote_symbol_tbl before starting any jobs
-use_remote_lm_server=false
-remote_symbol_tbl=./voc.remote.sym
-num_remote_lm_servers=4
-f_remote_server_list=./remote.lm.server.list
-remote_lm_server_port=9000
-#parallel decoder: it cannot be used together with remote lm
-num_parallel_decoders=1
-parallel_files_prefix=/tmp/
-###### model weights
-#lm order weight
-lm 1.0
-#phrasemodel owner column(0-indexed) weight
-phrasemodel pt 0 1.066893
-phrasemodel pt 1 0.752247
-phrasemodel pt 2 0.589793
-#arityphrasepenalty owner start_arity end_arity weight
-#arityphrasepenalty pt 0 0 1.0
-#arityphrasepenalty pt 1 2 -1.0
-#phrasemodel mono 0 0.5
-#wordpenalty weight
-wordpenalty -2.844814
+lm_file=example2/example2.4gram.lm.gz
+
+tm_file=example2/example2.hiero.tm.gz
+tm_format=hiero
+
+glue_file=grammars/hiero.glue
+glue_format=hiero
+
+#lm config
+use_srilm=true
+lm_ceiling_cost=100
+use_left_equivalent_state=false
+use_right_equivalent_state=false
+order=4
+
+
+#tm config
+span_limit=10
+phrase_owner=pt
+mono_owner=mono
+begin_mono_owner=begin_mono
+default_non_terminal=X
+goalSymbol=S
+
+#pruning config
+fuzz1=0.1
+fuzz2=0.1
+max_n_items=30
+relative_threshold=10.0
+max_n_rules=50
+rule_relative_threshold=10.0
+
+#nbest config
+use_unique_nbest=true
+use_tree_nbest=false
+add_combined_cost=true
+top_n=300
+
+
+#remote lm server config; we should first prepare remote_symbol_tbl before starting any jobs
+use_remote_lm_server=false
+remote_symbol_tbl=./voc.remote.sym
+num_remote_lm_servers=4
+f_remote_server_list=./remote.lm.server.list
+remote_lm_server_port=9000
+
+
+#parallel decoder: it cannot be used together with remote lm
+num_parallel_decoders=1
+parallel_files_prefix=/tmp/
+
+
+###### model weights
+#lm order weight
+lm 1.0
+
+#phrasemodel owner column(0-indexed) weight
+phrasemodel pt 0 1.066893
+phrasemodel pt 1 0.752247
+phrasemodel pt 2 0.589793
+
+#arityphrasepenalty owner start_arity end_arity weight
+#arityphrasepenalty pt 0 0 1.0
+#arityphrasepenalty pt 1 2 -1.0
+
+#phrasemodel mono 0 0.5
+
+#wordpenalty weight
+wordpenalty -2.844814
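The decoder config above mixes two line formats: key=value option lines and whitespace-separated feature-weight lines such as "phrasemodel pt 0 1.066893", with "#" starting a comment. A minimal parsing sketch under those assumptions (not Joshua's actual config reader; class and variable names are illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.LinkedHashMap;
import java.util.Map;

public class ConfigSketch {
  public static void main(String[] args) throws IOException {
    Map<String, String> options = new LinkedHashMap<>();
    Map<String, Double> weights = new LinkedHashMap<>();
    for (String line : Files.readAllLines(Paths.get("config_ex2.txt"))) {
      line = line.replaceFirst("#.*", "").trim(); // strip comments
      if (line.isEmpty()) continue;
      if (line.contains("=")) {                   // option line: key=value
        String[] kv = line.split("=", 2);
        options.put(kv[0].trim(), kv[1].trim());
      } else {                                    // weight line: name... value
        int cut = line.lastIndexOf(' ');
        weights.put(line.substring(0, cut), Double.parseDouble(line.substring(cut + 1)));
      }
    }
    System.out.println("top_n=" + options.get("top_n") + ", lm weight=" + weights.get("lm"));
  }
}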

12 changes: 6 additions & 6 deletions examples/ZMERT/params.txt
@@ -1,6 +1,6 @@
-lm ||| 1.000000 Opt 0.1 +Inf +0.5 +1.5
-phrasemodel pt 0 ||| 1.066893 Opt -Inf +Inf -1 +1
-phrasemodel pt 1 ||| 0.752247 Opt -Inf +Inf -1 +1
-phrasemodel pt 2 ||| 0.589793 Opt -Inf +Inf -1 +1
-wordpenalty ||| -2.844814 Opt -Inf +Inf -5 0
-normalization = absval 1 lm
+lm ||| 1.000000 Opt 0.1 +Inf +0.5 +1.5
+phrasemodel pt 0 ||| 1.066893 Opt -Inf +Inf -1 +1
+phrasemodel pt 1 ||| 0.752247 Opt -Inf +Inf -1 +1
+phrasemodel pt 2 ||| 0.589793 Opt -Inf +Inf -1 +1
+wordpenalty ||| -2.844814 Opt -Inf +Inf -5 0
+normalization = absval 1 lm
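Each line of params.txt describes one feature weight: the name, then ||| and the initial value, an Opt/Fix flag, the [min, max] range the optimizer may explore, and a [randMin, randMax] range for randomly drawn initial points; the final normalization line rescales weights so the lm weight has absolute value 1 (README_ZMERT.txt above is the authoritative description of these fields). A small sketch of how one such line decomposes (illustrative code, not Z-MERT's):

public class ParamLineSketch {
  public static void main(String[] args) {
    String line = "lm ||| 1.000000 Opt 0.1 +Inf +0.5 +1.5";
    String[] halves = line.split("\\|\\|\\|");
    String name = halves[0].trim();           // "lm" (may contain spaces, e.g. "phrasemodel pt 0")
    String[] f = halves[1].trim().split("\\s+");
    double init = Double.parseDouble(f[0]);   // initial weight: 1.0
    boolean optimizable = f[1].equals("Opt"); // "Fix" would freeze the weight
    double min = bound(f[2]), max = bound(f[3]);          // allowed range
    double randMin = bound(f[4]), randMax = bound(f[5]);  // random-init range
    System.out.println(name + (optimizable ? " in [" : " fixed [") + min + ", " + max
        + "], start " + init + ", random init in [" + randMin + ", " + randMax + "]");
  }

  // "+Inf"/"-Inf" are the file's spellings; Double.parseDouble would reject them.
  private static double bound(String s) {
    if (s.equals("+Inf")) return Double.POSITIVE_INFINITY;
    if (s.equals("-Inf")) return Double.NEGATIVE_INFINITY;
    return Double.parseDouble(s);
  }
}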
148 changes: 74 additions & 74 deletions examples/example/example.config.bloomfilterlm
@@ -1,74 +1,74 @@
-lm_file=example/example.bloomfilter.lm.gz
-tm_file=example/example.hiero.tm.gz
-tm_format=hiero
-glue_file=grammars/hiero.glue
-glue_format=hiero
-#lm config
-use_srilm=false
-use_bloomfilter_lm=true
-lm_ceiling_cost=100
-use_left_euqivalent_state=false
-use_right_euqivalent_state=false
-order=3
-#tm config
-span_limit=10
-phrase_owner=pt
-mono_owner=mono
-begin_mono_owner=begin_mono
-default_non_terminal=X
-goalSymbol=S
-#pruning config
-fuzz1=0.1
-fuzz2=0.1
-max_n_items=30
-relative_threshold=10.0
-max_n_rules=50
-rule_relative_threshold=10.0
-#nbest config
-use_unique_nbest=true
-use_tree_nbest=false
-add_combined_cost=true
-top_n=300
-#remote lm server config; we should first prepare remote_symbol_tbl before starting any jobs
-use_remote_lm_server=false
-remote_symbol_tbl=./voc.remote.sym
-num_remote_lm_servers=4
-f_remote_server_list=./remote.lm.server.list
-remote_lm_server_port=9000
-#parallel decoder: it cannot be used together with remote lm
-num_parallel_decoders=1
-parallel_files_prefix=.
-#disk hg
-save_disk_hg=false
-###### model weights
-#lm order weight
-lm 1.000000
-#phrasemodel owner column(0-indexed) weight
-phrasemodel pt 0 1.066893
-phrasemodel pt 1 0.752247
-phrasemodel pt 2 0.589793
-#arityphrasepenalty owner start_arity end_arity weight
-#arityphrasepenalty pt 0 0 1.0
-#arityphrasepenalty pt 1 2 -1.0
-#phrasemodel mono 0 0.5
-#wordpenalty weight
-wordpenalty -2.844814
-#latticecost 1.0
+lm_file=example/example.bloomfilter.lm.gz
+
+tm_file=example/example.hiero.tm.gz
+tm_format=hiero
+
+glue_file=grammars/hiero.glue
+glue_format=hiero
+
+#lm config
+use_srilm=false
+use_bloomfilter_lm=true
+lm_ceiling_cost=100
+use_left_euqivalent_state=false
+use_right_euqivalent_state=false
+order=3
+
+
+#tm config
+span_limit=10
+phrase_owner=pt
+mono_owner=mono
+begin_mono_owner=begin_mono
+default_non_terminal=X
+goalSymbol=S
+
+#pruning config
+fuzz1=0.1
+fuzz2=0.1
+max_n_items=30
+relative_threshold=10.0
+max_n_rules=50
+rule_relative_threshold=10.0
+
+#nbest config
+use_unique_nbest=true
+use_tree_nbest=false
+add_combined_cost=true
+top_n=300
+
+
+#remote lm server config; we should first prepare remote_symbol_tbl before starting any jobs
+use_remote_lm_server=false
+remote_symbol_tbl=./voc.remote.sym
+num_remote_lm_servers=4
+f_remote_server_list=./remote.lm.server.list
+remote_lm_server_port=9000
+
+
+#parallel decoder: it cannot be used together with remote lm
+num_parallel_decoders=1
+parallel_files_prefix=.
+
+#disk hg
+save_disk_hg=false
+
+###### model weights
+#lm order weight
+lm 1.000000
+
+#phrasemodel owner column(0-indexed) weight
+phrasemodel pt 0 1.066893
+phrasemodel pt 1 0.752247
+phrasemodel pt 2 0.589793
+
+#arityphrasepenalty owner start_arity end_arity weight
+#arityphrasepenalty pt 0 0 1.0
+#arityphrasepenalty pt 1 2 -1.0
+
+#phrasemodel mono 0 0.5
+
+#wordpenalty weight
+wordpenalty -2.844814
+#latticecost 1.0
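This config differs from config_ex2.txt mainly in loading a Bloom-filter language model (use_bloomfilter_lm=true), which stores n-gram statistics in a fixed-size bit array probed by several hash functions, trading a small false-positive rate for a much smaller memory footprint. A generic sketch of the underlying data structure (not Joshua's LM code; the sizes and hash mixing below are arbitrary choices):

import java.util.BitSet;

public class BloomSketch {
  private final BitSet bits;
  private final int size, hashes;

  public BloomSketch(int size, int hashes) {
    this.bits = new BitSet(size);
    this.size = size;
    this.hashes = hashes;
  }

  // Derive k bit positions from one hash code (crude mixing, fine for a sketch).
  private int index(String item, int i) {
    return Math.floorMod(item.hashCode() * 31 + i * 0x9E3779B9, size);
  }

  public void add(String ngram) {
    for (int i = 0; i < hashes; i++) bits.set(index(ngram, i));
  }

  // May return a false positive, never a false negative.
  public boolean mightContain(String ngram) {
    for (int i = 0; i < hashes; i++) if (!bits.get(index(ngram, i))) return false;
    return true;
  }

  public static void main(String[] args) {
    BloomSketch bf = new BloomSketch(1 << 20, 4);
    bf.add("the quick fox");
    System.out.println(bf.mightContain("the quick fox"));  // true
    System.out.println(bf.mightContain("never inserted")); // almost surely false
  }
}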
