Commit: updated metadata output
jonadsimon committed Jan 29, 2022
1 parent fdebbf9 commit d552718
Showing 8 changed files with 70 additions and 99 deletions.
3 changes: 3 additions & 0 deletions analytics_helper_functions.py
@@ -7,6 +7,9 @@ def get_num_letters_excess(words, board_size=15):
 def get_mean_word_length(words):
     return np.mean([len(word) for word in words])
 
+def get_max_word_length(words):
+    return np.max([len(word) for word in words])
+
 def get_num_overlaps_exact(w1, w2):
     overlaps = 0
     # Assume that w1 always has the same fixed orientation
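As a quick illustration of the new helper alongside its existing sibling, a self-contained sketch (the sample word list here is invented):

import numpy as np

def get_mean_word_length(words):
    return np.mean([len(word) for word in words])

def get_max_word_length(words):
    return np.max([len(word) for word in words])

# Invented sample; board words are plain strings
words = ["cuddle", "nuzzle", "hug", "swaddle"]
print(get_mean_word_length(words))  # 5.5
print(get_max_word_length(words))   # 7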
90 changes: 29 additions & 61 deletions make_puzzle_v2.py
@@ -13,55 +13,20 @@
 import warnings
 
 # TODO: CLEAN UP THIS MESS OF A FUNCTION
-def get_word_set_stats(words):
+def get_word_set_stats(words, board_size):
     board_words = [w.board for w in words]
+    collisions_avoidance_matrix = np.array(ahf.get_collision_avoidance_probability_pairwise(board_words, board_size))
+    tril_inds = np.tril_indices(len(words), k=-1)
 
     num_words = len(board_words)
     letter_excess = ahf.get_num_letters_excess(board_words)
     mean_len = ahf.get_mean_word_length(board_words)
-    total_overlaps = ahf.get_num_overlaps_total(board_words)
+    max_len = ahf.get_max_word_length(board_words)
 
-    return num_words, letter_excess, mean_len, total_overlaps
+    min_collision_avoidance_prob = collisions_avoidance_matrix[tril_inds].min()
+    mean_collision_avoidance_prob = collisions_avoidance_matrix[tril_inds].mean()
 
-# REPLACE WITH WORD-LENGTH NORMALIZED OVERLAP MEASURES
-def enrich_word_set(chosen_word_tuples, extra_word_tuples, max_mean_word_len=5.5):
-
-    num_words, letter_excess, mean_len, total_overlaps = get_word_set_stats(chosen_word_tuples)
-    print(f"\nInitial stats: num_words={num_words}, letter_excess={letter_excess:.2f}, mean_len={mean_len:.2f}, total_overlaps={total_overlaps}")
-
-    # Rather than hard-coding a static max_mean_word_len, make it a function of what we started with
-    # max_mean_word_len = 1.1 * mean_len
-
-    while True:
-        # Find least-overlapping word in the set
-
-        per_word_overlaps = [ahf.get_num_overlaps_per_word(wt.board, [wt.board for wt in chosen_word_tuples]) for wt in chosen_word_tuples]
-        least_overlapping_word_idx = np.argmin(per_word_overlaps)
-        least_overlapping_word = chosen_word_tuples[least_overlapping_word_idx]
-
-        chosen_word_tuples = chosen_word_tuples[:least_overlapping_word_idx] + chosen_word_tuples[least_overlapping_word_idx+1:]
-        extra_word_tuples = [least_overlapping_word] + extra_word_tuples
-
-        per_word_overlaps = [ahf.get_num_overlaps_per_word(wt.board, [wt.board for wt in chosen_word_tuples]) for wt in extra_word_tuples]
-        most_overlapping_word_idx = np.argmax(per_word_overlaps)
-        most_overlapping_word = extra_word_tuples[most_overlapping_word_idx]
-
-        chosen_word_tuples = chosen_word_tuples + [most_overlapping_word]
-        extra_word_tuples = extra_word_tuples[:most_overlapping_word_idx] + extra_word_tuples[most_overlapping_word_idx+1:]
-
-        num_words, letter_excess, mean_len, total_overlaps = get_word_set_stats(chosen_word_tuples)
-        print(f"\nRemoved: '{least_overlapping_word.pretty}', Added: '{most_overlapping_word.pretty}'")
-        print(f"Enriched stats: num_words={num_words}, letter_excess={letter_excess:.2f}, mean_len={mean_len:.2f}, total_overlaps={total_overlaps}")
-
-        if least_overlapping_word == most_overlapping_word:
-            print("\nRedundant transposition --> break")
-            break
-
-        if mean_len > max_mean_word_len:
-            print("\nPassed max_mean_word_len --> break")
-            break
-
-    return chosen_word_tuples
+    return num_words, letter_excess, mean_len, max_len, min_collision_avoidance_prob, mean_collision_avoidance_prob


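The rewritten get_word_set_stats summarizes only the strict lower triangle of the pairwise matrix, since it is symmetric and the diagonal is self-comparison. A minimal sketch with a stand-in matrix (the real values come from ahf.get_collision_avoidance_probability_pairwise, which is not shown in this diff):

import numpy as np

# Stand-in for the pairwise collision-avoidance matrix (symmetric, 3 words)
probs = np.array([
    [1.0, 0.9, 0.8],
    [0.9, 1.0, 0.7],
    [0.8, 0.7, 1.0],
])

# k=-1 selects strictly-below-diagonal entries: (1,0), (2,0), (2,1)
tril_inds = np.tril_indices(len(probs), k=-1)
print(probs[tril_inds])         # [0.9 0.8 0.7]
print(probs[tril_inds].min())   # 0.7 -> min_collision_avoidance_prob
print(probs[tril_inds].mean())  # 0.8 -> mean_collision_avoidance_prob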
@@ -85,15 +50,12 @@ def get_words_for_board(word_tuples, board_size, packing_constant=1.1, max_word_len=8):
     if not num_words:
         raise ValueError(f"Too few semantic neighbor words to pack a {board_size}x{board_size} board.")
 
-    # chosen_word_tuples = enrich_word_set(word_tuples[:num_words], word_tuples[num_words:])
-    # return sorted(chosen_word_tuples, key=lambda wt: len(wt.board))
-
-    num_words, packing_level, mean_word_len, total_overlap = get_word_set_stats(word_tuples[:num_words])
+    num_words, packing_level, mean_word_len, max_word_len, min_collision_avoidance_prob, mean_collision_avoidance_prob = get_word_set_stats(word_tuples[:num_words], board_size)
     print("\nWord stats:")
     print(f" num_words: {num_words}")
     print(f" packing_level: {packing_level:.3f}")
-    print(f" mean_word_len: {mean_word_len:.2f}")
-    print(f" total_overlap: {total_overlap}")
+    print(f" word_len (mean/max): {mean_word_len:.2f} / {max_word_len}")
+    print(f" collision_avoidance_prob (min/mean): {min_collision_avoidance_prob:.6f} / {mean_collision_avoidance_prob:.6f}")
 
     return word_tuples[:num_words]

@@ -111,12 +73,12 @@ def get_words_for_board_optimize(word_tuples, board_size, packing_constant=1.1):
     max_word_tuple_idx_naive = (np.cumsum([len(wt.board) for wt in word_tuples]) < packing_constant * board_size**2).sum()
     word_tuples_naive = word_tuples[:max_word_tuple_idx_naive]
 
-    num_words, packing_level, mean_word_len, total_overlap = get_word_set_stats(word_tuples_naive)
+    num_words, packing_level, mean_word_len, max_word_len, min_collision_avoidance_prob, mean_collision_avoidance_prob = get_word_set_stats(word_tuples_naive, board_size)
     print("\nPre-optimization word stats:")
     print(f" num_words: {num_words}")
     print(f" packing_level: {packing_level:.3f}")
-    print(f" mean_word_len: {mean_word_len:.2f}")
-    print(f" total_overlap: {total_overlap}")
+    print(f" word_len (mean/max): {mean_word_len:.2f} / {max_word_len}")
+    print(f" collision_avoidance_prob (min/mean): {min_collision_avoidance_prob:.6f} / {mean_collision_avoidance_prob:.6f}")
 
     # Run the script
     p = subprocess.Popen(["/Applications/MiniZincIDE.app/Contents/Resources/minizinc", "--solver", "Chuffed", "--all-solutions", "MiniZinc_scripts/find_dense_overlap_word_subset.mzn", "tmp/pre_data.dzn"], stdout=subprocess.PIPE)
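For context on the call above: MiniZinc run with --all-solutions prints each solution followed by a line of ten dashes, with '==========' marking a completed search. A rough sketch of consuming that output; the bare 'minizinc' executable name and the parsing are illustrative assumptions, not code from this commit:

import subprocess

# Illustrative: run the same model and keep the last (best-so-far) solution
p = subprocess.Popen(
    ["minizinc", "--solver", "Chuffed", "--all-solutions",
     "MiniZinc_scripts/find_dense_overlap_word_subset.mzn", "tmp/pre_data.dzn"],
    stdout=subprocess.PIPE,
)
out, _ = p.communicate()
# MiniZinc separates successive solutions with a line of ten dashes
solutions = [s.strip() for s in out.decode("utf-8").split("----------") if s.strip()]
if solutions and solutions[-1].startswith("=========="):
    solutions.pop()  # trailing '==========' just marks search completion
best = solutions[-1] if solutions else None
print(best)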
@@ -144,12 +106,12 @@ def get_words_for_board_optimize(word_tuples, board_size, packing_constant=1.1):
     # NEED TO MAKE SURE IT'S DONE WRT SEMANITIC SIMILARITY
     # STORE THIS INFORMATION WITHIN THE WORD OBJECT
 
-    num_words, packing_level, mean_word_len, total_overlap = get_word_set_stats(word_tuples)
+    num_words, packing_level, mean_word_len, max_word_len, min_collision_avoidance_prob, mean_collision_avoidance_prob = get_word_set_stats(word_tuples, board_size)
     print("\nPost-optimization word stats:")
     print(f" num_words: {num_words}")
     print(f" packing_level: {packing_level:.3f}")
-    print(f" mean_word_len: {mean_word_len:.2f}")
-    print(f" total_overlap: {total_overlap}")
+    print(f" word_len (mean/max): {mean_word_len:.2f} / {max_word_len}")
+    print(f" collision_avoidance_prob (min/mean): {min_collision_avoidance_prob:.6f} / {mean_collision_avoidance_prob:.6f}")
 
     return word_tuples

@@ -226,6 +188,7 @@ def find_word_in_board(board, word):
     # Can be >1 location if the word was added to the board in multiple places.
     # Can ALSO happen if the word is a palindrome; this is a false positive case and should be removed.
     word_locations = []
+    word_deltas = []
     for y in range(len(board)):
         for x in range(len(board)):
             for dy in (-1,0,1):
@@ -243,7 +206,8 @@ def find_word_in_board(board, word):
                         new_loc = [(y+i*dy, x+i*dx) for i in range(len(word))]
                         if all(sorted(loc) != sorted(new_loc) for loc in word_locations):
                             word_locations.append(new_loc)
-    return word_locations
+                            word_deltas.append((dy,dx))
+    return word_locations, word_deltas


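To see the new return contract in action: each located word now carries its (dy, dx) step, e.g. (0, 1) for left-to-right and (0, -1) for right-to-left. A self-contained toy version of the scan, simplified to skip the palindrome/duplicate handling above:

def find_word_toy(board, word):
    """Toy scan mirroring find_word_in_board: returns locations and (dy, dx) deltas."""
    n = len(board)
    locations, deltas = [], []
    for y in range(n):
        for x in range(n):
            for dy in (-1, 0, 1):
                for dx in (-1, 0, 1):
                    if dy == dx == 0:
                        continue
                    end_y, end_x = y + (len(word) - 1) * dy, x + (len(word) - 1) * dx
                    if not (0 <= end_y < n and 0 <= end_x < n):
                        continue  # word would run off the board
                    if all(board[y + i * dy][x + i * dx] == word[i] for i in range(len(word))):
                        locations.append([(y + i * dy, x + i * dx) for i in range(len(word))])
                        deltas.append((dy, dx))
    return locations, deltas

board = [list("HUG"), list("XXX"), list("GUH")]
locs, deltas = find_word_toy(board, "HUG")
print(deltas)  # [(0, 1), (0, -1)] -- forward on row 0, backward on row 2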
@@ -258,7 +222,8 @@ def find_words_in_board(board, word_tuples):
     Return the words satisfying the different conditions
     """
-    word_locations_on_board = [find_word_in_board(board, wt.board) for wt in word_tuples]
+    word_locations_on_board, word_deltas_on_board = zip(*[find_word_in_board(board, wt.board) for wt in word_tuples])
+    flattened_deltas = [d for deltas in word_deltas_on_board for d in deltas]
 
     covered_up_words = []
     doubled_up_words = []
@@ -278,7 +243,7 @@ def find_words_in_board(board, word_tuples):
                w1_letter_positions_remaining = [pos for pos in w1_letter_positions_remaining if pos != loc]
        if not w1_letter_positions_remaining:
            covered_up_words.append(wt)
-    return covered_up_words, doubled_up_words
+    return covered_up_words, doubled_up_words, flattened_deltas


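The zip(* ... ) change above is the standard transpose idiom: a list of (locations, deltas) pairs becomes one tuple of all locations and one tuple of all deltas. A tiny illustration with invented values:

# Each find_word_in_board call returns a (locations, deltas) pair; zip(*...) regroups them
pairs = [([(0, 0)], [(0, 1)]),
         ([(3, 3), (5, 5)], [(1, 0), (-1, -1)])]
word_locations_on_board, word_deltas_on_board = zip(*pairs)
flattened_deltas = [d for deltas in word_deltas_on_board for d in deltas]
print(flattened_deltas)  # [(0, 1), (1, 0), (-1, -1)]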
@@ -356,7 +321,7 @@ def make_puzzle(topic, board_size, packing_constant, strategy, optimize_words, relatedness_cutoff, n_proc=4):
         # TODO FOR LATER: EDGE-CASE WHERE REMOVING ONE OVERLAPPING WORD RENDERS ANOTHER NON-OVERLAPPING
         # TODO FOR LATER: ANOTHER EDGE-CASE WHERE A WORD GETS ADDED WHEN THE HIDDEN LETTERS GET FILLED IN
         board = [line.strip().split() for line in raw_board.decode("utf-8").strip().split("\n")] # THIS IS REDUNDANT
-        covered_up_words, doubled_up_words = find_words_in_board(board, word_tuples_to_fit)
+        covered_up_words, doubled_up_words, deltas = find_words_in_board(board, word_tuples_to_fit)
         # If word appears multiple times, print a warning and try again
         if doubled_up_words:
             # warnings.warn(f"\nwords appear more than once on the board: {', '.join([wt.pretty for wt in doubled_up_words])}\nboard will be discarded and regenerated\n")
@@ -387,13 +352,16 @@ def make_puzzle(topic, board_size, packing_constant, strategy, optimize_words, relatedness_cutoff, n_proc=4):
     blank_locs = [(i,j) for i,row in enumerate(board) for j,letter in enumerate(row) if letter == "_"]
     # If number of blanks is exactly 0, no special word is needed
     if len(blank_locs) not in hidden_word_tuple_dict and len(blank_locs) != 0:
-        print(len(blank_locs), blank_locs)
-        print(hidden_word_tuple_dict)
-        raise ValueError("Number of remaining blanks does not fit any available word.")
+        raise ValueError(f"Number of remaining blanks does not fit any available word: {len(blank_locs)}")
     k = 0
     for i, j in blank_locs:
         board[i][j] = hidden_word_tuple_dict[len(blank_locs)].board[k]
         k += 1
+    delta_cntr = Counter(deltas)
+    print(f"\nhorizontal (fwd/bwd): {delta_cntr[(0,1)]}/{delta_cntr[(0,-1)]}")
+    print(f"vertical (fwd/bwd): {delta_cntr[(1,0)]}/{delta_cntr[(-1,0)]}")
+    print(f"diagonal du (fwd/bwd): {delta_cntr[(-1,1)]}/{delta_cntr[(1,-1)]}")
+    print(f"diagonal ud (fwd/bwd): {delta_cntr[(1,1)]}/{delta_cntr[(-1,-1)]}\n")
     # Print the topic above the board
     print(f"\nTopic: {' / '.join(topic)}\n\n")
     for row in board:
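The new direction summary is a plain collections.Counter over the collected deltas; forward and backward along each axis are just the two opposite (dy, dx) signs. A runnable sketch with invented deltas:

from collections import Counter

# Invented deltas for illustration; the real list comes from find_words_in_board
deltas = [(0, 1), (0, 1), (0, -1), (1, 0), (1, 1), (-1, 1)]
delta_cntr = Counter(deltas)
print(f"horizontal (fwd/bwd): {delta_cntr[(0,1)]}/{delta_cntr[(0,-1)]}")   # 2/1
print(f"vertical (fwd/bwd): {delta_cntr[(1,0)]}/{delta_cntr[(-1,0)]}")     # 1/0
print(f"diagonal du (fwd/bwd): {delta_cntr[(-1,1)]}/{delta_cntr[(1,-1)]}") # 1/0
print(f"diagonal ud (fwd/bwd): {delta_cntr[(1,1)]}/{delta_cntr[(-1,-1)]}") # 1/0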
34 changes: 17 additions & 17 deletions other.rtf
@@ -5,24 +5,24 @@
 \margl1440\margr1440\vieww12820\viewh11240\viewkind0
 \pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\qc\partightenfactor0
 
-\f0\fs32 \cf2 Topic: colorful / colors\
+\f0\fs32 \cf2 Topic: cuddle\
 \
 \
-C Y A N W O R B R O A N T R P\
-B O R Y E N O T I N C T I Y I\
-Y R G E F G Y E W T Y C N E N\
-H E A I L I N W T O H D T R E\
-S V U V D T R A O I L G U G E\
-A L S L E N T U H H H L I A R\
-L I E U B M I O A C S W E R G\
-F S E L P R U P M O T L E Y B\
-L E T I J E V T S L A M A L A\
-U P I G A D I O H O I T A Z G\
-S I N H Z D V U A U N C U U L\
-H A G T Z E I C D R K R I W O\
-D E E P Y N D H E N E S I A S\
-S H O T N I A P I D E U H R S\
-H U E S K I N P P Y A R G B C\
+P L O P R U L S T W I R L C O\
+T N N M M O S O B W A K E N E\
+I E E H C T U L C A V O R T C\
+B T V H T S E N U Z Z L E M A\
+A R I S S I K P U C K E R U R\
+H A L W O E M S W A D D L E B\
+O M S S C B R A I N W A S H M\
+C S P Y H A F F E C T I O N E\
+U Q O M E S O C I A L I Z E P\
+D U N P M L N H U G G I N G A\
+D E G A I E O I N I B B L E M\
+L E I T C E L P O E P R L T P\
+E Z N H A P U K E F L I R T E\
+I E G Y L E F O I S T S U U R\
+L O V E Z O D B A W L I C K P\
 \
 \
-aurify azure black blue brave braw bright brown change colour cyan deep flashy flush gaudy gloss gray green grey guise hued hues imbue indigo jazzy light motley mottle paint pied pink purple redden rich roan sepia shade shot showy silver skin stain tinct tinge tint tone touch vivid white yellow}
+affection bawl bosom brainwash cavort chemical clutch cohabit cuddle curl doze elope embrace flirt foist freshen hugging kiss lick liven love meow nest nibble nuzzle pamper people plop pucker puke purr sleep slurp smarten socialize sponging squeeze swaddle sympathy twirl waken}
8 changes: 4 additions & 4 deletions tmp/data1.dzn
@@ -1,9 +1,9 @@
 n = 15;
-m = 52;
+m = 44;
 
-max_len = 6;
-word_lens = [ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 ];
-words = [| P, I, E, D, E, E | B, R, A, W, E, E | D, E, E, P, E, E | R, O, A, N, E, E | S, K, I, N, E, E | P, I, N, K, E, E | B, L, U, E, E, E | C, Y, A, N, E, E | G, R, A, Y, E, E | R, I, C, H, E, E | T, O, N, E, E, E | T, I, N, T, E, E | S, H, O, T, E, E | H, U, E, S, E, E | H, U, E, D, E, E | G, R, E, Y, E, E | J, A, Z, Z, Y, E | A, Z, U, R, E, E | T, O, U, C, H, E | B, R, O, W, N, E | G, A, U, D, Y, E | B, R, A, V, E, E | G, L, O, S, S, E | G, R, E, E, N, E | P, A, I, N, T, E | I, M, B, U, E, E | S, H, A, D, E, E | S, T, A, I, N, E | F, L, U, S, H, E | S, E, P, I, A, E | W, H, I, T, E, E | B, L, A, C, K, E | T, I, N, G, E, E | G, U, I, S, E, E | L, I, G, H, T, E | S, H, O, W, Y, E | V, I, V, I, D, E | T, I, N, C, T, E | M, O, T, L, E, Y | C, H, R, O, M, A | M, O, T, T, L, E | S, I, L, V, E, R | R, E, D, D, E, N | B, R, I, G, H, T | C, O, L, O, U, R | P, U, R, P, L, E | A, U, R, I, F, Y | A, B, L, A, Z, E | I, N, D, I, G, O | C, H, A, N, G, E | F, L, A, S, H, Y | Y, E, L, L, O, W |];
+max_len = 7;
+word_lens = [ 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7 ];
+words = [| B, A, C, H, E, E, E | M, A, S, S, E, E, E | P, O, P, E, E, E, E | P, I, A, N, O, E, E | I, T, A, L, Y, E, E | O, P, E, R, A, E, E | M, U, S, I, C, E, E | L, I, S, Z, T, E, E | C, E, L, L, O, E, E | E, T, U, D, E, E, E | C, H, O, I, R, E, E | V, E, R, D, I, E, E | S, A, L, O, N, E, E | P, A, R, I, S, E, E | C, A, N, T, O, E, E | O, R, G, A, N, E, E | L, A, R, G, O, E, E | M, O, T, E, T, E, E | F, L, U, T, E, E, E | W, A, L, T, Z, E, E | H, A, Y, D, N, E, E | W, R, I, T, E, R, E | R, I, D, D, I, M, E | V, I, O, L, I, N, E | S, O, N, A, T, A, E | L, O, N, D, O, N, E | C, Z, E, C, H, S, E | M, U, N, I, C, H, E | P, R, A, G, U, E, E | T, E, C, H, N, O, E | V, I, E, N, N, A, E | B, R, A, H, M, S, E | W, A, R, S, A, W, E | H, A, N, D, E, L, E | W, A, G, N, E, R, E | M, E, L, O, D, Y, E | P, O, L, A, N, D, E | L, E, G, A, T, O, E | A, U, T, H, O, R, E | C, H, O, P, I, N, E | M, O, Z, A, R, T, E | F, R, A, N, C, E, E | M, A, H, L, E, R, E | M, A, Z, U, R, K, A |];
 
 pos_var_strat = first_fail;
 pos_val_strat = indomain_median;
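The .dzn layout pads every word with the filler letter E out to max_len so the MiniZinc words array is rectangular. A hypothetical generator mirroring that layout (write_dzn is not part of this commit; only the padding convention is taken from the data above):

def write_dzn(path, words, board_size=15):
    # Hypothetical generator mirroring the tmp/data*.dzn layout above
    words = sorted(words, key=len)
    max_len = max(len(w) for w in words)
    padded = [list(w.upper().ljust(max_len, "E")) for w in words]  # pad with filler 'E'
    with open(path, "w") as f:
        f.write(f"n = {board_size};\nm = {len(words)};\n\n")
        f.write(f"max_len = {max_len};\n")
        f.write(f"word_lens = [ {', '.join(str(len(w)) for w in words)} ];\n")
        rows = " | ".join(", ".join(row) for row in padded)
        f.write(f"words = [| {rows} |];\n\n")
        f.write("pos_var_strat = first_fail;\npos_val_strat = indomain_median;\n")

write_dzn("example.dzn", ["bach", "piano", "mazurka"])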
8 changes: 4 additions & 4 deletions tmp/data2.dzn
@@ -1,9 +1,9 @@
 n = 15;
-m = 52;
+m = 44;
 
-max_len = 6;
-word_lens = [ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 ];
-words = [| G, R, E, Y, E, E | P, I, N, K, E, E | T, O, N, E, E, E | B, L, U, E, E, E | S, K, I, N, E, E | H, U, E, D, E, E | H, U, E, S, E, E | S, H, O, T, E, E | R, O, A, N, E, E | T, I, N, T, E, E | B, R, A, W, E, E | D, E, E, P, E, E | G, R, A, Y, E, E | P, I, E, D, E, E | C, Y, A, N, E, E | R, I, C, H, E, E | J, A, Z, Z, Y, E | P, A, I, N, T, E | G, R, E, E, N, E | W, H, I, T, E, E | T, O, U, C, H, E | G, A, U, D, Y, E | F, L, U, S, H, E | T, I, N, C, T, E | S, T, A, I, N, E | B, R, A, V, E, E | V, I, V, I, D, E | G, L, O, S, S, E | B, L, A, C, K, E | A, Z, U, R, E, E | S, H, A, D, E, E | S, H, O, W, Y, E | G, U, I, S, E, E | B, R, O, W, N, E | S, E, P, I, A, E | L, I, G, H, T, E | I, M, B, U, E, E | T, I, N, G, E, E | P, U, R, P, L, E | S, I, L, V, E, R | A, U, R, I, F, Y | I, N, D, I, G, O | C, O, L, O, U, R | B, R, I, G, H, T | A, B, L, A, Z, E | Y, E, L, L, O, W | C, H, A, N, G, E | M, O, T, L, E, Y | F, L, A, S, H, Y | R, E, D, D, E, N | C, H, R, O, M, A | M, O, T, T, L, E |];
+max_len = 7;
+word_lens = [ 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7 ];
+words = [| M, A, S, S, E, E, E | P, O, P, E, E, E, E | B, A, C, H, E, E, E | O, R, G, A, N, E, E | S, A, L, O, N, E, E | P, A, R, I, S, E, E | C, A, N, T, O, E, E | O, P, E, R, A, E, E | M, U, S, I, C, E, E | H, A, Y, D, N, E, E | L, A, R, G, O, E, E | V, E, R, D, I, E, E | W, A, L, T, Z, E, E | F, L, U, T, E, E, E | M, O, T, E, T, E, E | P, I, A, N, O, E, E | L, I, S, Z, T, E, E | E, T, U, D, E, E, E | I, T, A, L, Y, E, E | C, E, L, L, O, E, E | C, H, O, I, R, E, E | S, O, N, A, T, A, E | M, A, H, L, E, R, E | R, I, D, D, I, M, E | M, O, Z, A, R, T, E | L, O, N, D, O, N, E | F, R, A, N, C, E, E | W, A, R, S, A, W, E | V, I, E, N, N, A, E | L, E, G, A, T, O, E | C, Z, E, C, H, S, E | T, E, C, H, N, O, E | P, O, L, A, N, D, E | W, R, I, T, E, R, E | M, U, N, I, C, H, E | C, H, O, P, I, N, E | M, E, L, O, D, Y, E | A, U, T, H, O, R, E | H, A, N, D, E, L, E | V, I, O, L, I, N, E | W, A, G, N, E, R, E | B, R, A, H, M, S, E | P, R, A, G, U, E, E | M, A, Z, U, R, K, A |];
 
 pos_var_strat = first_fail;
 pos_val_strat = indomain_median;
8 changes: 4 additions & 4 deletions tmp/data3.dzn
@@ -1,9 +1,9 @@
 n = 15;
-m = 52;
+m = 44;
 
-max_len = 6;
-word_lens = [ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 ];
-words = [| B, R, A, W, E, E | R, O, A, N, E, E | S, H, O, T, E, E | D, E, E, P, E, E | T, I, N, T, E, E | P, I, N, K, E, E | T, O, N, E, E, E | G, R, E, Y, E, E | B, L, U, E, E, E | G, R, A, Y, E, E | P, I, E, D, E, E | H, U, E, D, E, E | C, Y, A, N, E, E | S, K, I, N, E, E | R, I, C, H, E, E | H, U, E, S, E, E | A, Z, U, R, E, E | J, A, Z, Z, Y, E | W, H, I, T, E, E | F, L, U, S, H, E | S, H, A, D, E, E | L, I, G, H, T, E | P, A, I, N, T, E | T, I, N, C, T, E | S, E, P, I, A, E | B, R, O, W, N, E | G, A, U, D, Y, E | B, L, A, C, K, E | S, T, A, I, N, E | I, M, B, U, E, E | G, U, I, S, E, E | T, O, U, C, H, E | B, R, A, V, E, E | G, R, E, E, N, E | T, I, N, G, E, E | V, I, V, I, D, E | S, H, O, W, Y, E | G, L, O, S, S, E | I, N, D, I, G, O | S, I, L, V, E, R | A, B, L, A, Z, E | P, U, R, P, L, E | M, O, T, L, E, Y | C, H, R, O, M, A | M, O, T, T, L, E | C, O, L, O, U, R | A, U, R, I, F, Y | Y, E, L, L, O, W | R, E, D, D, E, N | C, H, A, N, G, E | B, R, I, G, H, T | F, L, A, S, H, Y |];
+max_len = 7;
+word_lens = [ 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7 ];
+words = [| M, A, S, S, E, E, E | B, A, C, H, E, E, E | P, O, P, E, E, E, E | P, I, A, N, O, E, E | S, A, L, O, N, E, E | C, H, O, I, R, E, E | P, A, R, I, S, E, E | L, I, S, Z, T, E, E | O, R, G, A, N, E, E | H, A, Y, D, N, E, E | E, T, U, D, E, E, E | M, U, S, I, C, E, E | C, E, L, L, O, E, E | F, L, U, T, E, E, E | I, T, A, L, Y, E, E | L, A, R, G, O, E, E | V, E, R, D, I, E, E | M, O, T, E, T, E, E | W, A, L, T, Z, E, E | O, P, E, R, A, E, E | C, A, N, T, O, E, E | B, R, A, H, M, S, E | M, O, Z, A, R, T, E | L, E, G, A, T, O, E | H, A, N, D, E, L, E | C, Z, E, C, H, S, E | L, O, N, D, O, N, E | C, H, O, P, I, N, E | S, O, N, A, T, A, E | W, A, R, S, A, W, E | W, A, G, N, E, R, E | A, U, T, H, O, R, E | M, A, H, L, E, R, E | T, E, C, H, N, O, E | V, I, E, N, N, A, E | F, R, A, N, C, E, E | R, I, D, D, I, M, E | P, O, L, A, N, D, E | V, I, O, L, I, N, E | M, U, N, I, C, H, E | M, E, L, O, D, Y, E | P, R, A, G, U, E, E | W, R, I, T, E, R, E | M, A, Z, U, R, K, A |];
 
 pos_var_strat = first_fail;
 pos_val_strat = indomain_median;