diff --git a/README.md b/README.md
index dcc539b..22982c7 100644
--- a/README.md
+++ b/README.md
@@ -32,5 +32,6 @@ Challenges by day (4 days missed):
[Day 21](./day21) - (CodeFights) Helping Stephan
[Day 22](./day22) - Permutations of a list
[Day 23](./day23) - Strings Rearrangement
+[Day 24](./day24) - Strings Rearrangement (backtracking)
diff --git a/day23/README.md b/day23/README.md
index 631ec2c..c83c5ab 100644
--- a/day23/README.md
+++ b/day23/README.md
@@ -41,3 +41,18 @@ Any better way to approach this problem? I think so..
https://en.wikipedia.org/wiki/Hamiltonian_path_problem
+Also check out this minified — and slightly slower — version (about 180 characters):
+
+```python
+from itertools import permutations as p
+
+def stringsRearrangement(i):
+ return any(n(x) for x in p(i))
+
+def n(i):
+ return len(i) <= 1 or (d(i[0], i[1]) and n(i[1:]))
+
+def d(w, a):
+ return sum(i != j for i, j in zip(w, a)) == 1
+```
+
diff --git a/day24/README.md b/day24/README.md
new file mode 100644
index 0000000..59ebc29
--- /dev/null
+++ b/day24/README.md
@@ -0,0 +1,30 @@
+Today's challenge is a continuation of [yesterday's](../day23)!
+
+## Ideas
+
+I wanted to take a different approach to this question; instead of doing a
+full brute-force exploration on all combinations of the inputArray,
+construct a graph and then do DFS from every node to find a possible
+Hamiltonian path. Such a path exists if and only if there is a desired
+arrangement of the strings.
+
+I can construct the graph in `O(N^2)` time where `N` is the number of
+elements in the inputArray, and also the number of vertices in my graph. I check
+every possible pair of strings to see whether they differ by exactly one place,
+and add an edge in the graph between the two if so.
+
+I can then run DFS from each node. A single DFS visit pass costs `O(V + E)`,
+but a full backtracking search for a Hamiltonian path is exponential in the
+worst case, so the DFS stage — not graph construction — dominates the runtime.
+
+Overall, the graph construction comes out to a runtime of `O(N^2)`. The space complexity
+scales in proportion to the number of vertices and edges I need to keep track
+of in my graph. Vertices increase with every additional element in the inputArray.
+Edges increase when there are higher frequencies of words in the inputArray
+that are closer to each other in edit distance.
+
+## Code
+
+[Python](./stringsRearrangementBacktracking.py) (unfinished)
+
+## Follow up
+
diff --git a/day24/stringsRearrangementBacktracking.py b/day24/stringsRearrangementBacktracking.py
new file mode 100644
index 0000000..57e727f
--- /dev/null
+++ b/day24/stringsRearrangementBacktracking.py
@@ -0,0 +1,92 @@
def differByOne(word, anotherWord):
    """Return True iff the two words differ in exactly one aligned position.

    Characters are compared pairwise only up to the length of the shorter
    word (zip truncation) — any unmatched tail is ignored.
    """
    mismatches = 0
    for left, right in zip(word, anotherWord):
        if left != right:
            mismatches += 1
            if mismatches > 1:
                # More than one difference already — no need to keep scanning.
                return False
    return mismatches == 1
+
def tuplelizeDuplicates(inputArray):
    """Map each word to a (word, occurrence_index) tuple, in input order.

    Repeated words get increasing indices (0, 1, 2, ...), so duplicates
    become distinct hashable vertices for the graph.

    Note: the original pre-seeded a dict with every element and then tested
    `elem in dups`, which was always True — a single running-count pass does
    the same job without the dead membership check.
    """
    seen = {}
    result = []
    for elem in inputArray:
        occurrence = seen.get(elem, 0)
        result.append((elem, occurrence))
        seen[elem] = occurrence + 1
    return result
+
def createGraph(inputTuples):
    """Build an undirected adjacency-set graph over (word, occurrence) tuples.

    Two vertices are joined when their words differ in exactly one aligned
    position (see differByOne). Each unordered pair is examined once
    (j > i) and both directions of the edge are recorded — the original
    scanned every ordered pair, testing each pair twice for the same result.
    Also uses range() instead of the Python-2-only xrange().
    """
    g = {elem: set() for elem in inputTuples}

    size = len(inputTuples)
    for i in range(size):
        for j in range(i + 1, size):
            v1 = inputTuples[i]
            v2 = inputTuples[j]
            # v1[0] / v2[0] are the word components of the vertex tuples.
            if differByOne(v1[0], v2[0]):
                g[v1].add(v2)
                g[v2].add(v1)
    return g
+
def derp(graph, startTuple, visited=None):
    """Backtracking DFS: True iff a Hamiltonian path (every vertex visited
    exactly once) starts at startTuple.

    Fixes over the original:
    - `visited` defaults to None, not a mutable default set that leaked
      state between top-level calls (every call after the first saw stale
      visited vertices).
    - The start vertex itself is counted as visited.
    - Vertices are un-marked on the way back up (true backtracking), so
      failing down one branch does not poison sibling branches.
    - Debug print statements removed.
    """
    if visited is None:
        visited = set()
    visited.add(startTuple)
    try:
        if len(visited) == len(graph):
            # Every vertex reached exactly once: Hamiltonian path found.
            return True
        for neighbor in graph[startTuple]:
            if neighbor not in visited:
                if derp(graph, neighbor, visited):
                    return True
        return False
    finally:
        # Backtrack so other branches (and later calls) may reuse this vertex.
        visited.discard(startTuple)
+
def hamiltonianPath(graph):
    """Return True iff the adjacency-set graph contains a Hamiltonian path.

    The empty graph vacuously has one. Otherwise a path, if any exists,
    must start at *some* vertex, so a DFS is attempted from each in turn.
    (Leftover debug print statements removed.)
    """
    if not graph:
        return True
    return any(derp(graph, node) for node in graph)
+
def testDifferByOne():
    """differByOne is True exactly when one aligned position differs."""
    negatives = [("", ""), ("a", "a"), ("aaa", "aaa"), ("abcdeff", "abcedff")]
    positives = [("a", "b"), ("abc", "abb"), ("abc", "bbc"), ("abcdefg", "abcdefz")]
    for first, second in negatives:
        assert not differByOne(first, second)
    for first, second in positives:
        assert differByOne(first, second)
+
def testTuplelizeDuplicates():
    """Repeated words receive increasing occurrence indices."""
    expected = [("qq", 0), ("qq", 1), ("qq", 2)]
    assert tuplelizeDuplicates(["qq"] * 3) == expected
+
def testCreateGraph():
    """Graph construction: isolated, duplicate, and paired vertices."""
    one_edge = createGraph(tuplelizeDuplicates(["aba", "bbb", "bab"]))
    assert one_edge == {
        ("aba", 0): set(),
        ("bbb", 0): {("bab", 0)},
        ("bab", 0): {("bbb", 0)},
    }

    # Identical words never connect to each other.
    no_edges = createGraph(tuplelizeDuplicates(["qq", "qq", "qq"]))
    assert no_edges == {("qq", 0): set(), ("qq", 1): set(), ("qq", 2): set()}

    two_pairs = createGraph(tuplelizeDuplicates(["ab", "ad", "ef", "eg"]))
    assert two_pairs == {
        ("ab", 0): {("ad", 0)},
        ("ad", 0): {("ab", 0)},
        ("ef", 0): {("eg", 0)},
        ("eg", 0): {("ef", 0)},
    }
+
def testHamiltonianPath():
    """End-to-end checks over word lists.

    Fix: the original fed raw string lists straight into createGraph, which
    expects (word, occurrence) tuples — so `v1[0]` compared only the *first
    character* of each word. Route inputs through tuplelizeDuplicates first,
    exactly as testCreateGraph does.
    """
    def check(words):
        return hamiltonianPath(createGraph(tuplelizeDuplicates(words)))

    assert check([])                                     # vacuously true
    assert not check(["aba", "bbb", "bab"])              # "aba" is isolated
    assert check(["ab", "bb", "aac"])
    assert not check(["qq", "qq", "qq"])                 # no edges at all
    assert check(["aaa", "aba", "aaa", "aba", "aaa"])    # alternating path
    assert not check(["ab", "ad", "ef", "eg"])           # two components
    assert check(["abc", "abx", "axx", "abx", "abc"])
    assert check(["f", "g", "a", "h"])                   # complete graph
+
def main():
    """Run every test in the module; each raises AssertionError on failure."""
    tests = (
        testDifferByOne,
        testTuplelizeDuplicates,
        testCreateGraph,
        testHamiltonianPath,
    )
    for test in tests:
        test()

if __name__ == "__main__":
    main()
\ No newline at end of file