Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

FpGroup: Implemented rewriting systems #12893

Merged
merged 20 commits into from Jul 31, 2017
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
31 changes: 31 additions & 0 deletions sympy/combinatorics/fp_groups.py
Expand Up @@ -8,6 +8,7 @@
from sympy.utilities import public
from sympy.utilities.iterables import flatten
from sympy.combinatorics.free_groups import FreeGroupElement, free_group, zero_mul_simp
from sympy.combinatorics.rewritingsystem import RewritingSystem

from itertools import chain, product
from bisect import bisect_left
Expand Down Expand Up @@ -70,11 +71,41 @@ def __new__(cls, fr_grp, relators):

obj._order = None
obj._center = None

obj._rewriting_system = RewritingSystem(obj)
return obj

def _generators(self):
return self.free_group.generators

def make_confluent(self):
    '''
    Attempt to turn the group's rewriting system into a confluent one.

    '''
    system = self._rewriting_system
    system.make_confluent()

def reduce(self, word):
    '''
    Rewrite `word` into its reduced form using the group's rewriting
    system. When the system is confluent, the result is the unique
    normal form of `word` in the group.

    '''
    system = self._rewriting_system
    return system.reduce(word)

def equals(self, word1, word2):
    '''
    Compare `word1` and `word2` for equality in the group
    using the group's rewriting system. If the system is
    confluent, the returned answer is necessarily correct.
    (If it isn't, `False` could be returned in some cases
    where in fact `word1 == word2`)

    '''
    # word1 == word2 in the group iff word1*word2**-1 reduces
    # to the identity of the underlying free group.
    return self.reduce(word1*word2**-1) == self.free_group.identity

def subgroup(self, gens, C=None):
'''
Return the subgroup generated by `gens` using the
Expand Down
31 changes: 24 additions & 7 deletions sympy/combinatorics/free_groups.py
Expand Up @@ -600,21 +600,33 @@ def commutator(self, other):
else:
return self.inverse()*other.inverse()*self*other

def eliminate_words(self, words, _all=False, inverse=True):
    '''
    Replace each subword from the dictionary `words` by words[subword].
    If `words` is a list, replace each of its elements by the identity.

    Substitutions are repeated until a fixed point is reached, i.e.
    until a full pass over `words` no longer changes the result.

    '''
    # Normalise both accepted input forms to (subword, replacement)
    # pairs; a replacement of None makes `eliminate_word` fall back
    # to its default (the identity).
    if isinstance(words, dict):
        pairs = list(words.items())
    else:
        pairs = [(sub, None) for sub in words]

    new = self
    again = True
    while again:
        again = False
        for sub, by in pairs:
            prev = new
            new = new.eliminate_word(sub, by, _all=_all, inverse=inverse)
            if new != prev:
                # something changed, so another pass may expose
                # new occurrences of the subwords
                again = True
    return new

def eliminate_word(self, gen, by=None, _all=False):
def eliminate_word(self, gen, by=None, _all=False, inverse=True):
"""
For an associative word `self`, a subword `gen`, and an associative
word `by` (identity by default), return the associative word obtained by
Expand Down Expand Up @@ -658,6 +670,8 @@ def eliminate_word(self, gen, by=None, _all=False):
i = word.subword_index(gen)
k = 1
except ValueError:
if not inverse:
return word
try:
i = word.subword_index(gen**-1)
k = -1
Expand Down Expand Up @@ -861,7 +875,7 @@ def generator_count(self, gen):
s = gen.array_form[0]
return s[1]*sum([abs(i[1]) for i in self.array_form if i[0] == s[0]])

def subword(self, from_i, to_j):
def subword(self, from_i, to_j, strict=True):
"""
For an associative word `self` and two positive integers `from_i` and
`to_j`, `subword` returns the subword of `self` that begins at position
Expand All @@ -878,6 +892,9 @@ def subword(self, from_i, to_j):

"""
group = self.group
if not strict:
from_i = max(from_i, 0)
to_j = min(len(self), to_j)
if from_i < 0 or to_j > len(self):
raise ValueError("`from_i`, `to_j` must be positive and no greater than "
"the length of associative word")
Expand Down
206 changes: 206 additions & 0 deletions sympy/combinatorics/rewritingsystem.py
@@ -0,0 +1,206 @@
from __future__ import print_function, division

from sympy import S
from sympy.combinatorics.free_groups import FreeGroupElement

class RewritingSystem(object):
    '''
    A class implementing rewriting systems for `FpGroup`s.

    References
    ==========
    [1] Epstein, D., Holt, D. and Rees, S. (1991).
    The use of Knuth-Bendix methods to solve the word problem in automatic groups.
    Journal of Symbolic Computation, 12(4-5), pp.397-414.

    [2] GAP's Manual on its KBMAG package
    https://www.gap-system.org/Manuals/pkg/kbmag-1.5.3/doc/manual.pdf

    '''
    def __init__(self, group):
        self.group = group
        self.alphabet = group.generators
        self._is_confluent = None

        # these values are taken from [2]
        self.maxeqns = 32767  # max rules
        self.tidyint = 100  # rules before tidying

        # dictionary of reductions: maps a word (lhs) to the
        # word it should be rewritten to (rhs)
        self.rules = {}
        self._init_rules()

    @property
    def is_confluent(self):
        '''
        Return `True` if the system is confluent

        '''
        if self._is_confluent is None:
            # the check is expensive, so cache the result
            self._is_confluent = self._check_confluence()
        return self._is_confluent

    def _init_rules(self):
        '''
        Install the rules `relator -> identity` for all of the
        group's relators and tidy up the resulting system.

        '''
        identity = self.group.free_group.identity
        for r in self.group.relators:
            self.add_rule(r, identity)
        self._remove_redundancies()
        return

    def add_rule(self, w1, w2):
        '''
        Add the rewriting rule(s) implied by the equation `w1 == w2`
        and return the set of newly installed left-hand sides.

        Raises ValueError if the number of rules would exceed
        `self.maxeqns`.

        '''
        new_keys = set()
        if len(self.rules) + 1 > self.maxeqns:
            # this was `return ValueError(...)` which silently handed
            # the exception object back to the caller; it must be raised
            raise ValueError("Too many rules were defined.")

        # orient the equation so that `w1` is the larger side
        if w1 < w2:
            w1, w2 = w2, w1

        s1, s2 = w1, w2

        # The following is the equivalent of checking
        # s1 for overlaps with the implicit reductions
        # {g*g**-1 -> <identity>} and {g**-1*g -> <identity>}
        # for any generator g without installing the
        # redundant rules that would result from processing
        # the overlaps. See [1], Section 3 for details.

        # overlaps on the right
        # NOTE(review): if len(s1) - len(s2) == 1 and no cancellation
        # occurs in `s2*g**-1`, the difference becomes -1 and no rule
        # is stored for this equation -- confirm this case cannot
        # arise or handle it explicitly.
        while len(s1) - len(s2) > 0:
            g = s1[len(s1)-1]
            s1 = s1.subword(0, len(s1)-1)
            s2 = s2*g**-1
            if len(s1) - len(s2) in [0, 1, 2]:
                if s2 < s1:
                    self.rules[s1] = s2
                    new_keys.add(s1)
                elif s1 < s2:
                    self.rules[s2] = s1
                    new_keys.add(s2)

        # overlaps on the left
        while len(w1) - len(w2) > 1:
            g = w1[0]
            w1 = w1.subword(1, len(w1))
            w2 = g**-1*w2
            if len(w1) - len(w2) in [0, 1, 2]:
                if w2 < w1:
                    self.rules[w1] = w2
                    new_keys.add(w1)
                elif w1 < w2:
                    self.rules[w2] = w1
                    new_keys.add(w2)

        return new_keys

    def _remove_redundancies(self):
        '''
        Reduce left- and right-hand sides of reduction rules
        and remove redundant equations (i.e. those for which
        lhs == rhs)

        '''
        rules = self.rules.copy()
        for r in rules:
            v = self.reduce(r, exclude=r)
            w = self.reduce(rules[r])
            if v != r:
                # the lhs is itself reducible by the other rules:
                # drop the rule and, if anything non-trivial remains,
                # install its reduced form
                del self.rules[r]
                if v != w:
                    self.add_rule(v, w)
            else:
                # lhs unchanged; make sure the rhs is fully reduced
                self.rules[v] = w
        return

    def make_confluent(self, check=False):
        '''
        Try to make the system confluent using the Knuth-Bendix
        completion algorithm. If `check` is `True`, return `False`
        as soon as a missing rule is discovered (the system was not
        confluent) and `True` if no rule was missing.

        '''
        lhs = list(self.rules.keys())

        def _overlaps(r1, r2):
            # all words obtained by overlapping a suffix of r1 with
            # a prefix of r2 (each contains both lhs's as subwords)
            len1 = len(r1)
            len2 = len(r2)
            result = []
            for j in range(1, len1 + len2):
                if (r1.subword(len1 - j, len1 + len2 - j, strict=False)
                        == r2.subword(j - len1, j, strict=False)):
                    a = r1.subword(0, len1 - j, strict=False)
                    a = a*r2.subword(0, j - len1, strict=False)
                    b = r2.subword(j - len1, j, strict=False)
                    c = r2.subword(j, len2, strict=False)
                    c = c*r1.subword(len1 + len2 - j, len1, strict=False)
                    result.append(a*b*c)
            return result

        def _process_overlap(w, r1, r2):
            # rewrite the overlap word with each of the two rules; if
            # the results don't reduce to the same word, a new rule is
            # required for confluence
            s = w.eliminate_word(r1, self.rules[r1])
            t = w.eliminate_word(r2, self.rules[r2])
            if self.reduce(s) != self.reduce(t):
                return self.add_rule(t, s)
            return None

        added = 0
        for r1 in lhs:
            # this could be lhs[i+1:] to not
            # check each pair twice but lhs
            # is extended in the loop and the new
            # elements have to be checked with the
            # preceding ones. there is probably a better way
            # to handle this
            for r2 in lhs:
                overlaps = _overlaps(r1, r2)
                if not overlaps:
                    continue
                for w in overlaps:
                    new_keys = _process_overlap(w, r1, r2)
                    if new_keys:
                        if check:
                            # a rule was missing, so the system
                            # was not confluent
                            return False
                        added += 1
                        if added > self.tidyint:
                            # tidy up
                            self._remove_redundancies()
                        lhs.extend([n for n in new_keys if n in self.rules])

        self._is_confluent = True
        if check:
            return True
        else:
            self._remove_redundancies()
        return

    def _check_confluence(self):
        return self.make_confluent(check=True)

    def reduce(self, word, exclude=None):
        '''
        Apply reduction rules to `word` excluding the reduction rule
        for the lhs equal to `exclude`

        '''
        rules = {r: self.rules[r] for r in self.rules if r != exclude}
        # the following is essentially `eliminate_words()` code from the
        # `FreeGroupElement` class, the only difference being the first
        # "if" statement
        again = True
        new = word
        while again:
            again = False
            for r in rules:
                prev = new
                if len(r) == len(rules[r]):
                    # NOTE(review): for equal-length sides only direct
                    # (non-inverse) occurrences of the lhs are replaced;
                    # presumably this keeps the rewriting terminating --
                    # confirm
                    new = new.eliminate_word(r, rules[r], _all=True, inverse=False)
                else:
                    new = new.eliminate_word(r, rules[r], _all=True)
                if new != prev:
                    again = True
        return new
19 changes: 19 additions & 0 deletions sympy/combinatorics/tests/test_rewriting.py
@@ -0,0 +1,19 @@
from sympy.combinatorics.fp_groups import FpGroup
from sympy.combinatorics.free_groups import free_group

def test_rewriting():
    """Smoke-test the rewriting system of an FpGroup: confluence
    detection, Knuth-Bendix completion, word reduction and equality."""
    F, a, b = free_group("a, b")
    G = FpGroup(F, [a*b*a**-1*b**-1])
    a, b = G.generators
    R = G._rewriting_system
    # the single commutator relator is not confluent on its own;
    # compare truthiness idiomatically rather than with `== False`
    assert not R.is_confluent
    R.make_confluent()
    assert R.is_confluent

    assert G.reduce(b**3*a**4*b**-2*a) == a**5*b
    assert G.equals(b**2*a**-1*b, b**4*a**-1*b**-1)

    G = FpGroup(F, [a**3, b**3, (a*b)**2])
    R = G._rewriting_system
    assert R.is_confluent
    assert G.reduce(b*a**-1*b**-1*a**3*b**4*a**-1*b**-15) == b*a