Skip to content

Commit

Permalink
Removed deprecated code, since 2.0 is not backwards compatible. Resolves issue 558.
Browse files Browse the repository at this point in the history

svn/trunk@8778
  • Loading branch information
stevenbird committed Apr 10, 2011
1 parent 6323bab commit 0c2984e
Show file tree
Hide file tree
Showing 11 changed files with 6 additions and 126 deletions.
10 changes: 0 additions & 10 deletions nltk/chunk/__init__.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -193,13 +193,3 @@ def batch_ne_chunk(tagged_sentences, binary=False):
chunker = nltk.data.load(chunker_pickle) chunker = nltk.data.load(chunker_pickle)
return chunker.batch_parse(tagged_sentences) return chunker.batch_parse(tagged_sentences)


######################################################################
#{ Deprecated
######################################################################
from nltk.internals import Deprecated
class ChunkParseI(ChunkParserI, Deprecated):
"""Use nltk.ChunkParserI instead."""
class RegexpChunk(RegexpChunkParser, Deprecated):
"""Use nltk.RegexpChunkParser instead."""
class Regexp(RegexpParser, Deprecated):
"""Use nltk.RegexpParser instead."""
2 changes: 1 addition & 1 deletion nltk/classify/__init__.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -113,6 +113,6 @@
import numpy import numpy
from maxent import * from maxent import *
__all__ += ['MaxentClassifier', 'BinaryMaxentFeatureEncoding', __all__ += ['MaxentClassifier', 'BinaryMaxentFeatureEncoding',
'ConditionalExponentialClassifier', 'train_maxent_classifier'] 'ConditionalExponentialClassifier']
except ImportError: except ImportError:
pass pass
2 changes: 1 addition & 1 deletion nltk/metrics/__init__.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@


__all__ = ['ConfusionMatrix', 'accuracy', __all__ = ['ConfusionMatrix', 'accuracy',
'f_measure', 'log_likelihood', 'precision', 'recall', 'f_measure', 'log_likelihood', 'precision', 'recall',
'approxrand', 'edit_distance', 'edit_dist', 'windowdiff', 'approxrand', 'edit_distance', 'windowdiff',
'AnnotationTask', 'spearman_correlation', 'AnnotationTask', 'spearman_correlation',
'ranks_from_sequence', 'ranks_from_scores', 'ranks_from_sequence', 'ranks_from_scores',
'NgramAssocMeasures', 'BigramAssocMeasures', 'NgramAssocMeasures', 'BigramAssocMeasures',
Expand Down
43 changes: 0 additions & 43 deletions nltk/parse/__init__.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -94,46 +94,3 @@
'NonprojectiveDependencyParser', 'MaltParser', 'NonprojectiveDependencyParser', 'MaltParser',
] ]


######################################################################
#{ Deprecated
######################################################################
from nltk.internals import Deprecated
class ParseI(ParserI, Deprecated):
"""Use nltk.ParserI instead."""
class AbstractParse(AbstractParser, Deprecated):
"""Use nltk.ParserI instead."""
class RecursiveDescent(RecursiveDescentParser, Deprecated):
"""Use nltk.RecursiveDescentParser instead."""
class SteppingRecursiveDescent(SteppingRecursiveDescentParser, Deprecated):
"""Use nltk.SteppingRecursiveDescentParser instead."""
class ShiftReduce(ShiftReduceParser, Deprecated):
"""Use nltk.ShiftReduceParser instead."""
class SteppingShiftReduce(SteppingShiftReduceParser, Deprecated):
"""Use nltk.SteppingShiftReduceParser instead."""
class EarleyChartParse(EarleyChartParser, Deprecated):
"""Use nltk.EarleyChartParser instead."""
class FeatureEarleyChartParse(FeatureEarleyChartParser, Deprecated):
"""Use nltk.FeatureEarleyChartParser instead."""
class ChartParse(ChartParser, Deprecated):
"""Use nltk.ChartParser instead."""
class SteppingChartParse(SteppingChartParser, Deprecated):
"""Use nltk.SteppingChartParser instead."""
class BottomUpChartParse(BottomUpProbabilisticChartParser, Deprecated):
"""Use nltk.BottomUpProbabilisticChartParser instead."""
class InsideParse(InsideChartParser, Deprecated):
"""Use nltk.InsideChartParser instead."""
class RandomParse(RandomChartParser, Deprecated):
"""Use nltk.RandomChartParser instead."""
class UnsortedParse(UnsortedChartParser, Deprecated):
"""Use nltk.UnsortedChartParser instead."""
class LongestParse(LongestChartParser, Deprecated):
"""Use nltk.LongestChartParser instead."""
class ViterbiParse(ViterbiParser, Deprecated):
"""Use nltk.ViterbiParser instead."""
class GrammarFile(Deprecated):
"""Use nltk.data.load() instead."""
# [xx] had directives: %start, %kimmo, %tagger_file?
def __init__(self, filename=None, verbose=False):
raise ValueError("GrammarFile is no longer supported -- "
"use nltk.data.load() instead.")

9 changes: 0 additions & 9 deletions nltk/parse/chart.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -1076,9 +1076,6 @@ def apply_iter(self, chart, grammar):
if chart.insert(new_edge, ()): if chart.insert(new_edge, ()):
yield new_edge yield new_edge


class CachedTopDownInitRule(TopDownInitRule, Deprecated):
"""Use L{TopDownInitRule} instead."""

class TopDownPredictRule(AbstractChartRule): class TopDownPredictRule(AbstractChartRule):
""" """
A rule licensing edges corresponding to the grammar productions A rule licensing edges corresponding to the grammar productions
Expand All @@ -1099,9 +1096,6 @@ def apply_iter(self, chart, grammar, edge):
if chart.insert(new_edge, ()): if chart.insert(new_edge, ()):
yield new_edge yield new_edge


class TopDownExpandRule(TopDownPredictRule, Deprecated):
"""Use TopDownPredictRule instead"""

class CachedTopDownPredictRule(TopDownPredictRule): class CachedTopDownPredictRule(TopDownPredictRule):
""" """
A cached version of L{TopDownPredictRule}. After the first time A cached version of L{TopDownPredictRule}. After the first time
Expand Down Expand Up @@ -1142,9 +1136,6 @@ def apply_iter(self, chart, grammar, edge):
# Record the fact that we've applied this rule. # Record the fact that we've applied this rule.
self._done[next, index] = (chart, grammar) self._done[next, index] = (chart, grammar)


class CachedTopDownExpandRule(CachedTopDownPredictRule, Deprecated):
"""Use L{CachedTopDownPredictRule} instead."""

#//////////////////////////////////////////////////////////// #////////////////////////////////////////////////////////////
# Bottom-Up Prediction # Bottom-Up Prediction
#//////////////////////////////////////////////////////////// #////////////////////////////////////////////////////////////
Expand Down
1 change: 0 additions & 1 deletion nltk/sem/__init__.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@
'Valuation', 'Assignment', 'Model', 'Undefined', 'is_rel', 'set2rel', 'arity', 'Valuation', 'Assignment', 'Model', 'Undefined', 'is_rel', 'set2rel', 'arity',


# utility methods # utility methods
'text_parse', 'text_interpret', 'text_evaluate',
'batch_parse', 'batch_interpret', 'batch_evaluate', 'batch_parse', 'batch_interpret', 'batch_evaluate',
'root_semrep', 'root_semrep',
'parse_valuation_line', 'parse_valuation', 'parse_logic', 'skolemize', 'parse_valuation_line', 'parse_valuation', 'parse_logic', 'skolemize',
Expand Down
18 changes: 1 addition & 17 deletions nltk/stem/__init__.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -33,23 +33,7 @@


# Stemmers # Stemmers
'RegexpStemmer', 'PorterStemmer', 'LancasterStemmer', 'RegexpStemmer', 'PorterStemmer', 'LancasterStemmer',
'RSLPStemmer', 'WordNetLemmatizer', 'WordnetStemmer', 'RSLPStemmer', 'WordNetLemmatizer',
'ISRIStemmer', 'SnowballStemmer' 'ISRIStemmer', 'SnowballStemmer'
] ]


######################################################################
#{ Deprecated
######################################################################
from nltk.internals import Deprecated
class StemI(StemmerI, Deprecated):
"""Use nltk.StemmerI instead."""
class Regexp(RegexpStemmer, Deprecated):
"""Use nltk.RegexpStemmer instead."""
class Porter(PorterStemmer, Deprecated):
"""Use nltk.PorterStemmer instead."""
class Lancaster(LancasterStemmer, Deprecated):
"""Use nltk.LancasterStemmer instead."""
class Wordnet(WordNetStemmer, Deprecated):
"""Use nltk.WordNetLemmatizer instead."""


16 changes: 0 additions & 16 deletions nltk/stem/wordnet.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -8,8 +8,6 @@


from nltk.corpus.reader.wordnet import NOUN from nltk.corpus.reader.wordnet import NOUN
from nltk.corpus import wordnet as _wordnet from nltk.corpus import wordnet as _wordnet
from nltk.internals import Deprecated



class WordNetLemmatizer(object): class WordNetLemmatizer(object):
""" """
Expand Down Expand Up @@ -40,17 +38,3 @@ def __repr__(self):
print 'aardwolves ->', wnl.lemmatize('aardwolves') print 'aardwolves ->', wnl.lemmatize('aardwolves')
print 'abaci ->', wnl.lemmatize('abaci') print 'abaci ->', wnl.lemmatize('abaci')
print 'hardrock ->', wnl.lemmatize('hardrock') print 'hardrock ->', wnl.lemmatize('hardrock')


class WordnetStemmer(Deprecated, WordNetLemmatizer):
"""Use WordNetLemmatizer instead."""

def __init__(self):
WordNetLemmatizer.__init__(self)


class WordNetStemmer(Deprecated, WordNetLemmatizer):
"""Use WordNetLemmatizer instead."""

def __init__(self):
WordNetLemmatizer.__init__(self)
2 changes: 1 addition & 1 deletion nltk/tokenize/__init__.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@


__all__ = ['WhitespaceTokenizer', 'SpaceTokenizer', 'TabTokenizer', __all__ = ['WhitespaceTokenizer', 'SpaceTokenizer', 'TabTokenizer',
'LineTokenizer', 'RegexpTokenizer', 'BlanklineTokenizer', 'LineTokenizer', 'RegexpTokenizer', 'BlanklineTokenizer',
'WordPunctTokenizer', 'WordTokenizer', 'blankline_tokenize', 'WordPunctTokenizer', 'blankline_tokenize',
'wordpunct_tokenize', 'regexp_tokenize', 'word_tokenize', 'wordpunct_tokenize', 'regexp_tokenize', 'word_tokenize',
'SExprTokenizer', 'sexpr_tokenize', 'line_tokenize', 'SExprTokenizer', 'sexpr_tokenize', 'line_tokenize',
'PunktWordTokenizer', 'PunktSentenceTokenizer', 'PunktWordTokenizer', 'PunktSentenceTokenizer',
Expand Down
16 changes: 1 addition & 15 deletions nltk/tokenize/regexp.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
import re import re
import sre_constants import sre_constants


from nltk.internals import convert_regexp_to_nongrouping, Deprecated from nltk.internals import convert_regexp_to_nongrouping


from api import * from api import *
from util import * from util import *
Expand Down Expand Up @@ -151,20 +151,6 @@ class WordPunctTokenizer(RegexpTokenizer):
def __init__(self): def __init__(self):
RegexpTokenizer.__init__(self, r'\w+|[^\w\s]+') RegexpTokenizer.__init__(self, r'\w+|[^\w\s]+')


class WordTokenizer(RegexpTokenizer, Deprecated):
"""
B{If you want to tokenize words, you should probably use
TreebankWordTokenizer or word_tokenize() instead.}
A tokenizer that divides a text into sequences of alphabetic
characters. Any non-alphabetic characters are discarded. E.g.:
>>> WordTokenizer().tokenize("She said 'hello'.")
['She', 'said', 'hello']
"""
def __init__(self):
RegexpTokenizer.__init__(self, r'\w+')

###################################################################### ######################################################################
#{ Tokenization Functions #{ Tokenization Functions
###################################################################### ######################################################################
Expand Down
13 changes: 1 addition & 12 deletions nltk/util.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
from pprint import pprint from pprint import pprint
from nltk.compat import defaultdict from nltk.compat import defaultdict


from nltk.internals import Deprecated, slice_bounds from nltk.internals import slice_bounds


###################################################################### ######################################################################
# Short usage message # Short usage message
Expand Down Expand Up @@ -969,12 +969,6 @@ def __len__(self):
return max(len(lst) for lst in self._lists) return max(len(lst) for lst in self._lists)




class LazyMappedList(Deprecated, LazyMap):
"""Use LazyMap instead."""
def __init__(self, lst, func):
LazyMap.__init__(self, func, lst)


class LazyZip(LazyMap): class LazyZip(LazyMap):
""" """
A lazy sequence whose elements are tuples, each containing the i-th A lazy sequence whose elements are tuples, each containing the i-th
Expand Down Expand Up @@ -1050,11 +1044,6 @@ def __init__(self, lst):
LazyZip.__init__(self, xrange(len(lst)), lst) LazyZip.__init__(self, xrange(len(lst)), lst)




class LazyMappedChain(Deprecated, LazyConcatenation):
"""Use LazyConcatenation(LazyMap(func, lists)) instead."""
def __init__(self, lst, func):
LazyConcatenation.__init__(self, LazyMap(func, lst))

###################################################################### ######################################################################
# Binary Search in a File # Binary Search in a File
###################################################################### ######################################################################
Expand Down

0 comments on commit 0c2984e

Please sign in to comment.