Permalink
Browse files

out with chardet, in with charade

  • Loading branch information...
1 parent ccd14dd commit 1960dabafdc09a3cd2a4878de99965e421786ee8 @kennethreitz committed Nov 29, 2012
Showing with 11,714 additions and 3,946 deletions.
  1. +1 −1 NOTICE
  2. +27 −0 requests/packages/charade/__init__.py
  3. +923 −0 requests/packages/charade/big5freq.py
  4. +42 −0 requests/packages/charade/big5prober.py
  5. +228 −0 requests/packages/charade/chardistribution.py
  6. +107 −0 requests/packages/charade/charsetgroupprober.py
  7. +62 −0 requests/packages/charade/charsetprober.py
  8. +64 −0 requests/packages/charade/codingstatemachine.py
  9. +38 −0 requests/packages/charade/compat.py
  10. +39 −0 requests/packages/charade/constants.py
  11. +84 −0 requests/packages/charade/escprober.py
  12. +240 −0 requests/packages/charade/escsm.py
  13. +89 −0 requests/packages/charade/eucjpprober.py
  14. +594 −0 requests/packages/charade/euckrfreq.py
  15. +42 −0 requests/packages/charade/euckrprober.py
  16. +426 −0 requests/packages/charade/euctwfreq.py
  17. +42 −0 requests/packages/charade/euctwprober.py
  18. +471 −0 requests/packages/charade/gb2312freq.py
  19. +42 −0 requests/packages/charade/gb2312prober.py
  20. +281 −0 requests/packages/charade/hebrewprober.py
  21. +567 −0 requests/packages/charade/jisfreq.py
  22. +564 −0 requests/packages/charade/jpcntx.py
  23. +262 −0 requests/packages/charade/langbulgarianmodel.py
  24. +329 −0 requests/packages/charade/langcyrillicmodel.py
  25. +225 −0 requests/packages/charade/langgreekmodel.py
  26. +201 −0 requests/packages/charade/langhebrewmodel.py
  27. +225 −0 requests/packages/charade/langhungarianmodel.py
  28. +200 −0 requests/packages/charade/langthaimodel.py
  29. +142 −0 requests/packages/charade/latin1prober.py
  30. +86 −0 requests/packages/charade/mbcharsetprober.py
  31. +52 −0 requests/packages/charade/mbcsgroupprober.py
  32. +529 −0 requests/packages/charade/mbcssm.py
  33. +123 −0 requests/packages/charade/sbcharsetprober.py
  34. +64 −0 requests/packages/charade/sbcsgroupprober.py
  35. +90 −0 requests/packages/charade/sjisprober.py
  36. +173 −0 requests/packages/charade/universaldetector.py
  37. +76 −0 requests/packages/charade/utf8prober.py
  38. +26 −26 requests/packages/chardet/__init__.py
  39. +41 −41 requests/packages/chardet/big5prober.py
  40. +201 −200 requests/packages/chardet/chardistribution.py
  41. +97 −97 requests/packages/chardet/charsetgroupprober.py
  42. +3 −3 requests/packages/chardet/charsetprober.py
  43. +60 −60 requests/packages/chardet/codingstatemachine.py
  44. +12 −0 requests/packages/chardet/compat.py
  45. +240 −240 requests/packages/chardet/escsm.py
  46. +87 −87 requests/packages/chardet/eucjpprober.py
  47. +41 −41 requests/packages/chardet/euckrprober.py
  48. +41 −41 requests/packages/chardet/euctwprober.py
  49. +41 −41 requests/packages/chardet/gb2312prober.py
  50. +269 −269 requests/packages/chardet/hebrewprober.py
  51. +211 −210 requests/packages/chardet/jpcntx.py
  52. +228 −228 requests/packages/chardet/langbulgarianmodel.py
  53. +329 −329 requests/packages/chardet/langcyrillicmodel.py
  54. +225 −225 requests/packages/chardet/langgreekmodel.py
  55. +201 −201 requests/packages/chardet/langhebrewmodel.py
  56. +225 −225 requests/packages/chardet/langhungarianmodel.py
  57. +200 −200 requests/packages/chardet/langthaimodel.py
  58. +138 −137 requests/packages/chardet/latin1prober.py
  59. +83 −83 requests/packages/chardet/mbcharsetprober.py
  60. +514 −514 requests/packages/chardet/mbcssm.py
  61. +110 −109 requests/packages/chardet/sbcharsetprober.py
  62. +86 −86 requests/packages/chardet/sjisprober.py
  63. +21 −20 requests/packages/chardet/test.py
  64. +157 −155 requests/packages/chardet/universaldetector.py
  65. +77 −77 requests/packages/chardet/utf8prober.py
View
@@ -24,7 +24,7 @@ FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TOR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
-Chardet2 License
+Charade License
================
This library is free software; you can redistribute it and/or
@@ -0,0 +1,27 @@
+######################## BEGIN LICENSE BLOCK ########################
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+__version__ = "1.1"
+
+
def detect(aBuf):
    """Detect the character encoding of *aBuf*.

    Returns the detector's result dict (encoding name and confidence).
    """
    # Imported lazily so importing the package itself stays cheap.
    from . import universaldetector

    detector = universaldetector.UniversalDetector()
    detector.reset()
    detector.feed(aBuf)
    detector.close()
    return detector.result
Oops, something went wrong.
@@ -0,0 +1,42 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import Big5DistributionAnalysis
+from .mbcssm import Big5SMModel
+
+
class Big5Prober(MultiByteCharSetProber):
    """Multi-byte charset prober for the Big5 (Traditional Chinese) encoding."""

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # Wire up the Big5-specific state machine and frequency analyser.
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self.reset()

    def get_charset_name(self):
        # Canonical name reported to callers of the detector.
        return "Big5"
@@ -0,0 +1,228 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
+ EUCTW_TYPICAL_DISTRIBUTION_RATIO)
+from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
+ EUCKR_TYPICAL_DISTRIBUTION_RATIO)
+from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
+ GB2312_TYPICAL_DISTRIBUTION_RATIO)
+from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
+ BIG5_TYPICAL_DISTRIBUTION_RATIO)
+from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
+ JIS_TYPICAL_DISTRIBUTION_RATIO)
+from .compat import wrap_ord
+
# Minimum number of analysed multi-byte characters before got_enough_data()
# considers the sample large enough to draw a conclusion.
ENOUGH_DATA_THRESHOLD = 1024
# Confidence ceiling and floor returned by get_confidence().
SURE_YES = 0.99
SURE_NO = 0.01
+
+
class CharDistributionAnalysis:
    """Base class for language-specific character distribution analysis.

    Subclasses supply a char-order -> frequency-order table plus a typical
    distribution ratio, and override get_order() to map a raw two-byte
    character to its position in that table.
    """

    def __init__(self):
        # Mapping table to get frequency order from char order (get from
        # GetOrder())
        self._mCharToFreqOrder = None
        # Size of above table
        self._mTableSize = None
        # This is a constant value which varies from language to language,
        # used in calculating confidence. See
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        # for further detail.
        self._mTypicalDistributionRatio = None
        self.reset()

    def reset(self):
        """Reset the analyser, clearing all accumulated state."""
        # Set once detection is done and a conclusion has been made.
        self._mDone = False
        # Total characters encountered
        self._mTotalChars = 0
        # The number of characters whose frequency order is less than 512
        self._mFreqChars = 0

    def feed(self, aStr, aCharLen):
        """Feed one character together with its known byte length."""
        # Only 2-byte characters participate in the distribution analysis.
        order = self.get_order(aStr) if aCharLen == 2 else -1
        if order < 0:
            return
        self._mTotalChars += 1
        # order is valid
        if order < self._mTableSize and self._mCharToFreqOrder[order] < 512:
            self._mFreqChars += 1

    def get_confidence(self):
        """Return confidence based on the data seen so far."""
        # No character in our consideration range yet: negative answer.
        if self._mTotalChars <= 0:
            return SURE_NO

        rare_chars = self._mTotalChars - self._mFreqChars
        if rare_chars:
            r = self._mFreqChars / (rare_chars
                                    * self._mTypicalDistributionRatio)
            if r < SURE_YES:
                return r

        # normalize confidence (we don't want to be 100% sure)
        return SURE_YES

    def got_enough_data(self):
        """True once enough characters were seen to draw a conclusion.

        It is not necessary to receive all data; a certain amount is enough
        for charset detection.
        """
        return self._mTotalChars > ENOUGH_DATA_THRESHOLD

    def get_order(self, aStr):
        """Map a raw character to its order number; -1 if out of range.

        We do not handle characters based on the original encoding string,
        but convert the encoding bytes to a number (the "order"), so that
        multiple encodings of one language can share a frequency table.
        The base implementation recognises nothing.
        """
        return -1
+
+
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    """Character distribution analysis for the EUC-TW encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCTWCharToFreqOrder
        self._mTableSize = EUCTW_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aStr):
        # for euc-TW encoding, we are interested
        #   first  byte range: 0xc4 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that.
        # Compare via wrap_ord: indexing bytes on Python 3 yields an int,
        # so comparing aStr[0] against a str literal raises TypeError.
        first_char = wrap_ord(aStr[0])
        if first_char >= 0xC4:
            return 94 * (first_char - 0xC4) + wrap_ord(aStr[1]) - 0xA1
        else:
            return -1
+
+
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    """Character distribution analysis for the EUC-KR encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCKRCharToFreqOrder
        self._mTableSize = EUCKR_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aStr):
        # for euc-KR encoding, we are interested
        #   first  byte range: 0xb0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that.
        # Compare via wrap_ord: indexing bytes on Python 3 yields an int,
        # so comparing aStr[0] against a str literal raises TypeError.
        first_char = wrap_ord(aStr[0])
        if first_char >= 0xB0:
            return 94 * (first_char - 0xB0) + wrap_ord(aStr[1]) - 0xA1
        else:
            return -1
+
+
class GB2312DistributionAnalysis(CharDistributionAnalysis):
    """Character distribution analysis for the GB2312 encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = GB2312CharToFreqOrder
        self._mTableSize = GB2312_TABLE_SIZE
        self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aStr):
        # for GB2312 encoding, we are interested
        #   first  byte range: 0xb0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that.
        # Compare via wrap_ord: indexing bytes on Python 3 yields an int,
        # so comparing aStr[0] against a str literal raises TypeError.
        first_char, second_char = wrap_ord(aStr[0]), wrap_ord(aStr[1])
        if (first_char >= 0xB0) and (second_char >= 0xA1):
            return 94 * (first_char - 0xB0) + second_char - 0xA1
        else:
            return -1
+
+
class Big5DistributionAnalysis(CharDistributionAnalysis):
    """Character distribution analysis for the Big5 encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = Big5CharToFreqOrder
        self._mTableSize = BIG5_TABLE_SIZE
        self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aStr):
        # for big5 encoding, we are interested
        #   first  byte range: 0xa4 -- 0xfe
        #   second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
        # no validation needed here. State machine has done that.
        # Compare via wrap_ord: indexing bytes on Python 3 yields an int,
        # so comparing aStr[0] against a str literal raises TypeError.
        first_char, second_char = wrap_ord(aStr[0]), wrap_ord(aStr[1])
        if first_char >= 0xA4:
            if second_char >= 0xA1:
                return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
            else:
                return 157 * (first_char - 0xA4) + second_char - 0x40
        else:
            return -1
+
+
class SJISDistributionAnalysis(CharDistributionAnalysis):
    """Character distribution analysis for the Shift_JIS encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aStr):
        # for sjis encoding, we are interested
        #   first  byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        #   second byte range: 0x40 -- 0x7e,  0x81 -- 0xfe
        # no validation needed here. State machine has done that.
        # Compare via wrap_ord: indexing bytes on Python 3 yields an int,
        # so comparing aStr[0] against a str literal raises TypeError.
        first_char, second_char = wrap_ord(aStr[0]), wrap_ord(aStr[1])
        if (first_char >= 0x81) and (first_char <= 0x9F):
            order = 188 * (first_char - 0x81)
        elif (first_char >= 0xE0) and (first_char <= 0xEF):
            order = 188 * (first_char - 0xE0 + 31)
        else:
            return -1
        order = order + second_char - 0x40
        if second_char > 0x7F:
            # 0x7f is a gap in the second-byte range; such pairs are invalid.
            order = -1
        return order
+
+
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """Character distribution analysis for the EUC-JP encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aStr):
        # for euc-JP encoding, we are interested
        #   first  byte range: 0xa0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that.
        # Compare via wrap_ord: indexing bytes on Python 3 yields an int,
        # so comparing aStr[0] against a str literal raises TypeError.
        char = wrap_ord(aStr[0])
        if char >= 0xA0:
            return 94 * (char - 0xA1) + wrap_ord(aStr[1]) - 0xA1
        else:
            return -1
Oops, something went wrong.

0 comments on commit 1960dab

Please sign in to comment.