######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################
"""
Module containing the UniversalDetector detector class, which is the primary
class a user of ``chardet`` should use.

:author: Mark Pilgrim (initial port to Python)
:author: Shy Shalom (original C code)
:author: Dan Blanchard (major refactoring for 3.0)
:author: Ian Cordasco
"""
import codecs
import logging
import re
from typing import List, Optional, Union

from .charsetgroupprober import CharSetGroupProber
from .charsetprober import CharSetProber
from .enums import InputState, LanguageFilter, ProbingState
from .escprober import EscCharSetProber
from .latin1prober import Latin1Prober
from .macromanprober import MacRomanProber
from .mbcsgroupprober import MBCSGroupProber
from .resultdict import ResultDict
from .sbcsgroupprober import SBCSGroupProber
from .utf1632prober import UTF1632Prober


class UniversalDetector:
    """
    The ``UniversalDetector`` class underlies the ``chardet.detect`` function
    and coordinates all of the different charset probers.

    To get a ``dict`` containing an encoding and its confidence, you can simply
    run:

    .. code::

            u = UniversalDetector()
            u.feed(some_bytes)
            u.close()
            detected = u.result
    """
    MINIMUM_THRESHOLD = 0.20
    HIGH_BYTE_DETECTOR = re.compile(b"[\x80-\xFF]")
    ESC_DETECTOR = re.compile(b"(\033|~{)")
    WIN_BYTE_DETECTOR = re.compile(b"[\x80-\x9F]")
    ISO_WIN_MAP = {
        "iso-8859-1": "Windows-1252",
        "iso-8859-2": "Windows-1250",
        "iso-8859-5": "Windows-1251",
        "iso-8859-6": "Windows-1256",
        "iso-8859-7": "Windows-1253",
        "iso-8859-8": "Windows-1255",
        "iso-8859-9": "Windows-1254",
        "iso-8859-13": "Windows-1257",
    }
    # Based on https://encoding.spec.whatwg.org/#names-and-labels
    # but altered to match Python names for encodings and remove mappings
    # that break tests.
    LEGACY_MAP = {
        "ascii": "Windows-1252",
        "iso-8859-1": "Windows-1252",
        "tis-620": "ISO-8859-11",
        "iso-8859-9": "Windows-1254",
        "gb2312": "GB18030",
        "euc-kr": "CP949",
        "utf-16le": "UTF-16",
    }

    def __init__(
        self,
        lang_filter: LanguageFilter = LanguageFilter.ALL,
        should_rename_legacy: bool = False,
    ) -> None:
        self._esc_charset_prober: Optional[EscCharSetProber] = None
        self._utf1632_prober: Optional[UTF1632Prober] = None
        self._charset_probers: List[CharSetProber] = []
        self.result: ResultDict = {
            "encoding": None,
            "confidence": 0.0,
            "language": None,
        }
        self.done = False
        self._got_data = False
        self._input_state = InputState.PURE_ASCII
        self._last_char = b""
        self.lang_filter = lang_filter
        self.logger = logging.getLogger(__name__)
        self._has_win_bytes = False
        self.should_rename_legacy = should_rename_legacy

        self.reset()

    @property
    def input_state(self) -> int:
        return self._input_state

    @property
    def has_win_bytes(self) -> bool:
        return self._has_win_bytes

    @property
    def charset_probers(self) -> List[CharSetProber]:
        return self._charset_probers

    def reset(self) -> None:
        """
        Reset the UniversalDetector and all of its probers back to their
        initial states.  This is called by ``__init__``, so you only need to
        call this directly in between analyses of different documents.
        """
        self.result = {"encoding": None, "confidence": 0.0, "language": None}
        self.done = False
        self._got_data = False
        self._has_win_bytes = False
        self._input_state = InputState.PURE_ASCII
        self._last_char = b""
        if self._esc_charset_prober:
            self._esc_charset_prober.reset()
        if self._utf1632_prober:
            self._utf1632_prober.reset()
        for prober in self._charset_probers:
            prober.reset()

    def feed(self, byte_str: Union[bytes, bytearray]) -> None:
        """
        Takes a chunk of a document and feeds it through all of the relevant
        charset probers.

        After calling ``feed``, you can check the value of the ``done``
        attribute to see if you need to continue feeding the
        ``UniversalDetector`` more data, or if it has made a prediction
        (in the ``result`` attribute).

        .. note::
           You should always call ``close`` when you're done feeding in your
           document if ``done`` is not already ``True``.
        """
        if self.done:
            return

        if not byte_str:
            return

        if not isinstance(byte_str, bytearray):
            byte_str = bytearray(byte_str)

        # First check for known BOMs, since these are guaranteed to be correct
        if not self._got_data:
            # If the data starts with BOM, we know it is UTF
            if byte_str.startswith(codecs.BOM_UTF8):
                # EF BB BF  UTF-8 with BOM
                self.result = {
                    "encoding": "UTF-8-SIG",
                    "confidence": 1.0,
                    "language": "",
                }
            elif byte_str.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
                # FF FE 00 00  UTF-32, little-endian BOM
                # 00 00 FE FF  UTF-32, big-endian BOM
                self.result = {"encoding": "UTF-32", "confidence": 1.0, "language": ""}
            elif byte_str.startswith(b"\xFE\xFF\x00\x00"):
                # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
                self.result = {
                    # TODO: This encoding is not supported by Python. Should remove?
                    "encoding": "X-ISO-10646-UCS-4-3412",
                    "confidence": 1.0,
                    "language": "",
                }
            elif byte_str.startswith(b"\x00\x00\xFF\xFE"):
                # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
                self.result = {
                    # TODO: This encoding is not supported by Python. Should remove?
                    "encoding": "X-ISO-10646-UCS-4-2143",
                    "confidence": 1.0,
                    "language": "",
                }
            elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
                # FF FE  UTF-16, little endian BOM
                # FE FF  UTF-16, big endian BOM
                self.result = {"encoding": "UTF-16", "confidence": 1.0, "language": ""}

            self._got_data = True
            if self.result["encoding"] is not None:
                self.done = True
                return

        # If none of those matched and we've only seen ASCII so far, check
        # for high bytes and escape sequences
        if self._input_state == InputState.PURE_ASCII:
            if self.HIGH_BYTE_DETECTOR.search(byte_str):
                self._input_state = InputState.HIGH_BYTE
            elif (
                self._input_state == InputState.PURE_ASCII
                and self.ESC_DETECTOR.search(self._last_char + byte_str)
            ):
                self._input_state = InputState.ESC_ASCII

        self._last_char = byte_str[-1:]

        # Next we will look to see if it appears to be either a UTF-16 or
        # UTF-32 encoding
        if not self._utf1632_prober:
            self._utf1632_prober = UTF1632Prober()

        if self._utf1632_prober.state == ProbingState.DETECTING:
            if self._utf1632_prober.feed(byte_str) == ProbingState.FOUND_IT:
                self.result = {
                    "encoding": self._utf1632_prober.charset_name,
                    "confidence": self._utf1632_prober.get_confidence(),
                    "language": "",
                }
                self.done = True
                return

        # If we've seen escape sequences, use the EscCharSetProber, which
        # uses a simple state machine to check for known escape sequences in
        # HZ and ISO-2022 encodings, since those are the only encodings that
        # use such sequences.
        if self._input_state == InputState.ESC_ASCII:
            if not self._esc_charset_prober:
                self._esc_charset_prober = EscCharSetProber(self.lang_filter)
            if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
                self.result = {
                    "encoding": self._esc_charset_prober.charset_name,
                    "confidence": self._esc_charset_prober.get_confidence(),
                    "language": self._esc_charset_prober.language,
                }
                self.done = True
        # If we've seen high bytes (i.e., those with values greater than 127),
        # we need to do more complicated checks using all our multi-byte and
        # single-byte probers that are left.  The single-byte probers
        # use character bigram distributions to determine the encoding, whereas
        # the multi-byte probers use a combination of character unigram and
        # bigram distributions.
        elif self._input_state == InputState.HIGH_BYTE:
            if not self._charset_probers:
                self._charset_probers = [MBCSGroupProber(self.lang_filter)]
                # If we're checking non-CJK encodings, use single-byte prober
                if self.lang_filter & LanguageFilter.NON_CJK:
                    self._charset_probers.append(SBCSGroupProber())
                self._charset_probers.append(Latin1Prober())
                self._charset_probers.append(MacRomanProber())
            for prober in self._charset_probers:
                if prober.feed(byte_str) == ProbingState.FOUND_IT:
                    self.result = {
                        "encoding": prober.charset_name,
                        "confidence": prober.get_confidence(),
                        "language": prober.language,
                    }
                    self.done = True
                    break
            if self.WIN_BYTE_DETECTOR.search(byte_str):
                self._has_win_bytes = True

    def close(self) -> ResultDict:
        """
        Stop analyzing the current document and come up with a final
        prediction.

        :returns:  The ``result`` attribute, a ``dict`` with the keys
                   `encoding`, `confidence`, and `language`.
        """
        # Don't bother with checks if we're already done
        if self.done:
            return self.result
        self.done = True

        if not self._got_data:
            self.logger.debug("no data received!")

        # Default to ASCII if it is all we've seen so far
        elif self._input_state == InputState.PURE_ASCII:
            self.result = {"encoding": "ascii", "confidence": 1.0, "language": ""}

        # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
        elif self._input_state == InputState.HIGH_BYTE:
            prober_confidence = None
            max_prober_confidence = 0.0
            max_prober = None
            for prober in self._charset_probers:
                if not prober:
                    continue
                prober_confidence = prober.get_confidence()
                if prober_confidence > max_prober_confidence:
                    max_prober_confidence = prober_confidence
                    max_prober = prober
            if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
                charset_name = max_prober.charset_name
                assert charset_name is not None
                lower_charset_name = charset_name.lower()
                confidence = max_prober.get_confidence()
                # Use Windows encoding name instead of ISO-8859 if we saw any
                # extra Windows-specific bytes
                if lower_charset_name.startswith("iso-8859"):
                    if self._has_win_bytes:
                        charset_name = self.ISO_WIN_MAP.get(
                            lower_charset_name, charset_name
                        )
                # Rename legacy encodings with superset encodings if asked
                if self.should_rename_legacy:
                    charset_name = self.LEGACY_MAP.get(
                        (charset_name or "").lower(), charset_name
                    )
                self.result = {
                    "encoding": charset_name,
                    "confidence": confidence,
                    "language": max_prober.language,
                }

        # Log all prober confidences if none met MINIMUM_THRESHOLD
        if self.logger.getEffectiveLevel() <= logging.DEBUG:
            if self.result["encoding"] is None:
                self.logger.debug("no probers hit minimum threshold")
                for group_prober in self._charset_probers:
                    if not group_prober:
                        continue
                    if isinstance(group_prober, CharSetGroupProber):
                        for prober in group_prober.probers:
                            self.logger.debug(
                                "%s %s confidence = %s",
                                prober.charset_name,
                                prober.language,
                                prober.get_confidence(),
                            )
                    else:
                        self.logger.debug(
                            "%s %s confidence = %s",
                            group_prober.charset_name,
                            group_prober.language,
                            group_prober.get_confidence(),
                        )

        return self.result
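

if __name__ == "__main__":
    # Example usage (a minimal illustrative sketch, not part of the library
    # API): stream a file through the detector in chunks, stop early once
    # ``done`` is set, then finalize with ``close``.  Because this module uses
    # relative imports, run it as ``python -m chardet.universaldetector
    # <path-to-file>`` rather than as a bare script.
    import sys

    detector = UniversalDetector()
    with open(sys.argv[1], "rb") as handle:
        for chunk in iter(lambda: handle.read(4096), b""):
            detector.feed(chunk)
            if detector.done:
                break
    print(detector.close())
    # To analyze another document with the same instance, call ``reset()``
    # first, as described in its docstring.
    detector.reset()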