# models.py
  1. from __future__ import annotations
  2. from encodings.aliases import aliases
  3. from hashlib import sha256
  4. from json import dumps
  5. from re import sub
  6. from typing import Any, Iterator, List, Tuple
  7. from .constant import RE_POSSIBLE_ENCODING_INDICATION, TOO_BIG_SEQUENCE
  8. from .utils import iana_name, is_multi_byte_encoding, unicode_range
class CharsetMatch:
    """
    Hold a single encoding guess for a byte payload: the guessed encoding,
    its mess ("chaos") ratio, language coherence matches, BOM/signature flag,
    and any sibling matches that decode to the exact same text.
    Instances are orderable (see __lt__) so containers can sort candidates
    from most to least probable.
    """

    def __init__(
        self,
        payload: bytes,
        guessed_encoding: str,
        mean_mess_ratio: float,
        has_sig_or_bom: bool,
        languages: CoherenceMatches,
        decoded_payload: str | None = None,
        preemptive_declaration: str | None = None,
    ):
        self._payload: bytes = payload

        self._encoding: str = guessed_encoding
        self._mean_mess_ratio: float = mean_mess_ratio
        self._languages: CoherenceMatches = languages
        self._has_sig_or_bom: bool = has_sig_or_bom
        # Computed on first access of the 'alphabets' property.
        self._unicode_ranges: list[str] | None = None
        # Equivalent matches (same decoded text) attached via add_submatch().
        self._leaves: list[CharsetMatch] = []
        self._mean_coherence_ratio: float = 0.0

        # Cache for output(): last target encoding and the re-encoded bytes.
        self._output_payload: bytes | None = None
        self._output_encoding: str | None = None

        # Decoded text; may be provided up-front, otherwise decoded lazily in __str__.
        self._string: str | None = decoded_payload

        # Encoding name declared inside the payload itself (e.g. an XML prolog), if any.
        self._preemptive_declaration: str | None = preemptive_declaration

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, CharsetMatch):
            if isinstance(other, str):
                # Permit comparing a match directly against an encoding name (alias-aware).
                return iana_name(other) == self.encoding
            return False
        return self.encoding == other.encoding and self.fingerprint == other.fingerprint

    def __lt__(self, other: object) -> bool:
        """
        Implemented to make sorted available upon CharsetMatches items.
        """
        if not isinstance(other, CharsetMatch):
            raise ValueError

        chaos_difference: float = abs(self.chaos - other.chaos)
        coherence_difference: float = abs(self.coherence - other.coherence)

        # Below 1% difference --> Use Coherence
        if chaos_difference < 0.01 and coherence_difference > 0.02:
            return self.coherence > other.coherence
        elif chaos_difference < 0.01 and coherence_difference <= 0.02:
            # When having a difficult decision, use the result that decoded as many multi-byte as possible.
            # preserve RAM usage!
            if len(self._payload) >= TOO_BIG_SEQUENCE:
                return self.chaos < other.chaos
            return self.multi_byte_usage > other.multi_byte_usage

        return self.chaos < other.chaos

    @property
    def multi_byte_usage(self) -> float:
        # Share of raw bytes consumed by multi-byte sequences:
        # 1 - (decoded character count / raw byte count).
        return 1.0 - (len(str(self)) / len(self.raw))

    def __str__(self) -> str:
        # Lazy Str Loading
        if self._string is None:
            self._string = str(self._payload, self._encoding, "strict")
        return self._string

    def __repr__(self) -> str:
        return f"<CharsetMatch '{self.encoding}' bytes({self.fingerprint})>"

    def add_submatch(self, other: CharsetMatch) -> None:
        """
        Attach another match that produced the exact same decoded text.
        Raises ValueError for non-CharsetMatch input or self-attachment.
        """
        if not isinstance(other, CharsetMatch) or other == self:
            raise ValueError(
                "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
                    other.__class__
                )
            )

        other._string = None  # Unload RAM usage; dirty trick.
        self._leaves.append(other)

    @property
    def encoding(self) -> str:
        return self._encoding

    @property
    def encoding_aliases(self) -> list[str]:
        """
        Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855.
        """
        also_known_as: list[str] = []
        # aliases maps alias -> canonical name; collect matches in both directions.
        for u, p in aliases.items():
            if self.encoding == u:
                also_known_as.append(p)
            elif self.encoding == p:
                also_known_as.append(u)
        return also_known_as

    @property
    def bom(self) -> bool:
        return self._has_sig_or_bom

    @property
    def byte_order_mark(self) -> bool:
        # Long-form alias of 'bom'.
        return self._has_sig_or_bom

    @property
    def languages(self) -> list[str]:
        """
        Return the complete list of possible languages found in decoded sequence.
        Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'.
        """
        return [e[0] for e in self._languages]

    @property
    def language(self) -> str:
        """
        Most probable language found in decoded sequence. If none were detected or inferred, the property will return
        "Unknown".
        """
        if not self._languages:
            # Trying to infer the language based on the given encoding
            # Its either English or we should not pronounce ourselves in certain cases.
            if "ascii" in self.could_be_from_charset:
                return "English"

            # doing it there to avoid circular import
            from charset_normalizer.cd import encoding_languages, mb_encoding_languages

            languages = (
                mb_encoding_languages(self.encoding)
                if is_multi_byte_encoding(self.encoding)
                else encoding_languages(self.encoding)
            )

            if len(languages) == 0 or "Latin Based" in languages:
                return "Unknown"

            return languages[0]

        return self._languages[0][0]

    @property
    def chaos(self) -> float:
        return self._mean_mess_ratio

    @property
    def coherence(self) -> float:
        # Confidence ratio of the most probable language; 0.0 when no language matched.
        if not self._languages:
            return 0.0
        return self._languages[0][1]

    @property
    def percent_chaos(self) -> float:
        return round(self.chaos * 100, ndigits=3)

    @property
    def percent_coherence(self) -> float:
        return round(self.coherence * 100, ndigits=3)

    @property
    def raw(self) -> bytes:
        """
        Original untouched bytes.
        """
        return self._payload

    @property
    def submatch(self) -> list[CharsetMatch]:
        return self._leaves

    @property
    def has_submatch(self) -> bool:
        return len(self._leaves) > 0

    @property
    def alphabets(self) -> list[str]:
        """
        Sorted, de-duplicated Unicode range names covered by the decoded text.
        Computed once, then cached on the instance.
        """
        if self._unicode_ranges is not None:
            return self._unicode_ranges
        # list detected ranges
        detected_ranges: list[str | None] = [unicode_range(char) for char in str(self)]
        # filter and sort
        self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
        return self._unicode_ranges

    @property
    def could_be_from_charset(self) -> list[str]:
        """
        The complete list of encoding that output the exact SAME str result and therefore could be the originating
        encoding.
        This list does include the encoding available in property 'encoding'.
        """
        return [self._encoding] + [m.encoding for m in self._leaves]

    def output(self, encoding: str = "utf_8") -> bytes:
        """
        Method to get re-encoded bytes payload using given target encoding. Default to UTF-8.
        Any errors will be simply ignored by the encoder NOT replaced.
        """
        # Recompute only when the requested target encoding differs from the cached one.
        if self._output_encoding is None or self._output_encoding != encoding:
            self._output_encoding = encoding
            decoded_string = str(self)
            if (
                self._preemptive_declaration is not None
                and self._preemptive_declaration.lower()
                not in ["utf-8", "utf8", "utf_8"]
            ):
                # Rewrite the in-document encoding declaration (only the first
                # 8192 characters are searched) to name the new target encoding.
                patched_header = sub(
                    RE_POSSIBLE_ENCODING_INDICATION,
                    lambda m: m.string[m.span()[0] : m.span()[1]].replace(
                        m.groups()[0],
                        iana_name(self._output_encoding).replace("_", "-"),  # type: ignore[arg-type]
                    ),
                    decoded_string[:8192],
                    count=1,
                )

                decoded_string = patched_header + decoded_string[8192:]

            self._output_payload = decoded_string.encode(encoding, "replace")

        return self._output_payload  # type: ignore

    @property
    def fingerprint(self) -> str:
        """
        Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
        """
        return sha256(self.output()).hexdigest()
  199. class CharsetMatches:
  200. """
  201. Container with every CharsetMatch items ordered by default from most probable to the less one.
  202. Act like a list(iterable) but does not implements all related methods.
  203. """
  204. def __init__(self, results: list[CharsetMatch] | None = None):
  205. self._results: list[CharsetMatch] = sorted(results) if results else []
  206. def __iter__(self) -> Iterator[CharsetMatch]:
  207. yield from self._results
  208. def __getitem__(self, item: int | str) -> CharsetMatch:
  209. """
  210. Retrieve a single item either by its position or encoding name (alias may be used here).
  211. Raise KeyError upon invalid index or encoding not present in results.
  212. """
  213. if isinstance(item, int):
  214. return self._results[item]
  215. if isinstance(item, str):
  216. item = iana_name(item, False)
  217. for result in self._results:
  218. if item in result.could_be_from_charset:
  219. return result
  220. raise KeyError
  221. def __len__(self) -> int:
  222. return len(self._results)
  223. def __bool__(self) -> bool:
  224. return len(self._results) > 0
  225. def append(self, item: CharsetMatch) -> None:
  226. """
  227. Insert a single match. Will be inserted accordingly to preserve sort.
  228. Can be inserted as a submatch.
  229. """
  230. if not isinstance(item, CharsetMatch):
  231. raise ValueError(
  232. "Cannot append instance '{}' to CharsetMatches".format(
  233. str(item.__class__)
  234. )
  235. )
  236. # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
  237. if len(item.raw) < TOO_BIG_SEQUENCE:
  238. for match in self._results:
  239. if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
  240. match.add_submatch(item)
  241. return
  242. self._results.append(item)
  243. self._results = sorted(self._results)
  244. def best(self) -> CharsetMatch | None:
  245. """
  246. Simply return the first match. Strict equivalent to matches[0].
  247. """
  248. if not self._results:
  249. return None
  250. return self._results[0]
  251. def first(self) -> CharsetMatch | None:
  252. """
  253. Redundant method, call the method best(). Kept for BC reasons.
  254. """
  255. return self.best()
# Type aliases: a coherence match pairs a detected language name with its
# confidence ratio; detection helpers produce lists of them.
# Defined after CharsetMatch on purpose; the annotations above resolve lazily
# thanks to 'from __future__ import annotations' at the top of the file.
CoherenceMatch = Tuple[str, float]
CoherenceMatches = List[CoherenceMatch]
  258. class CliDetectionResult:
  259. def __init__(
  260. self,
  261. path: str,
  262. encoding: str | None,
  263. encoding_aliases: list[str],
  264. alternative_encodings: list[str],
  265. language: str,
  266. alphabets: list[str],
  267. has_sig_or_bom: bool,
  268. chaos: float,
  269. coherence: float,
  270. unicode_path: str | None,
  271. is_preferred: bool,
  272. ):
  273. self.path: str = path
  274. self.unicode_path: str | None = unicode_path
  275. self.encoding: str | None = encoding
  276. self.encoding_aliases: list[str] = encoding_aliases
  277. self.alternative_encodings: list[str] = alternative_encodings
  278. self.language: str = language
  279. self.alphabets: list[str] = alphabets
  280. self.has_sig_or_bom: bool = has_sig_or_bom
  281. self.chaos: float = chaos
  282. self.coherence: float = coherence
  283. self.is_preferred: bool = is_preferred
  284. @property
  285. def __dict__(self) -> dict[str, Any]: # type: ignore
  286. return {
  287. "path": self.path,
  288. "encoding": self.encoding,
  289. "encoding_aliases": self.encoding_aliases,
  290. "alternative_encodings": self.alternative_encodings,
  291. "language": self.language,
  292. "alphabets": self.alphabets,
  293. "has_sig_or_bom": self.has_sig_or_bom,
  294. "chaos": self.chaos,
  295. "coherence": self.coherence,
  296. "unicode_path": self.unicode_path,
  297. "is_preferred": self.is_preferred,
  298. }
  299. def to_json(self) -> str:
  300. return dumps(self.__dict__, ensure_ascii=True, indent=4)