utils.py 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408
  1. from __future__ import annotations
  2. import importlib
  3. import logging
  4. import unicodedata
  5. from codecs import IncrementalDecoder
  6. from encodings.aliases import aliases
  7. from functools import lru_cache
  8. from re import findall
  9. from typing import Generator
  10. from _multibytecodec import ( # type: ignore[import-not-found,import]
  11. MultibyteIncrementalDecoder,
  12. )
  13. from .constant import (
  14. ENCODING_MARKS,
  15. IANA_SUPPORTED_SIMILAR,
  16. RE_POSSIBLE_ENCODING_INDICATION,
  17. UNICODE_RANGES_COMBINED,
  18. UNICODE_SECONDARY_RANGE_KEYWORD,
  19. UTF8_MAXIMAL_ALLOCATION,
  20. )
  21. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  22. def is_accentuated(character: str) -> bool:
  23. try:
  24. description: str = unicodedata.name(character)
  25. except ValueError: # Defensive: unicode database outdated?
  26. return False
  27. return (
  28. "WITH GRAVE" in description
  29. or "WITH ACUTE" in description
  30. or "WITH CEDILLA" in description
  31. or "WITH DIAERESIS" in description
  32. or "WITH CIRCUMFLEX" in description
  33. or "WITH TILDE" in description
  34. or "WITH MACRON" in description
  35. or "WITH RING ABOVE" in description
  36. )
  37. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  38. def remove_accent(character: str) -> str:
  39. decomposed: str = unicodedata.decomposition(character)
  40. if not decomposed:
  41. return character
  42. codes: list[str] = decomposed.split(" ")
  43. return chr(int(codes[0], 16))
  44. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  45. def unicode_range(character: str) -> str | None:
  46. """
  47. Retrieve the Unicode range official name from a single character.
  48. """
  49. character_ord: int = ord(character)
  50. for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
  51. if character_ord in ord_range:
  52. return range_name
  53. return None
  54. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  55. def is_latin(character: str) -> bool:
  56. try:
  57. description: str = unicodedata.name(character)
  58. except ValueError: # Defensive: unicode database outdated?
  59. return False
  60. return "LATIN" in description
  61. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  62. def is_punctuation(character: str) -> bool:
  63. character_category: str = unicodedata.category(character)
  64. if "P" in character_category:
  65. return True
  66. character_range: str | None = unicode_range(character)
  67. if character_range is None:
  68. return False
  69. return "Punctuation" in character_range
  70. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  71. def is_symbol(character: str) -> bool:
  72. character_category: str = unicodedata.category(character)
  73. if "S" in character_category or "N" in character_category:
  74. return True
  75. character_range: str | None = unicode_range(character)
  76. if character_range is None:
  77. return False
  78. return "Forms" in character_range and character_category != "Lo"
  79. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  80. def is_emoticon(character: str) -> bool:
  81. character_range: str | None = unicode_range(character)
  82. if character_range is None:
  83. return False
  84. return "Emoticons" in character_range or "Pictographs" in character_range
  85. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  86. def is_separator(character: str) -> bool:
  87. if character.isspace() or character in {"|", "+", "<", ">"}:
  88. return True
  89. character_category: str = unicodedata.category(character)
  90. return "Z" in character_category or character_category in {"Po", "Pd", "Pc"}
  91. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  92. def is_case_variable(character: str) -> bool:
  93. return character.islower() != character.isupper()
  94. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  95. def is_cjk(character: str) -> bool:
  96. try:
  97. character_name = unicodedata.name(character)
  98. except ValueError: # Defensive: unicode database outdated?
  99. return False
  100. return "CJK" in character_name
  101. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  102. def is_hiragana(character: str) -> bool:
  103. try:
  104. character_name = unicodedata.name(character)
  105. except ValueError: # Defensive: unicode database outdated?
  106. return False
  107. return "HIRAGANA" in character_name
  108. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  109. def is_katakana(character: str) -> bool:
  110. try:
  111. character_name = unicodedata.name(character)
  112. except ValueError: # Defensive: unicode database outdated?
  113. return False
  114. return "KATAKANA" in character_name
  115. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  116. def is_hangul(character: str) -> bool:
  117. try:
  118. character_name = unicodedata.name(character)
  119. except ValueError: # Defensive: unicode database outdated?
  120. return False
  121. return "HANGUL" in character_name
  122. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  123. def is_thai(character: str) -> bool:
  124. try:
  125. character_name = unicodedata.name(character)
  126. except ValueError: # Defensive: unicode database outdated?
  127. return False
  128. return "THAI" in character_name
  129. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  130. def is_arabic(character: str) -> bool:
  131. try:
  132. character_name = unicodedata.name(character)
  133. except ValueError: # Defensive: unicode database outdated?
  134. return False
  135. return "ARABIC" in character_name
  136. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  137. def is_arabic_isolated_form(character: str) -> bool:
  138. try:
  139. character_name = unicodedata.name(character)
  140. except ValueError: # Defensive: unicode database outdated?
  141. return False
  142. return "ARABIC" in character_name and "ISOLATED FORM" in character_name
  143. @lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
  144. def is_unicode_range_secondary(range_name: str) -> bool:
  145. return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
  146. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  147. def is_unprintable(character: str) -> bool:
  148. return (
  149. character.isspace() is False # includes \n \t \r \v
  150. and character.isprintable() is False
  151. and character != "\x1a" # Why? Its the ASCII substitute character.
  152. and character != "\ufeff" # bug discovered in Python,
  153. # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.
  154. )
  155. def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> str | None:
  156. """
  157. Extract using ASCII-only decoder any specified encoding in the first n-bytes.
  158. """
  159. if not isinstance(sequence, bytes):
  160. raise TypeError
  161. seq_len: int = len(sequence)
  162. results: list[str] = findall(
  163. RE_POSSIBLE_ENCODING_INDICATION,
  164. sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
  165. )
  166. if len(results) == 0:
  167. return None
  168. for specified_encoding in results:
  169. specified_encoding = specified_encoding.lower().replace("-", "_")
  170. encoding_alias: str
  171. encoding_iana: str
  172. for encoding_alias, encoding_iana in aliases.items():
  173. if encoding_alias == specified_encoding:
  174. return encoding_iana
  175. if encoding_iana == specified_encoding:
  176. return encoding_iana
  177. return None
  178. @lru_cache(maxsize=128)
  179. def is_multi_byte_encoding(name: str) -> bool:
  180. """
  181. Verify is a specific encoding is a multi byte one based on it IANA name
  182. """
  183. return name in {
  184. "utf_8",
  185. "utf_8_sig",
  186. "utf_16",
  187. "utf_16_be",
  188. "utf_16_le",
  189. "utf_32",
  190. "utf_32_le",
  191. "utf_32_be",
  192. "utf_7",
  193. } or issubclass(
  194. importlib.import_module(f"encodings.{name}").IncrementalDecoder,
  195. MultibyteIncrementalDecoder,
  196. )
  197. def identify_sig_or_bom(sequence: bytes) -> tuple[str | None, bytes]:
  198. """
  199. Identify and extract SIG/BOM in given sequence.
  200. """
  201. for iana_encoding in ENCODING_MARKS:
  202. marks: bytes | list[bytes] = ENCODING_MARKS[iana_encoding]
  203. if isinstance(marks, bytes):
  204. marks = [marks]
  205. for mark in marks:
  206. if sequence.startswith(mark):
  207. return iana_encoding, mark
  208. return None, b""
  209. def should_strip_sig_or_bom(iana_encoding: str) -> bool:
  210. return iana_encoding not in {"utf_16", "utf_32"}
  211. def iana_name(cp_name: str, strict: bool = True) -> str:
  212. """Returns the Python normalized encoding name (Not the IANA official name)."""
  213. cp_name = cp_name.lower().replace("-", "_")
  214. encoding_alias: str
  215. encoding_iana: str
  216. for encoding_alias, encoding_iana in aliases.items():
  217. if cp_name in [encoding_alias, encoding_iana]:
  218. return encoding_iana
  219. if strict:
  220. raise ValueError(f"Unable to retrieve IANA for '{cp_name}'")
  221. return cp_name
  222. def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
  223. if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
  224. return 0.0
  225. decoder_a = importlib.import_module(f"encodings.{iana_name_a}").IncrementalDecoder
  226. decoder_b = importlib.import_module(f"encodings.{iana_name_b}").IncrementalDecoder
  227. id_a: IncrementalDecoder = decoder_a(errors="ignore")
  228. id_b: IncrementalDecoder = decoder_b(errors="ignore")
  229. character_match_count: int = 0
  230. for i in range(255):
  231. to_be_decoded: bytes = bytes([i])
  232. if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
  233. character_match_count += 1
  234. return character_match_count / 254
  235. def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
  236. """
  237. Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using
  238. the function cp_similarity.
  239. """
  240. return (
  241. iana_name_a in IANA_SUPPORTED_SIMILAR
  242. and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
  243. )
  244. def set_logging_handler(
  245. name: str = "charset_normalizer",
  246. level: int = logging.INFO,
  247. format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
  248. ) -> None:
  249. logger = logging.getLogger(name)
  250. logger.setLevel(level)
  251. handler = logging.StreamHandler()
  252. handler.setFormatter(logging.Formatter(format_string))
  253. logger.addHandler(handler)
def cut_sequence_chunks(
    sequences: bytes,
    encoding_iana: str,
    offsets: range,
    chunk_size: int,
    bom_or_sig_available: bool,
    strip_sig_or_bom: bool,
    sig_payload: bytes,
    is_multi_byte_decoder: bool,
    decoded_payload: str | None = None,
) -> Generator[str, None, None]:
    """Yield decoded text chunks of *sequences* taken at each offset in *offsets*.

    For single-byte decoders with a pre-decoded payload available, chunks are
    sliced directly from *decoded_payload*. Otherwise each chunk is cut from the
    raw bytes and decoded with *encoding_iana*; when a BOM/SIG exists but is not
    stripped, *sig_payload* is re-prepended before decoding so the codec state
    stays consistent. Decoding errors are ignored for multi-byte decoders and
    strict for single-byte ones.
    """
    if decoded_payload and is_multi_byte_decoder is False:
        # Fast path: slice the already-decoded text instead of re-decoding bytes.
        for i in offsets:
            chunk = decoded_payload[i : i + chunk_size]
            if not chunk:
                break
            yield chunk
    else:
        for i in offsets:
            chunk_end = i + chunk_size
            # Skip offsets that would run more than 8 bytes past the end of input.
            if chunk_end > len(sequences) + 8:
                continue

            cut_sequence = sequences[i : i + chunk_size]

            if bom_or_sig_available and strip_sig_or_bom is False:
                cut_sequence = sig_payload + cut_sequence

            chunk = cut_sequence.decode(
                encoding_iana,
                errors="ignore" if is_multi_byte_decoder else "strict",
            )

            # multi-byte bad cutting detector and adjustment
            # not the cleanest way to perform that fix but clever enough for now.
            if is_multi_byte_decoder and i > 0:
                chunk_partial_size_chk: int = min(chunk_size, 16)

                # If the chunk's prefix is not found in the reference decode, the
                # cut likely split a multi-byte sequence: back the start offset up
                # by as many as 3 bytes until the prefix matches again.
                if (
                    decoded_payload
                    and chunk[:chunk_partial_size_chk] not in decoded_payload
                ):
                    for j in range(i, i - 4, -1):
                        cut_sequence = sequences[j:chunk_end]

                        if bom_or_sig_available and strip_sig_or_bom is False:
                            cut_sequence = sig_payload + cut_sequence

                        chunk = cut_sequence.decode(encoding_iana, errors="ignore")

                        if chunk[:chunk_partial_size_chk] in decoded_payload:
                            break

            yield chunk