api.py 22 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668
  1. from __future__ import annotations
  2. import logging
  3. from os import PathLike
  4. from typing import BinaryIO
  5. from .cd import (
  6. coherence_ratio,
  7. encoding_languages,
  8. mb_encoding_languages,
  9. merge_coherence_ratios,
  10. )
  11. from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
  12. from .md import mess_ratio
  13. from .models import CharsetMatch, CharsetMatches
  14. from .utils import (
  15. any_specified_encoding,
  16. cut_sequence_chunks,
  17. iana_name,
  18. identify_sig_or_bom,
  19. is_cp_similar,
  20. is_multi_byte_encoding,
  21. should_strip_sig_or_bom,
  22. )
# Module-wide logger; no handler is attached here so the library stays silent by
# default. `explain_handler` is only attached (and later removed) by the public
# functions below when they are called with explain=True.
logger = logging.getLogger("charset_normalizer")
explain_handler = logging.StreamHandler()
explain_handler.setFormatter(
    logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
)
def from_bytes(
    sequences: bytes | bytearray,
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.2,
    cp_isolation: list[str] | None = None,
    cp_exclusion: list[str] | None = None,
    preemptive_behaviour: bool = True,
    explain: bool = False,
    language_threshold: float = 0.1,
    enable_fallback: bool = True,
) -> CharsetMatches:
    """
    Given a raw bytes sequence, return the best possibles charset usable to render str objects.
    If there is no results, it is a strong indicator that the source is binary/not text.
    By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence.
    And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will.

    The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page
    but never take it for granted. Can improve the performance.

    You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that
    purpose.

    This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
    By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain'
    toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
    Custom logging format and handler can be set manually.
    """
    if not isinstance(sequences, (bytearray, bytes)):
        raise TypeError(
            "Expected object of type bytes or bytearray, got: {}".format(
                type(sequences)
            )
        )

    if explain:
        # Remember the caller's level so every exit path can restore it after
        # detaching the debug StreamHandler.
        previous_logger_level: int = logger.level
        logger.addHandler(explain_handler)
        logger.setLevel(TRACE)

    length: int = len(sequences)

    if length == 0:
        # Empty payload: nothing to analyse, answer utf_8 by convention.
        logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
        if explain:  # Defensive: ensure exit path clean handler
            logger.removeHandler(explain_handler)
            logger.setLevel(previous_logger_level or logging.WARNING)
        return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])

    # Normalize user-supplied code page names to their internal IANA form.
    if cp_isolation is not None:
        logger.log(
            TRACE,
            "cp_isolation is set. use this flag for debugging purpose. "
            "limited list of encoding allowed : %s.",
            ", ".join(cp_isolation),
        )
        cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
    else:
        cp_isolation = []

    if cp_exclusion is not None:
        logger.log(
            TRACE,
            "cp_exclusion is set. use this flag for debugging purpose. "
            "limited list of encoding excluded : %s.",
            ", ".join(cp_exclusion),
        )
        cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
    else:
        cp_exclusion = []

    # Shrink the sampling plan when the payload does not cover steps * chunk_size.
    if length <= (chunk_size * steps):
        logger.log(
            TRACE,
            "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
            steps,
            chunk_size,
            length,
        )
        steps = 1
        chunk_size = length

    if steps > 1 and length / steps < chunk_size:
        chunk_size = int(length / steps)

    is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
    is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE

    if is_too_small_sequence:
        logger.log(
            TRACE,
            "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
                length
            ),
        )
    elif is_too_large_sequence:
        logger.log(
            TRACE,
            "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
                length
            ),
        )

    # Encodings probed before the generic IANA_SUPPORTED sweep.
    prioritized_encodings: list[str] = []

    # Declarative charset hint found inside the payload (e.g. XML/HTML prolog),
    # only honoured when preemptive_behaviour is enabled.
    specified_encoding: str | None = (
        any_specified_encoding(sequences) if preemptive_behaviour else None
    )

    if specified_encoding is not None:
        prioritized_encodings.append(specified_encoding)
        logger.log(
            TRACE,
            "Detected declarative mark in sequence. Priority +1 given for %s.",
            specified_encoding,
        )

    tested: set[str] = set()
    tested_but_hard_failure: list[str] = []
    tested_but_soft_failure: list[str] = []

    fallback_ascii: CharsetMatch | None = None
    fallback_u8: CharsetMatch | None = None
    fallback_specified: CharsetMatch | None = None

    results: CharsetMatches = CharsetMatches()
    early_stop_results: CharsetMatches = CharsetMatches()

    sig_encoding, sig_payload = identify_sig_or_bom(sequences)

    if sig_encoding is not None:
        prioritized_encodings.append(sig_encoding)
        logger.log(
            TRACE,
            "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
            len(sig_payload),
            sig_encoding,
        )

    prioritized_encodings.append("ascii")

    if "utf_8" not in prioritized_encodings:
        prioritized_encodings.append("utf_8")

    for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
        if cp_isolation and encoding_iana not in cp_isolation:
            continue

        if cp_exclusion and encoding_iana in cp_exclusion:
            continue

        if encoding_iana in tested:
            continue

        tested.add(encoding_iana)

        decoded_payload: str | None = None
        bom_or_sig_available: bool = sig_encoding == encoding_iana
        strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
            encoding_iana
        )

        # Generic UTF-16/UTF-32 are ambiguous without a BOM; their LE/BE
        # variants are probed separately via IANA_SUPPORTED.
        if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
            logger.log(
                TRACE,
                "Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
                encoding_iana,
            )
            continue

        if encoding_iana in {"utf_7"} and not bom_or_sig_available:
            logger.log(
                TRACE,
                "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
                encoding_iana,
            )
            continue

        try:
            is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
        except (ModuleNotFoundError, ImportError):
            logger.log(
                TRACE,
                "Encoding %s does not provide an IncrementalDecoder",
                encoding_iana,
            )
            continue

        # Fast sanity decode: prove the whole payload (or a 500 kB prefix for
        # very large single-byte payloads) decodes at all before chunk probing.
        try:
            if is_too_large_sequence and is_multi_byte_decoder is False:
                str(
                    (
                        sequences[: int(50e4)]
                        if strip_sig_or_bom is False
                        else sequences[len(sig_payload) : int(50e4)]
                    ),
                    encoding=encoding_iana,
                )
            else:
                decoded_payload = str(
                    (
                        sequences
                        if strip_sig_or_bom is False
                        else sequences[len(sig_payload) :]
                    ),
                    encoding=encoding_iana,
                )
        except (UnicodeDecodeError, LookupError) as e:
            if not isinstance(e, LookupError):
                logger.log(
                    TRACE,
                    "Code page %s does not fit given bytes sequence at ALL. %s",
                    encoding_iana,
                    str(e),
                )
            tested_but_hard_failure.append(encoding_iana)
            continue

        # Skip code pages that are near-identical to one that already
        # soft-failed: they would fail the same way.
        similar_soft_failure_test: bool = False

        for encoding_soft_failed in tested_but_soft_failure:
            if is_cp_similar(encoding_iana, encoding_soft_failed):
                similar_soft_failure_test = True
                break

        if similar_soft_failure_test:
            logger.log(
                TRACE,
                "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
                encoding_iana,
                encoding_soft_failed,
            )
            continue

        # Chunk start offsets sampled evenly across the payload.
        r_ = range(
            0 if not bom_or_sig_available else len(sig_payload),
            length,
            int(length / steps),
        )

        multi_byte_bonus: bool = (
            is_multi_byte_decoder
            and decoded_payload is not None
            and len(decoded_payload) < length
        )

        if multi_byte_bonus:
            logger.log(
                TRACE,
                "Code page %s is a multi byte encoding table and it appear that at least one character "
                "was encoded using n-bytes.",
                encoding_iana,
            )

        # Give up on an encoding once a quarter of the chunks (min. 2) exceed
        # the mess threshold.
        max_chunk_gave_up: int = int(len(r_) / 4)

        max_chunk_gave_up = max(max_chunk_gave_up, 2)
        early_stop_count: int = 0
        lazy_str_hard_failure: bool = False

        md_chunks: list[str] = []
        md_ratios: list[float] = []

        try:
            for chunk in cut_sequence_chunks(
                sequences,
                encoding_iana,
                r_,
                chunk_size,
                bom_or_sig_available,
                strip_sig_or_bom,
                sig_payload,
                is_multi_byte_decoder,
                decoded_payload,
            ):
                md_chunks.append(chunk)

                md_ratios.append(
                    mess_ratio(
                        chunk,
                        threshold,
                        explain is True and 1 <= len(cp_isolation) <= 2,
                    )
                )

                if md_ratios[-1] >= threshold:
                    early_stop_count += 1

                if (early_stop_count >= max_chunk_gave_up) or (
                    bom_or_sig_available and strip_sig_or_bom is False
                ):
                    break
        except (
            UnicodeDecodeError
        ) as e:  # Lazy str loading may have missed something there
            logger.log(
                TRACE,
                "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
                encoding_iana,
                str(e),
            )
            early_stop_count = max_chunk_gave_up
            lazy_str_hard_failure = True

        # We might want to check the sequence again with the whole content
        # Only if initial MD tests passes
        if (
            not lazy_str_hard_failure
            and is_too_large_sequence
            and not is_multi_byte_decoder
        ):
            try:
                sequences[int(50e3) :].decode(encoding_iana, errors="strict")
            except UnicodeDecodeError as e:
                logger.log(
                    TRACE,
                    "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
                    encoding_iana,
                    str(e),
                )
                tested_but_hard_failure.append(encoding_iana)
                continue

        mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0

        # Soft failure: decodes, but the decoded text looks too messy.
        if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
            tested_but_soft_failure.append(encoding_iana)
            logger.log(
                TRACE,
                "%s was excluded because of initial chaos probing. Gave up %i time(s). "
                "Computed mean chaos is %f %%.",
                encoding_iana,
                early_stop_count,
                round(mean_mess_ratio * 100, ndigits=3),
            )
            # Preparing those fallbacks in case we got nothing.
            if (
                enable_fallback
                and encoding_iana in ["ascii", "utf_8", specified_encoding]
                and not lazy_str_hard_failure
            ):
                fallback_entry = CharsetMatch(
                    sequences,
                    encoding_iana,
                    threshold,
                    False,
                    [],
                    decoded_payload,
                    preemptive_declaration=specified_encoding,
                )
                if encoding_iana == specified_encoding:
                    fallback_specified = fallback_entry
                elif encoding_iana == "ascii":
                    fallback_ascii = fallback_entry
                else:
                    fallback_u8 = fallback_entry
            continue

        logger.log(
            TRACE,
            "%s passed initial chaos probing. Mean measured chaos is %f %%",
            encoding_iana,
            round(mean_mess_ratio * 100, ndigits=3),
        )

        # Coherence (language) probing on the surviving candidate.
        if not is_multi_byte_decoder:
            target_languages: list[str] = encoding_languages(encoding_iana)
        else:
            target_languages = mb_encoding_languages(encoding_iana)

        if target_languages:
            logger.log(
                TRACE,
                "{} should target any language(s) of {}".format(
                    encoding_iana, str(target_languages)
                ),
            )

        cd_ratios = []

        # We shall skip the CD when its about ASCII
        # Most of the time its not relevant to run "language-detection" on it.
        if encoding_iana != "ascii":
            for chunk in md_chunks:
                chunk_languages = coherence_ratio(
                    chunk,
                    language_threshold,
                    ",".join(target_languages) if target_languages else None,
                )

                cd_ratios.append(chunk_languages)

        cd_ratios_merged = merge_coherence_ratios(cd_ratios)

        if cd_ratios_merged:
            logger.log(
                TRACE,
                "We detected language {} using {}".format(
                    cd_ratios_merged, encoding_iana
                ),
            )

        current_match = CharsetMatch(
            sequences,
            encoding_iana,
            mean_mess_ratio,
            bom_or_sig_available,
            cd_ratios_merged,
            (
                decoded_payload
                if (
                    is_too_large_sequence is False
                    or encoding_iana in [specified_encoding, "ascii", "utf_8"]
                )
                else None
            ),
            preemptive_declaration=specified_encoding,
        )

        results.append(current_match)

        if (
            encoding_iana in [specified_encoding, "ascii", "utf_8"]
            and mean_mess_ratio < 0.1
        ):
            # If md says nothing to worry about, then... stop immediately!
            if mean_mess_ratio == 0.0:
                logger.debug(
                    "Encoding detection: %s is most likely the one.",
                    current_match.encoding,
                )
                if explain:  # Defensive: ensure exit path clean handler
                    logger.removeHandler(explain_handler)
                    logger.setLevel(previous_logger_level)
                return CharsetMatches([current_match])

            early_stop_results.append(current_match)

            # Early stop only once the priority candidates (declared encoding,
            # ascii, utf_8) have all been evaluated.
            if (
                len(early_stop_results)
                and (specified_encoding is None or specified_encoding in tested)
                and "ascii" in tested
                and "utf_8" in tested
            ):
                probable_result: CharsetMatch = early_stop_results.best()  # type: ignore[assignment]
                logger.debug(
                    "Encoding detection: %s is most likely the one.",
                    probable_result.encoding,
                )
                if explain:  # Defensive: ensure exit path clean handler
                    logger.removeHandler(explain_handler)
                    logger.setLevel(previous_logger_level)

                return CharsetMatches([probable_result])

        # A matching BOM/SIG is treated as decisive evidence.
        if encoding_iana == sig_encoding:
            logger.debug(
                "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
                "the beginning of the sequence.",
                encoding_iana,
            )
            if explain:  # Defensive: ensure exit path clean handler
                logger.removeHandler(explain_handler)
                logger.setLevel(previous_logger_level)
            return CharsetMatches([results[encoding_iana]])

    # Nothing survived probing: fall back on the pre-built candidates, with the
    # declared encoding preferred over utf_8 over ascii.
    if len(results) == 0:
        if fallback_u8 or fallback_ascii or fallback_specified:
            logger.log(
                TRACE,
                "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
            )

        if fallback_specified:
            logger.debug(
                "Encoding detection: %s will be used as a fallback match",
                fallback_specified.encoding,
            )
            results.append(fallback_specified)
        elif (
            (fallback_u8 and fallback_ascii is None)
            or (
                fallback_u8
                and fallback_ascii
                and fallback_u8.fingerprint != fallback_ascii.fingerprint
            )
            or (fallback_u8 is not None)
        ):
            logger.debug("Encoding detection: utf_8 will be used as a fallback match")
            results.append(fallback_u8)
        elif fallback_ascii:
            logger.debug("Encoding detection: ascii will be used as a fallback match")
            results.append(fallback_ascii)

    if results:
        logger.debug(
            "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
            results.best().encoding,  # type: ignore
            len(results) - 1,
        )
    else:
        logger.debug("Encoding detection: Unable to determine any suitable charset.")

    if explain:
        logger.removeHandler(explain_handler)
        logger.setLevel(previous_logger_level)

    return results
  470. def from_fp(
  471. fp: BinaryIO,
  472. steps: int = 5,
  473. chunk_size: int = 512,
  474. threshold: float = 0.20,
  475. cp_isolation: list[str] | None = None,
  476. cp_exclusion: list[str] | None = None,
  477. preemptive_behaviour: bool = True,
  478. explain: bool = False,
  479. language_threshold: float = 0.1,
  480. enable_fallback: bool = True,
  481. ) -> CharsetMatches:
  482. """
  483. Same thing than the function from_bytes but using a file pointer that is already ready.
  484. Will not close the file pointer.
  485. """
  486. return from_bytes(
  487. fp.read(),
  488. steps,
  489. chunk_size,
  490. threshold,
  491. cp_isolation,
  492. cp_exclusion,
  493. preemptive_behaviour,
  494. explain,
  495. language_threshold,
  496. enable_fallback,
  497. )
  498. def from_path(
  499. path: str | bytes | PathLike, # type: ignore[type-arg]
  500. steps: int = 5,
  501. chunk_size: int = 512,
  502. threshold: float = 0.20,
  503. cp_isolation: list[str] | None = None,
  504. cp_exclusion: list[str] | None = None,
  505. preemptive_behaviour: bool = True,
  506. explain: bool = False,
  507. language_threshold: float = 0.1,
  508. enable_fallback: bool = True,
  509. ) -> CharsetMatches:
  510. """
  511. Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode.
  512. Can raise IOError.
  513. """
  514. with open(path, "rb") as fp:
  515. return from_fp(
  516. fp,
  517. steps,
  518. chunk_size,
  519. threshold,
  520. cp_isolation,
  521. cp_exclusion,
  522. preemptive_behaviour,
  523. explain,
  524. language_threshold,
  525. enable_fallback,
  526. )
  527. def is_binary(
  528. fp_or_path_or_payload: PathLike | str | BinaryIO | bytes, # type: ignore[type-arg]
  529. steps: int = 5,
  530. chunk_size: int = 512,
  531. threshold: float = 0.20,
  532. cp_isolation: list[str] | None = None,
  533. cp_exclusion: list[str] | None = None,
  534. preemptive_behaviour: bool = True,
  535. explain: bool = False,
  536. language_threshold: float = 0.1,
  537. enable_fallback: bool = False,
  538. ) -> bool:
  539. """
  540. Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string.
  541. Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match
  542. are disabled to be stricter around ASCII-compatible but unlikely to be a string.
  543. """
  544. if isinstance(fp_or_path_or_payload, (str, PathLike)):
  545. guesses = from_path(
  546. fp_or_path_or_payload,
  547. steps=steps,
  548. chunk_size=chunk_size,
  549. threshold=threshold,
  550. cp_isolation=cp_isolation,
  551. cp_exclusion=cp_exclusion,
  552. preemptive_behaviour=preemptive_behaviour,
  553. explain=explain,
  554. language_threshold=language_threshold,
  555. enable_fallback=enable_fallback,
  556. )
  557. elif isinstance(
  558. fp_or_path_or_payload,
  559. (
  560. bytes,
  561. bytearray,
  562. ),
  563. ):
  564. guesses = from_bytes(
  565. fp_or_path_or_payload,
  566. steps=steps,
  567. chunk_size=chunk_size,
  568. threshold=threshold,
  569. cp_isolation=cp_isolation,
  570. cp_exclusion=cp_exclusion,
  571. preemptive_behaviour=preemptive_behaviour,
  572. explain=explain,
  573. language_threshold=language_threshold,
  574. enable_fallback=enable_fallback,
  575. )
  576. else:
  577. guesses = from_fp(
  578. fp_or_path_or_payload,
  579. steps=steps,
  580. chunk_size=chunk_size,
  581. threshold=threshold,
  582. cp_isolation=cp_isolation,
  583. cp_exclusion=cp_exclusion,
  584. preemptive_behaviour=preemptive_behaviour,
  585. explain=explain,
  586. language_threshold=language_threshold,
  587. enable_fallback=enable_fallback,
  588. )
  589. return not guesses