utils.py

import importlib
import logging
import unicodedata
from codecs import IncrementalDecoder
from encodings.aliases import aliases
from functools import lru_cache
from re import findall
from typing import Generator, List, Optional, Set, Tuple, Union

from _multibytecodec import MultibyteIncrementalDecoder

from .constant import (
    ENCODING_MARKS,
    IANA_SUPPORTED_SIMILAR,
    RE_POSSIBLE_ENCODING_INDICATION,
    UNICODE_RANGES_COMBINED,
    UNICODE_SECONDARY_RANGE_KEYWORD,
    UTF8_MAXIMAL_ALLOCATION,
)

@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_accentuated(character: str) -> bool:
    try:
        description: str = unicodedata.name(character)
    except ValueError:
        return False
    return (
        "WITH GRAVE" in description
        or "WITH ACUTE" in description
        or "WITH CEDILLA" in description
        or "WITH DIAERESIS" in description
        or "WITH CIRCUMFLEX" in description
        or "WITH TILDE" in description
        or "WITH MACRON" in description
        or "WITH RING ABOVE" in description
    )

@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def remove_accent(character: str) -> str:
    decomposed: str = unicodedata.decomposition(character)
    if not decomposed:
        return character

    codes: List[str] = decomposed.split(" ")

    return chr(int(codes[0], 16))

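# Illustrative usage sketch, not part of the upstream module. The two helpers
# above rely on the Unicode character name and canonical decomposition: for
# example, "é" (U+00E9, LATIN SMALL LETTER E WITH ACUTE) decomposes to
# "0065 0301", so the base character is chr(0x0065).
#
#   >>> is_accentuated("é")
#   True
#   >>> remove_accent("é")
#   'e'
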
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def unicode_range(character: str) -> Optional[str]:
    """
    Retrieve the official Unicode range name for a single character.
    """
    character_ord: int = ord(character)

    for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
        if character_ord in ord_range:
            return range_name

    return None

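# Illustrative usage sketch, not part of the upstream module. The exact string
# returned depends on the range names defined in constant.UNICODE_RANGES_COMBINED;
# for an ASCII letter it is expected to be the "Basic Latin" block:
#
#   >>> unicode_range("a")
#   'Basic Latin'
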
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_latin(character: str) -> bool:
    try:
        description: str = unicodedata.name(character)
    except ValueError:
        return False
    return "LATIN" in description


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_punctuation(character: str) -> bool:
    character_category: str = unicodedata.category(character)

    if "P" in character_category:
        return True

    character_range: Optional[str] = unicode_range(character)

    if character_range is None:
        return False

    return "Punctuation" in character_range


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_symbol(character: str) -> bool:
    character_category: str = unicodedata.category(character)

    if "S" in character_category or "N" in character_category:
        return True

    character_range: Optional[str] = unicode_range(character)

    if character_range is None:
        return False

    return "Forms" in character_range and character_category != "Lo"


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_emoticon(character: str) -> bool:
    character_range: Optional[str] = unicode_range(character)

    if character_range is None:
        return False

    return "Emoticons" in character_range or "Pictographs" in character_range


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_separator(character: str) -> bool:
    if character.isspace() or character in {"|", "+", "<", ">"}:
        return True

    character_category: str = unicodedata.category(character)

    return "Z" in character_category or character_category in {"Po", "Pd", "Pc"}


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_case_variable(character: str) -> bool:
    return character.islower() != character.isupper()


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_cjk(character: str) -> bool:
    try:
        character_name = unicodedata.name(character)
    except ValueError:
        return False

    return "CJK" in character_name


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hiragana(character: str) -> bool:
    try:
        character_name = unicodedata.name(character)
    except ValueError:
        return False

    return "HIRAGANA" in character_name


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_katakana(character: str) -> bool:
    try:
        character_name = unicodedata.name(character)
    except ValueError:
        return False

    return "KATAKANA" in character_name


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hangul(character: str) -> bool:
    try:
        character_name = unicodedata.name(character)
    except ValueError:
        return False

    return "HANGUL" in character_name


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_thai(character: str) -> bool:
    try:
        character_name = unicodedata.name(character)
    except ValueError:
        return False

    return "THAI" in character_name


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_arabic(character: str) -> bool:
    try:
        character_name = unicodedata.name(character)
    except ValueError:
        return False

    return "ARABIC" in character_name


@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_arabic_isolated_form(character: str) -> bool:
    try:
        character_name = unicodedata.name(character)
    except ValueError:
        return False

    return "ARABIC" in character_name and "ISOLATED FORM" in character_name


@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
def is_unicode_range_secondary(range_name: str) -> bool:
    return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)

@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_unprintable(character: str) -> bool:
    return (
        character.isspace() is False  # includes \n \t \r \v
        and character.isprintable() is False
        and character != "\x1A"  # Why? It's the ASCII substitute character.
        and character != "\ufeff"  # bug discovered in Python:
        # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.
    )

def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> Optional[str]:
    """
    Extract any encoding declared in the first n bytes, using an ASCII-only decoder.
    """
    if not isinstance(sequence, bytes):
        raise TypeError

    seq_len: int = len(sequence)

    results: List[str] = findall(
        RE_POSSIBLE_ENCODING_INDICATION,
        sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
    )

    if len(results) == 0:
        return None

    for specified_encoding in results:
        specified_encoding = specified_encoding.lower().replace("-", "_")

        encoding_alias: str
        encoding_iana: str

        for encoding_alias, encoding_iana in aliases.items():
            if encoding_alias == specified_encoding:
                return encoding_iana
            if encoding_iana == specified_encoding:
                return encoding_iana

    return None

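# Illustrative usage sketch, not part of the upstream module. Whether a match is
# found depends on RE_POSSIBLE_ENCODING_INDICATION in constant.py; a typical XML
# prolog declaring its charset is the intended case, and the declared name is
# normalized through encodings.aliases:
#
#   >>> any_specified_encoding(b'<?xml version="1.0" encoding="utf-8"?>')
#   'utf_8'
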
@lru_cache(maxsize=128)
def is_multi_byte_encoding(name: str) -> bool:
    """
    Verify whether a specific encoding is a multi-byte one, based on its IANA name.
    """
    return name in {
        "utf_8",
        "utf_8_sig",
        "utf_16",
        "utf_16_be",
        "utf_16_le",
        "utf_32",
        "utf_32_le",
        "utf_32_be",
        "utf_7",
    } or issubclass(
        importlib.import_module("encodings.{}".format(name)).IncrementalDecoder,
        MultibyteIncrementalDecoder,
    )

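# Illustrative usage sketch, not part of the upstream module. UTF encodings are
# short-circuited by the literal set above; anything else is resolved by checking
# whether its codec ships a MultibyteIncrementalDecoder (CJK codecs do,
# single-byte code pages such as cp1252 do not):
#
#   >>> is_multi_byte_encoding("utf_8")
#   True
#   >>> is_multi_byte_encoding("cp1252")
#   False
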
def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]:
    """
    Identify and extract SIG/BOM in given sequence.
    """
    for iana_encoding in ENCODING_MARKS:
        marks: Union[bytes, List[bytes]] = ENCODING_MARKS[iana_encoding]

        if isinstance(marks, bytes):
            marks = [marks]

        for mark in marks:
            if sequence.startswith(mark):
                return iana_encoding, mark

    return None, b""


def should_strip_sig_or_bom(iana_encoding: str) -> bool:
    return iana_encoding not in {"utf_16", "utf_32"}

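# Illustrative usage sketch, not part of the upstream module. The exact keys and
# byte marks come from ENCODING_MARKS in constant.py; a UTF-8 BOM prefix would
# typically be reported as below, and should_strip_sig_or_bom tells the caller
# whether the mark must be removed before decoding (UTF-16/UTF-32 decoders
# consume their BOM themselves):
#
#   >>> identify_sig_or_bom(b"\xef\xbb\xbfhello")
#   ('utf_8', b'\xef\xbb\xbf')
#   >>> should_strip_sig_or_bom("utf_8")
#   True
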
def iana_name(cp_name: str, strict: bool = True) -> str:
    cp_name = cp_name.lower().replace("-", "_")

    encoding_alias: str
    encoding_iana: str

    for encoding_alias, encoding_iana in aliases.items():
        if cp_name in [encoding_alias, encoding_iana]:
            return encoding_iana

    if strict:
        raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name))

    return cp_name

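# Illustrative usage sketch, not part of the upstream module. The lookup is a
# plain scan of encodings.aliases after lower-casing and replacing "-" with "_":
#
#   >>> iana_name("UTF-8")
#   'utf_8'
#   >>> iana_name("not-a-codec", strict=False)
#   'not_a_codec'
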
def range_scan(decoded_sequence: str) -> List[str]:
    ranges: Set[str] = set()

    for character in decoded_sequence:
        character_range: Optional[str] = unicode_range(character)

        if character_range is None:
            continue

        ranges.add(character_range)

    return list(ranges)

def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
    if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
        return 0.0

    decoder_a = importlib.import_module(
        "encodings.{}".format(iana_name_a)
    ).IncrementalDecoder
    decoder_b = importlib.import_module(
        "encodings.{}".format(iana_name_b)
    ).IncrementalDecoder

    id_a: IncrementalDecoder = decoder_a(errors="ignore")
    id_b: IncrementalDecoder = decoder_b(errors="ignore")

    character_match_count: int = 0

    for i in range(255):
        to_be_decoded: bytes = bytes([i])
        if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
            character_match_count += 1

    return character_match_count / 254

def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
    """
    Determine if two code pages are at least 80% similar. The IANA_SUPPORTED_SIMILAR dict was generated using
    the function cp_similarity.
    """
    return (
        iana_name_a in IANA_SUPPORTED_SIMILAR
        and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
    )

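# Illustrative usage sketch, not part of the upstream module. cp_similarity
# feeds every single-byte value to both incremental decoders and reports the
# fraction of bytes that decode identically, while is_cp_similar only consults
# the precomputed IANA_SUPPORTED_SIMILAR table, so it is cheap at runtime.
# Closely related Latin code pages are the intended positive case:
#
#   >>> cp_similarity("cp1252", "latin_1")  # expected to be well above 0.8
#   >>> is_cp_similar("cp1252", "latin_1")  # True if listed in the table
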
def set_logging_handler(
    name: str = "charset_normalizer",
    level: int = logging.INFO,
    format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
) -> None:
    logger = logging.getLogger(name)
    logger.setLevel(level)

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(format_string))
    logger.addHandler(handler)

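# Illustrative usage sketch, not part of the upstream module. Attaching a
# stream handler to the "charset_normalizer" logger makes the library's own
# log records visible; note that calling it twice adds two handlers:
#
#   >>> set_logging_handler(level=logging.DEBUG)
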
def cut_sequence_chunks(
    sequences: bytes,
    encoding_iana: str,
    offsets: range,
    chunk_size: int,
    bom_or_sig_available: bool,
    strip_sig_or_bom: bool,
    sig_payload: bytes,
    is_multi_byte_decoder: bool,
    decoded_payload: Optional[str] = None,
) -> Generator[str, None, None]:
    if decoded_payload and is_multi_byte_decoder is False:
        for i in offsets:
            chunk = decoded_payload[i : i + chunk_size]
            if not chunk:
                break
            yield chunk
    else:
        for i in offsets:
            chunk_end = i + chunk_size
            if chunk_end > len(sequences) + 8:
                continue

            cut_sequence = sequences[i : i + chunk_size]

            if bom_or_sig_available and strip_sig_or_bom is False:
                cut_sequence = sig_payload + cut_sequence

            chunk = cut_sequence.decode(
                encoding_iana,
                errors="ignore" if is_multi_byte_decoder else "strict",
            )

            # multi-byte bad cutting detector and adjustment
            # not the cleanest way to perform that fix but clever enough for now.
            if is_multi_byte_decoder and i > 0:
                chunk_partial_size_chk: int = min(chunk_size, 16)

                if (
                    decoded_payload
                    and chunk[:chunk_partial_size_chk] not in decoded_payload
                ):
                    for j in range(i, i - 4, -1):
                        cut_sequence = sequences[j:chunk_end]

                        if bom_or_sig_available and strip_sig_or_bom is False:
                            cut_sequence = sig_payload + cut_sequence

                        chunk = cut_sequence.decode(encoding_iana, errors="ignore")

                        if chunk[:chunk_partial_size_chk] in decoded_payload:
                            break

            yield chunk
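
# Illustrative usage sketch, not part of the upstream module. The generator
# yields decoded text windows over `sequences`; with a multi-byte decoder, a
# chunk whose start does not appear in `decoded_payload` is re-cut up to three
# bytes earlier so that a code point is not split in half. A minimal,
# hypothetical call for a UTF-8 payload could look like:
#
#   >>> data = ("abc" * 100).encode("utf_8")
#   >>> chunks = list(
#   ...     cut_sequence_chunks(
#   ...         data,
#   ...         "utf_8",
#   ...         offsets=range(0, len(data), 64),
#   ...         chunk_size=64,
#   ...         bom_or_sig_available=False,
#   ...         strip_sig_or_bom=False,
#   ...         sig_payload=b"",
#   ...         is_multi_byte_decoder=True,
#   ...         decoded_payload=data.decode("utf_8"),
#   ...     )
#   ... )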