jslexer.py

  1. """
  2. babel.messages.jslexer
  3. ~~~~~~~~~~~~~~~~~~~~~~
  4. A simple JavaScript 1.5 lexer which is used for the JavaScript
  5. extractor.
  6. :copyright: (c) 2013-2025 by the Babel Team.
  7. :license: BSD, see LICENSE for more details.
  8. """
  9. from __future__ import annotations
  10. import re
  11. from collections.abc import Generator
  12. from typing import NamedTuple
  13. operators: list[str] = sorted([
  14. '+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',
  15. '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',
  16. '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
  17. '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':',
  18. ], key=len, reverse=True)
  19. escapes: dict[str, str] = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}
  20. name_re = re.compile(r'[\w$_][\w\d$_]*', re.UNICODE)
  21. dotted_name_re = re.compile(r'[\w$_][\w\d$_.]*[\w\d$_.]', re.UNICODE)
  22. division_re = re.compile(r'/=?')
  23. regex_re = re.compile(r'/(?:[^/\\]*(?:\\.[^/\\]*)*)/[a-zA-Z]*', re.DOTALL)
  24. line_re = re.compile(r'(\r\n|\n|\r)')
  25. line_join_re = re.compile(r'\\' + line_re.pattern)
  26. uni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')
  27. hex_escape_re = re.compile(r'[a-fA-F0-9]{1,2}')
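
# Note: `uni_escape_re` and `hex_escape_re` are unanchored and bounded
# ({1,4} / {1,2}); they are applied with `.match(string, pos)` in
# `unquote_string` below, so they consume however many hex digits actually
# follow a `\u` or `\x` escape.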


class Token(NamedTuple):
    type: str
    value: str
    lineno: int


_rules: list[tuple[str | None, re.Pattern[str]]] = [
    (None, re.compile(r'\s+', re.UNICODE)),
    (None, re.compile(r'<!--.*')),
    ('linecomment', re.compile(r'//.*')),
    ('multilinecomment', re.compile(r'/\*.*?\*/', re.UNICODE | re.DOTALL)),
    ('dotted_name', dotted_name_re),
    ('name', name_re),
    ('number', re.compile(r'''(
        (?:0|[1-9]\d*)
        (\.\d+)?
        ([eE][-+]?\d+)? |
        (0x[a-fA-F0-9]+)
    )''', re.VERBOSE)),
    ('jsx_tag', re.compile(r'(?:</?[^>\s]+|/>)', re.I)),  # May be mangled in `get_rules`
    ('operator', re.compile(r'(%s)' % '|'.join(map(re.escape, operators)))),
    ('template_string', re.compile(r'''`(?:[^`\\]*(?:\\.[^`\\]*)*)`''', re.UNICODE)),
    ('string', re.compile(r'''(
        '(?:[^'\\]*(?:\\.[^'\\]*)*)' |
        "(?:[^"\\]*(?:\\.[^"\\]*)*)"
    )''', re.VERBOSE | re.DOTALL)),
]
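
# Rule order matters: whitespace and comments are matched first, and
# `dotted_name` is tried before `name` so that e.g. `foo.bar.baz` lexes as
# a single token. The `operators` list above is sorted longest-first so
# that `>>>=` wins over `>>` and `>`.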


def get_rules(jsx: bool, dotted: bool, template_string: bool) -> list[tuple[str | None, re.Pattern[str]]]:
    """
    Get a tokenization rule list given the passed syntax options.

    Internal to this module.
    """
    rules = []
    for token_type, rule in _rules:
        if not jsx and token_type and 'jsx' in token_type:
            continue
        if not template_string and token_type == 'template_string':
            continue
        if token_type == 'dotted_name':
            if not dotted:
                continue
            token_type = 'name'
        rules.append((token_type, rule))
    return rules
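
# For example, get_rules(jsx=False, dotted=False, template_string=False)
# drops the `jsx_tag`, `dotted_name` and `template_string` rules entirely,
# while dotted=True keeps the `dotted_name` pattern but relabels its
# matches as plain `name` tokens.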


def indicates_division(token: Token) -> bool:
    """Helper for the tokenizer to decide whether the current token may be
    followed by a division operator.
    """
    if token.type == 'operator':
        return token.value in (')', ']', '}', '++', '--')
    return token.type in ('name', 'number', 'string', 'regexp')
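
# This resolves the classic `/` ambiguity in JavaScript (illustrative):
#   a = b / c;     // after the name `b`, a `/` must be division
#   a = /regex/g;  // after the operator `=`, a `/` starts a regexp literal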


def unquote_string(string: str) -> str:
    """Unquote a string with JavaScript rules. The string has to start and
    end with string delimiters (``'``, ``"``, or the back-tick/grave accent
    for template strings).
    """
    assert string and string[0] == string[-1] and string[0] in '"\'`', \
        'string provided is not properly delimited'
    string = line_join_re.sub('\\1', string[1:-1])
    result: list[str] = []
    add = result.append
    pos = 0

    while True:
        # scan for the next escape
        escape_pos = string.find('\\', pos)
        if escape_pos < 0:
            break
        add(string[pos:escape_pos])

        # check which character is escaped
        next_char = string[escape_pos + 1]
        if next_char in escapes:
            add(escapes[next_char])

        # unicode escapes. try to consume up to four hexadecimal
        # characters and interpret them as a unicode code point. If
        # there is no such code point, put all the consumed characters
        # into the string.
        elif next_char in 'uU':
            escaped = uni_escape_re.match(string, escape_pos + 2)
            if escaped is not None:
                escaped_value = escaped.group()
                if len(escaped_value) == 4:
                    try:
                        add(chr(int(escaped_value, 16)))
                    except ValueError:
                        pass
                    else:
                        pos = escape_pos + 6
                        continue
                add(next_char + escaped_value)
                pos = escaped.end()
                continue
            else:
                add(next_char)

        # hex escapes. conversion from 2-digit hex to char is infallible
        elif next_char in 'xX':
            escaped = hex_escape_re.match(string, escape_pos + 2)
            if escaped is not None:
                escaped_value = escaped.group()
                add(chr(int(escaped_value, 16)))
                pos = escape_pos + 2 + len(escaped_value)
                continue
            else:
                add(next_char)

        # bogus escape. Just remove the backslash.
        else:
            add(next_char)

        pos = escape_pos + 2

    if pos < len(string):
        add(string[pos:])

    return ''.join(result)
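
# Illustrative examples of the behaviour above:
#   unquote_string('"a\\tb"')    -> 'a\tb'   (named escape)
#   unquote_string("'\\u00e9'")  -> 'é'      (unicode escape)
#   unquote_string('`plain`')    -> 'plain'  (template string, no escapes)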


def tokenize(source: str, jsx: bool = True, dotted: bool = True, template_string: bool = True, lineno: int = 1) -> Generator[Token, None, None]:
    """
    Tokenize JavaScript/JSX source. Returns a generator of tokens.

    :param source: The JavaScript source to tokenize.
    :param jsx: Enable (limited) JSX parsing.
    :param dotted: Read dotted names as single name tokens.
    :param template_string: Support ES6 template strings.
    :param lineno: The starting line number (optional).
    """
    may_divide = False
    pos = 0
    end = len(source)
    rules = get_rules(jsx=jsx, dotted=dotted, template_string=template_string)

    while pos < end:
        # handle regular rules first
        for token_type, rule in rules:  # noqa: B007
            match = rule.match(source, pos)
            if match is not None:
                break
        # if we don't have a match, we don't give up yet, but check for
        # division operators or regular expression literals, based on
        # the status of `may_divide`, which is determined by the last
        # processed non-whitespace token using `indicates_division`.
        else:
            if may_divide:
                match = division_re.match(source, pos)
                token_type = 'operator'
            else:
                match = regex_re.match(source, pos)
                token_type = 'regexp'
            if match is None:
                # whoops, invalid syntax. jump one char ahead and try again.
                pos += 1
                continue

        token_value = match.group()
        if token_type is not None:
            token = Token(token_type, token_value, lineno)
            may_divide = indicates_division(token)
            yield token
        lineno += len(line_re.findall(token_value))
        pos = match.end()
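
# Illustrative usage, traced against the rules above:
#   for tok in tokenize("msg = gettext('Hello')"):
#       print(tok)
#
#   Token(type='name', value='msg', lineno=1)
#   Token(type='operator', value='=', lineno=1)
#   Token(type='name', value='gettext', lineno=1)
#   Token(type='operator', value='(', lineno=1)
#   Token(type='string', value="'Hello'", lineno=1)
#   Token(type='operator', value=')', lineno=1)
#
# Whitespace matches a rule whose token type is None, so it is consumed
# but never yielded.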