tokenizer.py 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708
  1. # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
  2. # Copyright (C) 2003-2017 Nominum, Inc.
  3. #
  4. # Permission to use, copy, modify, and distribute this software and its
  5. # documentation for any purpose with or without fee is hereby granted,
  6. # provided that the above copyright notice and this permission notice
  7. # appear in all copies.
  8. #
  9. # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
  10. # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
  12. # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
  15. # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. """Tokenize DNS zone file format"""
  17. import io
  18. import sys
  19. from typing import Any, List, Optional, Tuple
  20. import dns.exception
  21. import dns.name
  22. import dns.ttl
# Characters that terminate an unquoted token.
_DELIMITERS = {" ", "\t", "\n", ";", "(", ")", '"'}
# Inside a quoted string only the closing quote terminates the token.
_QUOTING_DELIMITERS = {'"'}

# Token type codes (Token.ttype values).
EOF = 0
EOL = 1
WHITESPACE = 2
IDENTIFIER = 3
QUOTED_STRING = 4
COMMENT = 5
DELIMITER = 6
class UngetBufferFull(dns.exception.DNSException):
    """An attempt was made to unget a token when the unget buffer was full."""
  34. class Token:
  35. """A DNS zone file format token.
  36. ttype: The token type
  37. value: The token value
  38. has_escape: Does the token value contain escapes?
  39. """
  40. def __init__(
  41. self,
  42. ttype: int,
  43. value: Any = "",
  44. has_escape: bool = False,
  45. comment: Optional[str] = None,
  46. ):
  47. """Initialize a token instance."""
  48. self.ttype = ttype
  49. self.value = value
  50. self.has_escape = has_escape
  51. self.comment = comment
  52. def is_eof(self) -> bool:
  53. return self.ttype == EOF
  54. def is_eol(self) -> bool:
  55. return self.ttype == EOL
  56. def is_whitespace(self) -> bool:
  57. return self.ttype == WHITESPACE
  58. def is_identifier(self) -> bool:
  59. return self.ttype == IDENTIFIER
  60. def is_quoted_string(self) -> bool:
  61. return self.ttype == QUOTED_STRING
  62. def is_comment(self) -> bool:
  63. return self.ttype == COMMENT
  64. def is_delimiter(self) -> bool: # pragma: no cover (we don't return delimiters yet)
  65. return self.ttype == DELIMITER
  66. def is_eol_or_eof(self) -> bool:
  67. return self.ttype == EOL or self.ttype == EOF
  68. def __eq__(self, other):
  69. if not isinstance(other, Token):
  70. return False
  71. return self.ttype == other.ttype and self.value == other.value
  72. def __ne__(self, other):
  73. if not isinstance(other, Token):
  74. return True
  75. return self.ttype != other.ttype or self.value != other.value
  76. def __str__(self):
  77. return '%d "%s"' % (self.ttype, self.value)
  78. def unescape(self) -> "Token":
  79. if not self.has_escape:
  80. return self
  81. unescaped = ""
  82. l = len(self.value)
  83. i = 0
  84. while i < l:
  85. c = self.value[i]
  86. i += 1
  87. if c == "\\":
  88. if i >= l: # pragma: no cover (can't happen via get())
  89. raise dns.exception.UnexpectedEnd
  90. c = self.value[i]
  91. i += 1
  92. if c.isdigit():
  93. if i >= l:
  94. raise dns.exception.UnexpectedEnd
  95. c2 = self.value[i]
  96. i += 1
  97. if i >= l:
  98. raise dns.exception.UnexpectedEnd
  99. c3 = self.value[i]
  100. i += 1
  101. if not (c2.isdigit() and c3.isdigit()):
  102. raise dns.exception.SyntaxError
  103. codepoint = int(c) * 100 + int(c2) * 10 + int(c3)
  104. if codepoint > 255:
  105. raise dns.exception.SyntaxError
  106. c = chr(codepoint)
  107. unescaped += c
  108. return Token(self.ttype, unescaped)
  109. def unescape_to_bytes(self) -> "Token":
  110. # We used to use unescape() for TXT-like records, but this
  111. # caused problems as we'd process DNS escapes into Unicode code
  112. # points instead of byte values, and then a to_text() of the
  113. # processed data would not equal the original input. For
  114. # example, \226 in the TXT record would have a to_text() of
  115. # \195\162 because we applied UTF-8 encoding to Unicode code
  116. # point 226.
  117. #
  118. # We now apply escapes while converting directly to bytes,
  119. # avoiding this double encoding.
  120. #
  121. # This code also handles cases where the unicode input has
  122. # non-ASCII code-points in it by converting it to UTF-8. TXT
  123. # records aren't defined for Unicode, but this is the best we
  124. # can do to preserve meaning. For example,
  125. #
  126. # foo\u200bbar
  127. #
  128. # (where \u200b is Unicode code point 0x200b) will be treated
  129. # as if the input had been the UTF-8 encoding of that string,
  130. # namely:
  131. #
  132. # foo\226\128\139bar
  133. #
  134. unescaped = b""
  135. l = len(self.value)
  136. i = 0
  137. while i < l:
  138. c = self.value[i]
  139. i += 1
  140. if c == "\\":
  141. if i >= l: # pragma: no cover (can't happen via get())
  142. raise dns.exception.UnexpectedEnd
  143. c = self.value[i]
  144. i += 1
  145. if c.isdigit():
  146. if i >= l:
  147. raise dns.exception.UnexpectedEnd
  148. c2 = self.value[i]
  149. i += 1
  150. if i >= l:
  151. raise dns.exception.UnexpectedEnd
  152. c3 = self.value[i]
  153. i += 1
  154. if not (c2.isdigit() and c3.isdigit()):
  155. raise dns.exception.SyntaxError
  156. codepoint = int(c) * 100 + int(c2) * 10 + int(c3)
  157. if codepoint > 255:
  158. raise dns.exception.SyntaxError
  159. unescaped += b"%c" % (codepoint)
  160. else:
  161. # Note that as mentioned above, if c is a Unicode
  162. # code point outside of the ASCII range, then this
  163. # += is converting that code point to its UTF-8
  164. # encoding and appending multiple bytes to
  165. # unescaped.
  166. unescaped += c.encode()
  167. else:
  168. unescaped += c.encode()
  169. return Token(self.ttype, bytes(unescaped))
class Tokenizer:
    """A DNS zone file format tokenizer.

    A token object is basically a (type, value) tuple.  The valid
    types are EOF, EOL, WHITESPACE, IDENTIFIER, QUOTED_STRING,
    COMMENT, and DELIMITER.

    file: The file to tokenize

    ungotten_char: The most recently ungotten character, or None.

    ungotten_token: The most recently ungotten token, or None.

    multiline: The current multiline level.  This value is increased
    by one every time a '(' delimiter is read, and decreased by one every time
    a ')' delimiter is read.

    quoting: This variable is true if the tokenizer is currently
    reading a quoted string.

    eof: This variable is true if the tokenizer has encountered EOF.

    delimiters: The current delimiter dictionary.

    line_number: The current line number

    filename: A filename that will be returned by the where() method.

    idna_codec: A dns.name.IDNACodec, specifies the IDNA
    encoder/decoder.  If None, the default IDNA 2003
    encoder/decoder is used.
    """

    def __init__(
        self,
        f: Any = sys.stdin,
        filename: Optional[str] = None,
        idna_codec: Optional[dns.name.IDNACodec] = None,
    ):
        """Initialize a tokenizer instance.

        f: The file to tokenize.  The default is sys.stdin.
        This parameter may also be a string, in which case the tokenizer
        will take its input from the contents of the string.

        filename: the name of the filename that the where() method
        will return.

        idna_codec: A dns.name.IDNACodec, specifies the IDNA
        encoder/decoder.  If None, the default IDNA 2003
        encoder/decoder is used.
        """
        # Wrap str/bytes input in a StringIO so everything downstream can
        # treat the input uniformly as a text file object.
        if isinstance(f, str):
            f = io.StringIO(f)
            if filename is None:
                filename = "<string>"
        elif isinstance(f, bytes):
            f = io.StringIO(f.decode())
            if filename is None:
                filename = "<string>"
        else:
            if filename is None:
                if f is sys.stdin:
                    filename = "<stdin>"
                else:
                    filename = "<file>"
        self.file = f
        # One-character and one-token pushback buffers (see _unget_char/unget).
        self.ungotten_char: Optional[str] = None
        self.ungotten_token: Optional[Token] = None
        self.multiline = 0
        self.quoting = False
        self.eof = False
        self.delimiters = _DELIMITERS
        self.line_number = 1
        assert filename is not None
        self.filename = filename
        if idna_codec is None:
            self.idna_codec: dns.name.IDNACodec = dns.name.IDNA_2003
        else:
            self.idna_codec = idna_codec

    def _get_char(self) -> str:
        """Read a character from input.

        Returns "" at end of input.  Tracks line_number as a side effect.
        """
        if self.ungotten_char is None:
            if self.eof:
                c = ""
            else:
                c = self.file.read(1)
                if c == "":
                    self.eof = True
                elif c == "\n":
                    self.line_number += 1
        else:
            # Serve the pushed-back character first.
            c = self.ungotten_char
            self.ungotten_char = None
        return c

    def where(self) -> Tuple[str, int]:
        """Return the current location in the input.

        Returns a (string, int) tuple.  The first item is the filename of
        the input, the second is the current line number.
        """
        return (self.filename, self.line_number)

    def _unget_char(self, c: str) -> None:
        """Unget a character.

        The unget buffer for characters is only one character large; it is
        an error to try to unget a character when the unget buffer is not
        empty.

        c: the character to unget

        raises UngetBufferFull: there is already an ungotten char
        """
        if self.ungotten_char is not None:
            # this should never happen!
            raise UngetBufferFull  # pragma: no cover
        self.ungotten_char = c

    def skip_whitespace(self) -> int:
        """Consume input until a non-whitespace character is encountered.

        The non-whitespace character is then ungotten, and the number of
        whitespace characters consumed is returned.

        If the tokenizer is in multiline mode, then newlines are whitespace.

        Returns the number of characters skipped.
        """
        skipped = 0
        while True:
            c = self._get_char()
            if c != " " and c != "\t":
                # Newlines only count as whitespace inside parentheses.
                if (c != "\n") or not self.multiline:
                    self._unget_char(c)
                    return skipped
            skipped += 1

    def get(self, want_leading: bool = False, want_comment: bool = False) -> Token:
        """Get the next token.

        want_leading: If True, return a WHITESPACE token if the
        first character read is whitespace.  The default is False.

        want_comment: If True, return a COMMENT token if the
        first token read is a comment.  The default is False.

        Raises dns.exception.UnexpectedEnd: input ended prematurely

        Raises dns.exception.SyntaxError: input was badly formed

        Returns a Token.
        """
        if self.ungotten_token is not None:
            utoken = self.ungotten_token
            self.ungotten_token = None
            # Whitespace and comment tokens are only replayed if the caller
            # asked for them; otherwise fall through and tokenize afresh.
            if utoken.is_whitespace():
                if want_leading:
                    return utoken
            elif utoken.is_comment():
                if want_comment:
                    return utoken
            else:
                return utoken
        skipped = self.skip_whitespace()
        if want_leading and skipped > 0:
            return Token(WHITESPACE, " ")
        token = ""
        ttype = IDENTIFIER
        has_escape = False
        while True:
            c = self._get_char()
            if c == "" or c in self.delimiters:
                if c == "" and self.quoting:
                    raise dns.exception.UnexpectedEnd
                if token == "" and ttype != QUOTED_STRING:
                    # We hit a delimiter before accumulating any token text,
                    # so the delimiter itself must be interpreted.
                    if c == "(":
                        self.multiline += 1
                        self.skip_whitespace()
                        continue
                    elif c == ")":
                        if self.multiline <= 0:
                            raise dns.exception.SyntaxError
                        self.multiline -= 1
                        self.skip_whitespace()
                        continue
                    elif c == '"':
                        if not self.quoting:
                            # Opening quote: switch delimiter set so only a
                            # closing quote ends the token.
                            self.quoting = True
                            self.delimiters = _QUOTING_DELIMITERS
                            ttype = QUOTED_STRING
                            continue
                        else:
                            # Closing quote of an empty quoted string.
                            self.quoting = False
                            self.delimiters = _DELIMITERS
                            self.skip_whitespace()
                            continue
                    elif c == "\n":
                        return Token(EOL, "\n")
                    elif c == ";":
                        # Consume the comment text up to (not including) the
                        # newline or EOF.
                        while 1:
                            c = self._get_char()
                            if c == "\n" or c == "":
                                break
                            token += c
                        if want_comment:
                            # Leave the terminator for the next get() call.
                            self._unget_char(c)
                            return Token(COMMENT, token)
                        elif c == "":
                            if self.multiline:
                                raise dns.exception.SyntaxError(
                                    "unbalanced parentheses"
                                )
                            return Token(EOF, comment=token)
                        elif self.multiline:
                            # Inside parentheses a comment ends the physical
                            # line but not the logical one; keep scanning.
                            self.skip_whitespace()
                            token = ""
                            continue
                        else:
                            return Token(EOL, "\n", comment=token)
                    else:
                        # This code exists in case we ever want a
                        # delimiter to be returned.  It never produces
                        # a token currently.
                        token = c
                        ttype = DELIMITER
                else:
                    # The delimiter ends the accumulated token; push it back
                    # so it is seen again on the next call.
                    self._unget_char(c)
                break
            elif self.quoting and c == "\n":
                raise dns.exception.SyntaxError("newline in quoted string")
            elif c == "\\":
                #
                # It's an escape.  Put it and the next character into
                # the token; it will be checked later for goodness.
                #
                token += c
                has_escape = True
                c = self._get_char()
                if c == "" or (c == "\n" and not self.quoting):
                    raise dns.exception.UnexpectedEnd
            token += c
        if token == "" and ttype != QUOTED_STRING:
            if self.multiline:
                raise dns.exception.SyntaxError("unbalanced parentheses")
            ttype = EOF
        return Token(ttype, token, has_escape)

    def unget(self, token: Token) -> None:
        """Unget a token.

        The unget buffer for tokens is only one token large; it is
        an error to try to unget a token when the unget buffer is not
        empty.

        token: the token to unget

        Raises UngetBufferFull: there is already an ungotten token
        """
        if self.ungotten_token is not None:
            raise UngetBufferFull
        self.ungotten_token = token

    def next(self):
        """Return the next item in an iteration.

        Returns a Token.
        """
        token = self.get()
        if token.is_eof():
            raise StopIteration
        return token

    __next__ = next

    def __iter__(self):
        return self

    # Helpers

    def get_int(self, base: int = 10) -> int:
        """Read the next token and interpret it as an unsigned integer.

        Raises dns.exception.SyntaxError if not an unsigned integer.

        Returns an int.
        """
        token = self.get().unescape()
        if not token.is_identifier():
            raise dns.exception.SyntaxError("expecting an identifier")
        if not token.value.isdigit():
            raise dns.exception.SyntaxError("expecting an integer")
        return int(token.value, base)

    def get_uint8(self) -> int:
        """Read the next token and interpret it as an 8-bit unsigned
        integer.

        Raises dns.exception.SyntaxError if not an 8-bit unsigned integer.

        Returns an int.
        """
        value = self.get_int()
        if value < 0 or value > 255:
            raise dns.exception.SyntaxError(
                "%d is not an unsigned 8-bit integer" % value
            )
        return value

    def get_uint16(self, base: int = 10) -> int:
        """Read the next token and interpret it as a 16-bit unsigned
        integer.

        Raises dns.exception.SyntaxError if not a 16-bit unsigned integer.

        Returns an int.
        """
        value = self.get_int(base=base)
        if value < 0 or value > 65535:
            if base == 8:
                raise dns.exception.SyntaxError(
                    f"{value:o} is not an octal unsigned 16-bit integer"
                )
            else:
                raise dns.exception.SyntaxError(
                    "%d is not an unsigned 16-bit integer" % value
                )
        return value

    def get_uint32(self, base: int = 10) -> int:
        """Read the next token and interpret it as a 32-bit unsigned
        integer.

        Raises dns.exception.SyntaxError if not a 32-bit unsigned integer.

        Returns an int.
        """
        value = self.get_int(base=base)
        if value < 0 or value > 4294967295:
            raise dns.exception.SyntaxError(
                "%d is not an unsigned 32-bit integer" % value
            )
        return value

    def get_uint48(self, base: int = 10) -> int:
        """Read the next token and interpret it as a 48-bit unsigned
        integer.

        Raises dns.exception.SyntaxError if not a 48-bit unsigned integer.

        Returns an int.
        """
        value = self.get_int(base=base)
        if value < 0 or value > 281474976710655:
            raise dns.exception.SyntaxError(
                "%d is not an unsigned 48-bit integer" % value
            )
        return value

    def get_string(self, max_length: Optional[int] = None) -> str:
        """Read the next token and interpret it as a string.

        Raises dns.exception.SyntaxError if not a string.

        Raises dns.exception.SyntaxError if token value length
        exceeds max_length (if specified).

        Returns a string.
        """
        token = self.get().unescape()
        if not (token.is_identifier() or token.is_quoted_string()):
            raise dns.exception.SyntaxError("expecting a string")
        # NOTE: a max_length of 0 is treated the same as None (no limit).
        if max_length and len(token.value) > max_length:
            raise dns.exception.SyntaxError("string too long")
        return token.value

    def get_identifier(self) -> str:
        """Read the next token, which should be an identifier.

        Raises dns.exception.SyntaxError if not an identifier.

        Returns a string.
        """
        token = self.get().unescape()
        if not token.is_identifier():
            raise dns.exception.SyntaxError("expecting an identifier")
        return token.value

    def get_remaining(self, max_tokens: Optional[int] = None) -> List[Token]:
        """Return the remaining tokens on the line, until an EOL or EOF is seen.

        max_tokens: If not None, stop after this number of tokens.

        Returns a list of tokens.
        """
        tokens = []
        while True:
            token = self.get()
            if token.is_eol_or_eof():
                # Leave the terminator for the caller to consume.
                self.unget(token)
                break
            tokens.append(token)
            if len(tokens) == max_tokens:
                break
        return tokens

    def concatenate_remaining_identifiers(self, allow_empty: bool = False) -> str:
        """Read the remaining tokens on the line, which should be identifiers.

        Raises dns.exception.SyntaxError if there are no remaining tokens,
        unless `allow_empty=True` is given.

        Raises dns.exception.SyntaxError if a token is seen that is not an
        identifier.

        Returns a string containing a concatenation of the remaining
        identifiers.
        """
        s = ""
        while True:
            token = self.get().unescape()
            if token.is_eol_or_eof():
                self.unget(token)
                break
            if not token.is_identifier():
                raise dns.exception.SyntaxError
            s += token.value
        if not (allow_empty or s):
            raise dns.exception.SyntaxError("expecting another identifier")
        return s

    def as_name(
        self,
        token: Token,
        origin: Optional[dns.name.Name] = None,
        relativize: bool = False,
        relativize_to: Optional[dns.name.Name] = None,
    ) -> dns.name.Name:
        """Try to interpret the token as a DNS name.

        Raises dns.exception.SyntaxError if not a name.

        Returns a dns.name.Name.
        """
        if not token.is_identifier():
            raise dns.exception.SyntaxError("expecting an identifier")
        name = dns.name.from_text(token.value, origin, self.idna_codec)
        return name.choose_relativity(relativize_to or origin, relativize)

    def get_name(
        self,
        origin: Optional[dns.name.Name] = None,
        relativize: bool = False,
        relativize_to: Optional[dns.name.Name] = None,
    ) -> dns.name.Name:
        """Read the next token and interpret it as a DNS name.

        Raises dns.exception.SyntaxError if not a name.

        Returns a dns.name.Name.
        """
        token = self.get()
        return self.as_name(token, origin, relativize, relativize_to)

    def get_eol_as_token(self) -> Token:
        """Read the next token and raise an exception if it isn't EOL or
        EOF.

        Returns a string.
        """
        token = self.get()
        if not token.is_eol_or_eof():
            raise dns.exception.SyntaxError(
                'expected EOL or EOF, got %d "%s"' % (token.ttype, token.value)
            )
        return token

    def get_eol(self) -> str:
        return self.get_eol_as_token().value

    def get_ttl(self) -> int:
        """Read the next token and interpret it as a DNS TTL.

        Raises dns.exception.SyntaxError or dns.ttl.BadTTL if not an
        identifier or badly formed.

        Returns an int.
        """
        token = self.get().unescape()
        if not token.is_identifier():
            raise dns.exception.SyntaxError("expecting an identifier")
        return dns.ttl.from_text(token.value)