lexer.py

#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause

"""SQL Lexer"""
import re
from threading import Lock

# This code is based on the SqlLexer in pygments.
# http://pygments.org/
# It's separated from the rest of pygments to increase performance
# and to allow some customizations.

from io import TextIOBase

from sqlparse import tokens, keywords
from sqlparse.utils import consume


class Lexer:
    """The Lexer supports configurable syntax.
    To add support for additional keywords, use the `add_keywords` method."""

    _default_instance = None
    _lock = Lock()

    # Development notes:
    # - This class is prepared to be able to support additional SQL dialects
    #   in the future by adding additional functions that take the place of
    #   the function default_initialization().
    # - The lexer class uses an explicit singleton behavior with the
    #   instance-getter method get_default_instance(). This mechanism has
    #   the advantage that the call signatures of the entry points to the
    #   sqlparse library are not affected. Also, usage of sqlparse in third
    #   party code does not need to be adapted. On the other hand, the current
    #   implementation does not easily allow for multiple SQL dialects to be
    #   parsed in the same process.
    #   Such behavior can be supported in the future by passing a
    #   suitably initialized lexer object as an additional parameter to the
    #   entry-point functions (such as `parse`). Code will need to be written
    #   to pass down and utilize such an object. The current implementation
    #   is prepared to support this thread-safe approach without the
    #   default_instance part needing to change its interface.

    @classmethod
    def get_default_instance(cls):
        """Returns the lexer instance used internally
        by the sqlparse core functions."""
        with cls._lock:
            if cls._default_instance is None:
                cls._default_instance = cls()
                cls._default_instance.default_initialization()
        return cls._default_instance
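
    # Illustrative sketch (not part of the module): repeated calls return the
    # same shared instance, so customizations made through it are
    # process-wide.
    #
    #     lex1 = Lexer.get_default_instance()
    #     lex2 = Lexer.get_default_instance()
    #     assert lex1 is lex2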

    def default_initialization(self):
        """Initialize the lexer with default dictionaries.
        Useful if you need to revert custom syntax settings."""
        self.clear()
        self.set_SQL_REGEX(keywords.SQL_REGEX)
        self.add_keywords(keywords.KEYWORDS_COMMON)
        self.add_keywords(keywords.KEYWORDS_ORACLE)
        self.add_keywords(keywords.KEYWORDS_MYSQL)
        self.add_keywords(keywords.KEYWORDS_PLPGSQL)
        self.add_keywords(keywords.KEYWORDS_HQL)
        self.add_keywords(keywords.KEYWORDS_MSACCESS)
        self.add_keywords(keywords.KEYWORDS_SNOWFLAKE)
        self.add_keywords(keywords.KEYWORDS_BIGQUERY)
        self.add_keywords(keywords.KEYWORDS)

    def clear(self):
        """Clear all syntax configurations.
        Useful if you want to load a reduced set of syntax configurations.
        After this call, regexps and keyword dictionaries need to be loaded
        to make the lexer functional again."""
        self._SQL_REGEX = []
        self._keywords = []

    def set_SQL_REGEX(self, SQL_REGEX):
        """Set the list of regexes that will parse the SQL."""
        FLAGS = re.IGNORECASE | re.UNICODE
        self._SQL_REGEX = [
            (re.compile(rx, FLAGS).match, tt)
            for rx, tt in SQL_REGEX
        ]
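
    # Illustrative sketch (not part of the module): entries follow the same
    # (pattern, tokentype) shape as keywords.SQL_REGEX, and the token type may
    # also be keywords.PROCESS_AS_KEYWORD to defer to the keyword
    # dictionaries. The two entries below are simplified examples, not the
    # shipped defaults.
    #
    #     Lexer.get_default_instance().set_SQL_REGEX([
    #         (r'--.*?(\r\n|\r|\n|$)', tokens.Comment.Single),
    #         (r'[a-zA-Z_]\w*', keywords.PROCESS_AS_KEYWORD),
    #     ])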

    def add_keywords(self, keywords):
        """Add keyword dictionaries. Keywords are looked up in the same order
        that dictionaries were added."""
        self._keywords.append(keywords)
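
    # Illustrative sketch (not part of the module): a custom dictionary maps
    # upper-case keyword strings to token types, mirroring the shipped
    # KEYWORDS_* dictionaries. 'FUNKYKEYWORD' is a made-up example.
    #
    #     lex = Lexer.get_default_instance()
    #     lex.add_keywords({'FUNKYKEYWORD': tokens.Keyword})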

    def is_keyword(self, value):
        """Checks for a keyword.

        If the given value is in one of the KEYWORDS_* dictionaries
        it's considered a keyword. Otherwise, tokens.Name is returned.
        """
        val = value.upper()
        for kwdict in self._keywords:
            if val in kwdict:
                return kwdict[val], value
        else:
            return tokens.Name, value
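
    # Illustrative sketch (not part of the module): lookup is
    # case-insensitive, but the original spelling is returned alongside the
    # token type. With the default dictionaries loaded, something like:
    #
    #     lex = Lexer.get_default_instance()
    #     lex.is_keyword('select')   # -> (tokens.Keyword.DML, 'select')
    #     lex.is_keyword('foo')      # -> (tokens.Name, 'foo')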

    def get_tokens(self, text, encoding=None):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`, which may be a string, a bytes object, or a text-mode
        file-like object. Bytes are decoded using `encoding` if given,
        otherwise as UTF-8 with a unicode-escape fallback.
        """
        if isinstance(text, TextIOBase):
            text = text.read()

        if isinstance(text, str):
            pass
        elif isinstance(text, bytes):
            if encoding:
                text = text.decode(encoding)
            else:
                try:
                    text = text.decode('utf-8')
                except UnicodeDecodeError:
                    text = text.decode('unicode-escape')
        else:
            raise TypeError("Expected text or file-like object, got {!r}".
                            format(type(text)))

        iterable = enumerate(text)
        for pos, char in iterable:
            for rexmatch, action in self._SQL_REGEX:
                m = rexmatch(text, pos)

                if not m:
                    continue
                elif isinstance(action, tokens._TokenType):
                    yield action, m.group()
                elif action is keywords.PROCESS_AS_KEYWORD:
                    yield self.is_keyword(m.group())

                # Skip over the rest of the match so the outer loop resumes
                # at the first unconsumed character.
                consume(iterable, m.end() - pos - 1)
                break
            else:
                # No regex matched at this position: emit an error token for
                # the single character and move on.
                yield tokens.Error, char
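
    # Illustrative sketch (not part of the module): file-like input is read
    # up front, then tokenized lazily by the generator.
    #
    #     import io
    #     lex = Lexer.get_default_instance()
    #     pairs = list(lex.get_tokens(io.StringIO('select 1')))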


def tokenize(sql, encoding=None):
    """Tokenize sql.

    Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream
    of ``(token type, value)`` items.
    """
    return Lexer.get_default_instance().get_tokens(sql, encoding)
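

# A minimal, runnable usage sketch (illustrative; not part of the original
# module). The exact token types emitted depend on the default dictionaries.
if __name__ == '__main__':
    for ttype, value in tokenize('SELECT foo FROM bar;'):
        print(ttype, repr(value))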