Commit: Cleanup

DeltaRazero committed Nov 9, 2020
1 parent 0430885 commit 1e4436e
Showing 2 changed files with 7 additions and 10 deletions.
9 changes: 3 additions & 6 deletions lexer2/lexer/_abstract_lexer.py
@@ -216,12 +216,10 @@ def _GNT_P1_ScanChars(self) -> _Token:
         # c = 0
         # for i in range(self._ts._strBufferPos, self._ts._strBufferSize):
         #     char = self._ts._strBuffer[i]
-        # # char = ord(self._ts._strBuffer[i])


         # SPACE character
         if (char == ' '):
-        # if (char == 32):
             if (flag_return_space):
                 token = _Token(
                     _predefs.space.id_,
@@ -236,7 +234,6 @@ def _GNT_P1_ScanChars(self) -> _Token:

         # NEWLINE character
         elif (char == '\n'):
-        # elif (char == 10):
             if (flags.newline == _flags.HFlag.HANDLE_AND_RETURN):
                 token = _Token(
                     _predefs.newline.id_,
@@ -251,7 +248,6 @@ def _GNT_P1_ScanChars(self) -> _Token:

         # TAB character
         elif (char == '\t'):
-        # elif (char == 9):
             if (flags.tab == _flags.HFlag.HANDLE_AND_RETURN):
                 token = _Token(
                     _predefs.tab.id_,
@@ -296,7 +292,7 @@ def _GNT_P2_MatchRegexes(self) -> _Token:
         the unknown token type.
         """
         # txt_pos = self._ts.GetTextPosition()
-        txt_pos = self._ts._tp
+        txt_pos: _file.TextPosition = self._ts._tp

         # Match mainloop
         ruleset: _rule.Ruleset_t = self._rulesets[-1]
@@ -327,7 +323,8 @@ def _GNT_P2_MatchRegexes(self) -> _Token:
             temp_token = self._MatchRule(rule)

             n1 = len(temp_token.data)
-            n2 = self._ts.GetChunkSize() - self._ts.GetStrBufferPosition()
+            # n2 = self._ts.GetChunkSize() - self._ts.GetStrBufferPosition()
+            n2 = self._ts._chunkSize - self._ts._strBufferPos

             # Update positions
             self._ts.Update(len(temp_token.data))
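The _abstract_lexer.py side of the cleanup deletes the commented-out ordinal comparisons (32, 10, and 9 are just ord(' '), ord('\n'), and ord('\t'), so the character literals say the same thing more readably), annotates txt_pos with its _file.TextPosition type, and swaps the GetChunkSize()/GetStrBufferPosition() accessors for direct reads of the private attributes they wrap, presumably to trim method-call overhead in the matching loop. Below is a minimal standalone sketch of the flag-driven whitespace handling these hunks touch; HFlag.HANDLE_AND_RETURN mirrors the enum value visible in the diff, but the function, token shape, and everything else here are assumptions for illustration, not the library's API:

    import enum

    class HFlag(enum.Enum):
        # The two behaviours visible in the diff: consume a whitespace
        # character silently, or consume it and also emit a token for it.
        HANDLE = enum.auto()
        HANDLE_AND_RETURN = enum.auto()

    def scan_whitespace(char: str, newline: HFlag, tab: HFlag):
        # Hypothetical reduction of _GNT_P1_ScanChars: return a
        # (token_type, data) pair when the relevant flag asks for a
        # token, or None when the character is merely consumed.
        if char == '\n' and newline is HFlag.HANDLE_AND_RETURN:
            return ("NEWLINE", char)
        if char == '\t' and tab is HFlag.HANDLE_AND_RETURN:
            return ("TAB", char)
        return None

    print(scan_whitespace('\n', HFlag.HANDLE_AND_RETURN, HFlag.HANDLE))  # ('NEWLINE', '\n')
    print(scan_whitespace('\t', HFlag.HANDLE_AND_RETURN, HFlag.HANDLE))  # None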
8 changes: 4 additions & 4 deletions lexer2/predefs.py
@@ -42,7 +42,7 @@ def __init__(self, regexPatternStart: str, regexPatternEnd: str):


 class SinglelineComment (BaseComment):
-    """Rule template for filtering single-line comments.
+    """Rule template for filtering singleline comments.
     """

     # --- CONSTRUCTOR --- #
@@ -66,7 +66,7 @@ def __init__(self, identifyingRegex: str):


 class MultilineComment (BaseComment):
-    """Rule template for filtering single-line comments.
+    """Rule template for filtering singleline comments.
     """

     # --- CONSTRUCTOR --- #
@@ -93,8 +93,8 @@ def __init__(self, identifyingStartRegex: str, identifyingEndRegex: str):

 # ***************************************************************************************

-# def __MakeDummyRule(id: str) -> _Rule: return _Rule(id, "")
-__MakeDummyRule: _t.Callable[[str], _Rule] = lambda id: _Rule(id, r"a^")
+def __MakeDummyRule(id: str) -> _Rule: return _Rule(id, r"a^")
+# __MakeDummyRule: _t.Callable[[str], _Rule] = lambda id: _Rule(id, r"a^")

 # These rule object instances are not meant to be used in rulesets, as they will not be
 # used / will not match anything.
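In predefs.py the docstrings switch from "single-line" to "singleline", matching the SinglelineComment class name (the MultilineComment docstring still reads "singleline comments", apparently a leftover copy-paste), and the dummy-rule factory becomes a plain def with the commented lambda kept for reference; PEP 8 prefers a def over a lambda bound to a name, since the def carries a real __name__ and can hold the return annotation. Both forms build rules from the pattern r"a^", which can never match: it demands a literal 'a' immediately before the start-of-string anchor, a position no input can provide, unlike the older empty pattern "" that matches at every position. A quick standalone check of that property (plain re, independent of the library):

    import re

    # r"a^" wants a literal "a" followed by the start-of-string anchor;
    # no position in any input satisfies that, so the pattern never matches.
    never = re.compile(r"a^")

    for text in ("", "a", "a^", "aa", "an a in the middle"):
        assert never.search(text) is None

    # The empty pattern, by contrast, matches (emptily) at every position,
    # which is why it made a poor "match nothing" placeholder.
    assert re.search("", "anything") is not None
    print("r'a^' matched nothing; '' matches trivially")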
