From daae1d40b426a8cb68e842805bbb36e2eb859d0f Mon Sep 17 00:00:00 2001 From: erwann lesech Date: Mon, 15 Jan 2024 13:57:07 +0100 Subject: [PATCH] feat: add new tokens except double quote and variable --- src/Makefile.am | 2 +- src/lexer/lexer.c | 122 +++++++++++---------- src/lexer/lexer.h | 9 +- src/lexer/lexer_utils.c | 36 +++++-- src/lexer/tests/lexer2_tests.c | 192 ++++++++++++++++++++++++++++++--- src/lexer/tests/lexer_tests.c | 24 ++--- src/lexer/token.h | 16 +-- 7 files changed, 297 insertions(+), 104 deletions(-) diff --git a/src/Makefile.am b/src/Makefile.am index 07212fec..daf06742 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -11,7 +11,7 @@ bin_PROGRAMS = 42sh 42sh_CPPFLAGS = -I%D% -42sh_CFLAGS = -std=c99 -Werror -Wall -Wextra -Wvla -pedantic +42sh_CFLAGS = -std=c99 -Werror -Wall -Wextra -Wvla -pedantic -fsanitize=address -g 42sh_LDADD = lexer/liblexer.a \ ast/libast.a \ diff --git a/src/lexer/lexer.c b/src/lexer/lexer.c index 9cb5c7a8..953419e9 100644 --- a/src/lexer/lexer.c +++ b/src/lexer/lexer.c @@ -15,32 +15,18 @@ #include #include -struct lex_match lex_match[25] = { { "if", TOKEN_IF }, - { "then", TOKEN_THEN }, - { "elif", TOKEN_ELIF }, - { "else", TOKEN_ELSE }, - { "fi", TOKEN_FI }, - { ";", TOKEN_SEMICOLON }, - { "\n", TOKEN_EOL }, - { "\0", TOKEN_EOF }, - - { "&&", TOKEN_AND }, - { "||", TOKEN_OR }, - { "|", TOKEN_PIPE }, - { "!", TOKEN_NEGATE }, - { "[0-9]*<", TOKEN_INPUT_REDIR }, - { "[0-9]*>", TOKEN_OUTPUT_REDIR }, - { "[0-9]*>>", TOKEN_APPEND }, - { "[0-9]*<&", TOKEN_DUP_INPUT }, - { "[0-9]*>&", TOKEN_DUP_INPUT_OUTPUT }, - { "[0-9]*>|", TOKEN_NOCLOBBER }, - { "[0-9]*<>", TOKEN_DUP_INPUT_OUTPUT }, - { "while", TOKEN_WHILE }, - { "until", TOKEN_UNTIL }, - { "for", TOKEN_FOR }, - { "do", TOKEN_DO }, - { "done", TOKEN_DONE }, - { "$*", TOKEN_VARIABLE } }; +struct lex_match lex_match[] = { + { "if", TOKEN_IF }, { "then", TOKEN_THEN }, { "elif", TOKEN_ELIF }, + { "else", TOKEN_ELSE }, { "fi", TOKEN_FI }, { ";", TOKEN_SEMICOLON }, + { "\n", TOKEN_EOL }, { "\0", TOKEN_EOF }, + + { "&&", TOKEN_AND }, { "||", TOKEN_OR }, { "|", TOKEN_PIPE }, + { "!", TOKEN_NEGATE }, { "<", TOKEN_REDIR }, { ">", TOKEN_REDIR }, + { ">>", TOKEN_REDIR }, { "<&", TOKEN_REDIR }, { ">&", TOKEN_REDIR }, + { ">|", TOKEN_REDIR }, { "<>", TOKEN_REDIR }, + + { "$*", TOKEN_VARIABLE } +}; struct lexer *lexer_new(const char *input) { @@ -71,52 +57,75 @@ char *get_word(struct lexer *lexer, bool *is_diactivated) { char *word = malloc(sizeof(char) * 2); unsigned word_index = 0; - if (lexer->data[lexer->index] == '\0') - { - ++lexer->index; - word[0] = '\0'; - return word; - } + if (lexer->data[lexer->index] == ';' || lexer->data[lexer->index] == '\n') { word[0] = lexer->data[lexer->index]; - word[1] = '\0'; + word_index = 1; ++lexer->index; if (lexer->data[lexer->index] == ' ') { ++lexer->index; } - return word; } - if (lexer->data[lexer->index] == '#') + else if (lexer->data[lexer->index] == '#') + { + word = handle_comment(lexer, word, &word_index); + } + else if (lexer->data[lexer->index] == '>' + || lexer->data[lexer->index] == '<') { - return handle_comment(lexer, word, 0); + word = handle_redir(lexer, &word_index); } - while (lexer->data[lexer->index] != ' ' && lexer->data[lexer->index] != '\0' - && lexer->data[lexer->index] != ';' - && lexer->data[lexer->index] != '\n' - && lexer->data[lexer->index] != '\t') + else if (lexer->data[lexer->index] == '|' + || lexer->data[lexer->index] == '&') { - word = realloc(word, sizeof(char) * (word_index + 1)); - word[word_index] = lexer->data[lexer->index]; 
- ++word_index; + word[0] = lexer->data[lexer->index]; + word_index = 1; ++lexer->index; - if (lexer->data[lexer->index - 1] == '\\') + + if (lexer->data[lexer->index] == '|' + || lexer->data[lexer->index] == '&') { - if (!handle_backslash(lexer, is_diactivated, word, word_index)) - { - return word; - } + word = realloc(word, sizeof(char) * (word_index + 1)); + word[word_index] = lexer->data[lexer->index]; + word_index = 2; + ++lexer->index; } - else if (lexer->data[lexer->index - 1] == '\'') + } + else + { + while (lexer->data[lexer->index] != ' ' + && lexer->data[lexer->index] != '\0' + && lexer->data[lexer->index] != ';' + && lexer->data[lexer->index] != '\n' + && lexer->data[lexer->index] != '\t' + && lexer->data[lexer->index] != '>' + && lexer->data[lexer->index] != '<' + && lexer->data[lexer->index] != '|' + && lexer->data[lexer->index] != '&') { - word = - handle_simple_quote(lexer, is_diactivated, word, &word_index); - if (!word) + word = realloc(word, sizeof(char) * (word_index + 1)); + word[word_index] = lexer->data[lexer->index]; + ++word_index; + ++lexer->index; + if (lexer->data[lexer->index - 1] == '\\') + { + if (!handle_backslash(lexer, is_diactivated, word, word_index)) + { + return word; + } + } + else if (lexer->data[lexer->index - 1] == '\'') { - return NULL; + word = handle_simple_quote(lexer, is_diactivated, word, + &word_index); + if (!word) + { + return NULL; + } + lexer->index += 1; } - lexer->index += 1; } } word = realloc(word, sizeof(char) * (word_index + 1)); @@ -192,6 +201,9 @@ struct token lexer_pop(struct lexer *lexer) return token; } struct token token = parse_input_for_tok(lexer); - lexer->curr_tok = token; + if (token.type != TOKEN_EOF) + { + lexer->curr_tok = token; + } return token; } diff --git a/src/lexer/lexer.h b/src/lexer/lexer.h index 63452eb6..f832e685 100644 --- a/src/lexer/lexer.h +++ b/src/lexer/lexer.h @@ -96,7 +96,14 @@ char *handle_simple_quote(struct lexer *lexer, bool *is_diactivated, char *word, * * \return The next word. */ -char *handle_comment(struct lexer *lexer, char *word, unsigned word_index); +char *handle_comment(struct lexer *lexer, char *word, unsigned *word_index); + +/** + * \brief Handle the redirection character. + * \param lexer The lexer. + * \return The next redirection word. + */ +char *handle_redir(struct lexer *lexer, unsigned *word_index); /** * \brief Returns the next word in the input string. diff --git a/src/lexer/lexer_utils.c b/src/lexer/lexer_utils.c index 908a877c..c14a816e 100644 --- a/src/lexer/lexer_utils.c +++ b/src/lexer/lexer_utils.c @@ -42,7 +42,7 @@ char *handle_simple_quote(struct lexer *lexer, bool *is_diactivated, char *word, return word; } -char *handle_comment(struct lexer *lexer, char *word, unsigned word_index) +char *handle_comment(struct lexer *lexer, char *word, unsigned *word_index) { // Skip the comment ++lexer->index; @@ -53,13 +53,11 @@ char *handle_comment(struct lexer *lexer, char *word, unsigned word_index) { ++lexer->index; } - word[word_index] = lexer->data[lexer->index]; - ++lexer->index; - // If the comment isn't the last thing in the string, we need to add a '\0' - // at the end of the word. 
- if (word[word_index] != '\0') + word[*word_index] = lexer->data[lexer->index]; + *word_index += 1; + if (lexer->data[lexer->index] != '\0') { - word[word_index + 1] = '\0'; + ++lexer->index; } // Skip the spaces after the comment @@ -68,4 +66,28 @@ char *handle_comment(struct lexer *lexer, char *word, unsigned word_index) ++lexer->index; } return word; +} + +char *handle_redir(struct lexer *lexer, unsigned *word_index) +{ + char *redir = malloc(sizeof(char) * 2); + redir[0] = lexer->data[lexer->index]; + ++lexer->index; + *word_index += 1; + if (lexer->data[lexer->index] == '>' || lexer->data[lexer->index] == '&' + || lexer->data[lexer->index] == '|') + { + *word_index += 1; + redir = realloc(redir, sizeof(char) * 3); + if (lexer->data[lexer->index - 1] == '<' + && lexer->data[lexer->index] == '|') + { + free(redir); + return NULL; + } + + redir[1] = lexer->data[lexer->index]; + ++lexer->index; + } + return redir; } \ No newline at end of file diff --git a/src/lexer/tests/lexer2_tests.c b/src/lexer/tests/lexer2_tests.c index 9afb640a..7e38dc73 100644 --- a/src/lexer/tests/lexer2_tests.c +++ b/src/lexer/tests/lexer2_tests.c @@ -25,6 +25,48 @@ Test(lexer2, token_and) lexer_free(lexer); } +Test(lexer2, token_and2) +{ + struct lexer *lexer = lexer_new("false && true"); + struct token tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "false"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_AND); + cr_assert_str_eq(tok.data, "&&"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "true"); + token_free(tok); + + lexer_free(lexer); +} + +Test(lexer2, token_and_stick) +{ + struct lexer *lexer = lexer_new("false&&true"); + struct token tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "false"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_AND); + cr_assert_str_eq(tok.data, "&&"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "true"); + token_free(tok); + + lexer_free(lexer); +} + Test(lexer2, token_or) { struct lexer *lexer = lexer_new("||"); @@ -45,6 +87,42 @@ Test(lexer2, token_pipe) lexer_free(lexer); } +Test(lexer2, token_pipe2) +{ + struct lexer *lexer = lexer_new("echo papa|tr a e"); + struct token tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "echo"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "papa"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_PIPE); + cr_assert_str_eq(tok.data, "|"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "tr"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "a"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "e"); + token_free(tok); + + lexer_free(lexer); +} + Test(lexer2, token_negate) { struct lexer *lexer = lexer_new("!"); @@ -59,7 +137,7 @@ Test(lexer2, token_input_redir) { struct lexer *lexer = lexer_new("<"); struct token tok = lexer_pop(lexer); - cr_assert_eq(tok.type, TOKEN_INPUT_REDIR); + cr_assert_eq(tok.type, TOKEN_REDIR); cr_assert_str_eq(tok.data, "<"); token_free(tok); lexer_free(lexer); @@ -69,7 +147,7 @@ Test(lexer2, 
token_output_redir) { struct lexer *lexer = lexer_new(">"); struct token tok = lexer_pop(lexer); - cr_assert_eq(tok.type, TOKEN_OUTPUT_REDIR); + cr_assert_eq(tok.type, TOKEN_REDIR); cr_assert_str_eq(tok.data, ">"); token_free(tok); lexer_free(lexer); @@ -79,9 +157,7 @@ Test(lexer2, token_append) { struct lexer *lexer = lexer_new(">>"); struct token tok = lexer_pop(lexer); - printf("%s-\n", tok.data); - printf("%d\n", fnmatch("[0-9]*>>", ">>", 0)); - cr_assert_eq(tok.type, TOKEN_APPEND, "got %d", tok.type); + cr_assert_eq(tok.type, TOKEN_REDIR, "got %d", tok.type); cr_assert_str_eq(tok.data, ">>"); token_free(tok); lexer_free(lexer); @@ -91,7 +167,7 @@ Test(lexer2, token_dup_input) { struct lexer *lexer = lexer_new("<&"); struct token tok = lexer_pop(lexer); - cr_assert_eq(tok.type, TOKEN_DUP_INPUT); + cr_assert_eq(tok.type, TOKEN_REDIR); cr_assert_str_eq(tok.data, "<&"); token_free(tok); lexer_free(lexer); @@ -101,7 +177,7 @@ Test(lexer2, token_dup_input_output) { struct lexer *lexer = lexer_new(">&"); struct token tok = lexer_pop(lexer); - cr_assert_eq(tok.type, TOKEN_DUP_INPUT_OUTPUT); + cr_assert_eq(tok.type, TOKEN_REDIR); cr_assert_str_eq(tok.data, ">&"); token_free(tok); lexer_free(lexer); @@ -111,7 +187,7 @@ Test(lexer2, token_noclobber) { struct lexer *lexer = lexer_new(">|"); struct token tok = lexer_pop(lexer); - cr_assert_eq(tok.type, TOKEN_NOCLOBBER); + cr_assert_eq(tok.type, TOKEN_REDIR); cr_assert_str_eq(tok.data, ">|"); token_free(tok); lexer_free(lexer); @@ -121,17 +197,105 @@ Test(lexer2, token_dup_input_output2) { struct lexer *lexer = lexer_new("<>"); struct token tok = lexer_pop(lexer); - cr_assert_eq(tok.type, TOKEN_DUP_INPUT_OUTPUT); + cr_assert_eq(tok.type, TOKEN_REDIR); cr_assert_str_eq(tok.data, "<>"); token_free(tok); lexer_free(lexer); } +Test(lexer2, token_redir_stick_left) +{ + struct lexer *lexer = lexer_new("ls -la 2>file"); + struct token tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "ls"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "-la"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "2"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_REDIR); + cr_assert_str_eq(tok.data, ">"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "file"); + token_free(tok); + + lexer_free(lexer); +} + +Test(lexer2, token_redir_stick_left2) +{ + struct lexer *lexer = lexer_new("ls -la 2<>file"); + struct token tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "ls"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "-la"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "2"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_REDIR); + cr_assert_str_eq(tok.data, "<>"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "file"); + token_free(tok); + + lexer_free(lexer); +} + +Test(lexer2, token_redir_stick_left3) +{ + struct lexer *lexer = lexer_new("ls -la >| file"); + struct token tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "ls"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, 
TOKEN_WORD); + cr_assert_str_eq(tok.data, "-la"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_REDIR); + cr_assert_str_eq(tok.data, ">|"); + token_free(tok); + + tok = lexer_pop(lexer); + cr_assert_eq(tok.type, TOKEN_WORD); + cr_assert_str_eq(tok.data, "file"); + token_free(tok); + + lexer_free(lexer); +} + Test(lexer2, token_while) { struct lexer *lexer = lexer_new("while"); struct token tok = lexer_pop(lexer); - cr_assert_eq(tok.type, TOKEN_WHILE); + cr_assert_eq(tok.type, TOKEN_WORD); cr_assert_str_eq(tok.data, "while"); token_free(tok); lexer_free(lexer); @@ -141,7 +305,7 @@ Test(lexer2, token_until) { struct lexer *lexer = lexer_new("until"); struct token tok = lexer_pop(lexer); - cr_assert_eq(tok.type, TOKEN_UNTIL); + cr_assert_eq(tok.type, TOKEN_WORD); cr_assert_str_eq(tok.data, "until"); token_free(tok); lexer_free(lexer); @@ -151,7 +315,7 @@ Test(lexer2, token_for) { struct lexer *lexer = lexer_new("for"); struct token tok = lexer_pop(lexer); - cr_assert_eq(tok.type, TOKEN_FOR); + cr_assert_eq(tok.type, TOKEN_WORD); cr_assert_str_eq(tok.data, "for"); token_free(tok); lexer_free(lexer); @@ -161,7 +325,7 @@ Test(lexer2, token_do) { struct lexer *lexer = lexer_new("do"); struct token tok = lexer_pop(lexer); - cr_assert_eq(tok.type, TOKEN_DO); + cr_assert_eq(tok.type, TOKEN_WORD); cr_assert_str_eq(tok.data, "do"); token_free(tok); lexer_free(lexer); @@ -171,7 +335,7 @@ Test(lexer2, token_done) { struct lexer *lexer = lexer_new("done"); struct token tok = lexer_pop(lexer); - cr_assert_eq(tok.type, TOKEN_DONE); + cr_assert_eq(tok.type, TOKEN_WORD); cr_assert_str_eq(tok.data, "done"); token_free(tok); lexer_free(lexer); diff --git a/src/lexer/tests/lexer_tests.c b/src/lexer/tests/lexer_tests.c index 03d1b795..7b5a2d18 100644 --- a/src/lexer/tests/lexer_tests.c +++ b/src/lexer/tests/lexer_tests.c @@ -74,7 +74,7 @@ Test(lexer, lexer_pop_simple) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 17, "index = %lu", lexer->index); + cr_assert_eq(lexer->index, 16, "index = %lu", lexer->index); token_free(token); lexer_free(lexer); @@ -104,7 +104,7 @@ Test(lexer, lexer_pop_with_semicolon) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 10); + cr_assert_eq(lexer->index, 9); token_free(token); lexer_free(lexer); @@ -134,7 +134,7 @@ Test(lexer, lexer_pop_with_backslash_semicolon) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 11); + cr_assert_eq(lexer->index, 10); token_free(token); lexer_free(lexer); @@ -158,7 +158,7 @@ Test(Lexer, lexer_pop_with_backslash_semicolon_in_word) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 10); + cr_assert_eq(lexer->index, 9); token_free(token); lexer_free(lexer); @@ -182,7 +182,7 @@ Test(Lexer, lexer_pop_with_single_quote) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 9); + cr_assert_eq(lexer->index, 8); token_free(token); lexer_free(lexer); @@ -206,7 +206,7 @@ Test(Lexer, lexer_pop_with_backslash_single_quote) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 8); + cr_assert_eq(lexer->index, 7); token_free(token); lexer_free(lexer); @@ -230,7 +230,7 @@ 
Test(Lexer, lexer_pop_with_backslash_single_quote_2) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 17); + cr_assert_eq(lexer->index, 16); token_free(token); lexer_free(lexer); @@ -254,7 +254,7 @@ Test(Lexer, lexer_pop_with_backslash_diactivate_single_quote) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 19); + cr_assert_eq(lexer->index, 18); token_free(token); lexer_free(lexer); @@ -458,7 +458,7 @@ Test(Lexer, simple_comment) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 31); + cr_assert_eq(lexer->index, 30); token_free(token); lexer_free(lexer); @@ -476,7 +476,7 @@ Test(Lexer, comment_with_semicolon) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF, "token.type = %d", token.type); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 37, "lexer->index = %lu", lexer->index); + cr_assert_eq(lexer->index, 36, "lexer->index = %lu", lexer->index); token_free(token); lexer_free(lexer); @@ -506,7 +506,7 @@ Test(Lexer, comment_with_backslash_n) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF, "token.type = %d", token.type); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 37, "lexer->index = %lu", lexer->index); + cr_assert_eq(lexer->index, 36, "lexer->index = %lu", lexer->index); token_free(token); lexer_free(lexer); @@ -524,7 +524,7 @@ Test(Lexer, comment_with_back_slash2) token = lexer_pop(lexer); cr_assert_eq(token.type, TOKEN_EOF, "token.type = %d", token.type); cr_assert_str_eq(token.data, "\0"); - cr_assert_eq(lexer->index, 38, "lexer->index = %lu", lexer->index); + cr_assert_eq(lexer->index, 37, "lexer->index = %lu", lexer->index); token_free(token); lexer_free(lexer); diff --git a/src/lexer/token.h b/src/lexer/token.h index 373474fb..3d2a38c5 100644 --- a/src/lexer/token.h +++ b/src/lexer/token.h @@ -22,7 +22,6 @@ enum token_type TOKEN_ELSE, TOKEN_FI, TOKEN_SEMICOLON, - TOKEN_SIMPLE_QUOTE, TOKEN_WORD, // Any word TOKEN_EOL, // End of line ('\n') TOKEN_EOF, // End of file @@ -33,19 +32,8 @@ enum token_type TOKEN_OR, // || TOKEN_PIPE, // | TOKEN_NEGATE, // \! - TOKEN_INPUT_REDIR, // < - TOKEN_OUTPUT_REDIR, // > - TOKEN_APPEND, // >> - TOKEN_DUP_INPUT, // <& - TOKEN_DUP_OUTPUT, // >& - TOKEN_NOCLOBBER, // >| - TOKEN_DUP_INPUT_OUTPUT, // <> - TOKEN_WHILE, // while - TOKEN_UNTIL, // until - TOKEN_FOR, // for - TOKEN_DO, // do - TOKEN_DONE, // done - TOKEN_DOUBLE_QUOTE, // " + TOKEN_REDIR, // >, <, >>, >&, <&, >|, <> + TOKEN_VARIABLE // $ };
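
The patch above collapses every redirection operator into a single TOKEN_REDIR, demotes the loop keywords (while, until, for, do, done) to plain TOKEN_WORDs, and makes the fd prefix of a redirection ("2" in "2>file") come out as its own word token. The driver below is a minimal sketch of the resulting token stream; it is not part of the patch, and it assumes that "lexer/lexer.h" (via the -I%D% include path set in Makefile.am) declares struct token, enum token_type, lexer_new, lexer_pop, token_free and lexer_free exactly as the tests in lexer2_tests.c use them.

    /* Hypothetical driver, not part of the patch: prints the token stream
     * that the new tests expect.  The include path is an assumption. */
    #include <stdio.h>

    #include "lexer/lexer.h"

    int main(void)
    {
        /* After this commit, "2>" splits into the word "2" plus a TOKEN_REDIR
         * ">", "&&" is recognised even when stuck to the surrounding words,
         * and "done" is an ordinary TOKEN_WORD rather than a keyword token. */
        struct lexer *lexer = lexer_new("ls -la 2>file && echo done");
        struct token tok = lexer_pop(lexer);
        while (tok.type != TOKEN_EOF)
        {
            const char *kind = "OTHER";
            if (tok.type == TOKEN_WORD)
                kind = "WORD";
            else if (tok.type == TOKEN_REDIR)
                kind = "REDIR";
            else if (tok.type == TOKEN_AND)
                kind = "AND";
            printf("%s: '%s'\n", kind, tok.data);
            token_free(tok);
            tok = lexer_pop(lexer);
        }
        token_free(tok);
        lexer_free(lexer);
        return 0;
    }

Going by the token_redir_stick_left, token_and_stick and token_done tests added in this commit, the expected stream would be WORD 'ls', WORD '-la', WORD '2', REDIR '>', WORD 'file', AND '&&', WORD 'echo', WORD 'done', followed by TOKEN_EOF, which ends the loop.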