Merged
8 changes: 4 additions & 4 deletions discord_markdown_ast_parser/__init__.py
@@ -1,10 +1,10 @@
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Union
 
-from lexer import lex, Lexing
-from parser import Node, parse_tokens
+from .lexer import lex, Lexing
+from .parser import Node, parse_tokens
 
 
-def lexing_list_convert(lexing: Lexing) -> List[Lexing]:
+def lexing_list_convert(lexing: Union[List[Lexing], Lexing]) -> List[Lexing]:
     if not isinstance(lexing, list):
         lexing = [lexing]
     return [Lexing(item) if isinstance(item, str) else item for item in lexing]
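
Both files switch to package-relative imports (`from .lexer import ...`), which keeps the package importable as `discord_markdown_ast_parser` without relying on top-level modules named `lexer` and `parser`. The widened `Union[List[Lexing], Lexing]` annotation documents what the body already accepted. A minimal standalone sketch of that contract, using a stand-in `Lexing` class since the real one lives in the package's `lexer` module:

from typing import List, Union

class Lexing:
    # Stand-in for discord_markdown_ast_parser.lexer.Lexing, which is
    # constructed from a pattern string in the real package.
    def __init__(self, pattern: str):
        self.pattern = pattern

def lexing_list_convert(lexing: Union[List[Lexing], Lexing]) -> List[Lexing]:
    # A single rule is wrapped into a one-element list, so callers can
    # pass one Lexing or a list of them interchangeably.
    if not isinstance(lexing, list):
        lexing = [lexing]
    # Bare strings are coerced to Lexing per item (the body accepts them
    # even though the annotation does not advertise it).
    return [Lexing(item) if isinstance(item, str) else item for item in lexing]

assert len(lexing_list_convert(Lexing(r"\*\*"))) == 1
assert all(isinstance(x, Lexing) for x in lexing_list_convert([r"\*\*", Lexing("_")]))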
25 changes: 10 additions & 15 deletions discord_markdown_ast_parser/parser.py
@@ -4,7 +4,7 @@
 import itertools
 from typing import Optional, Generator, Any, List, Dict, Tuple, Iterable
 
-from lexer import Token, LexingRule, Lexing
+from .lexer import Token, LexingRule, Lexing
 
 
 NodeType = Enum(
@@ -21,12 +21,14 @@
"CHANNEL",
"SLASH_COMMAND",
"EMOJI_CUSTOM",
"EMOJI_CUSTOM_ANIMATED",
"EMOJI_UNICODE",
"EMOJI_UNICODE_ENCODED",
"URL_WITH_PREVIEW_EMBEDDED",
"URL_WITHOUT_PREVIEW_EMBEDDED",
"URL_WITH_PREVIEW",
"URL_WITHOUT_PREVIEW",
"TIMESTAMP",
"QUOTE_BLOCK",
"CODE_BLOCK",
"CODE_INLINE",
@@ -248,7 +250,7 @@ def parse_tokens_generator(
             )
             i += 1
             continue
-
+        # URL with preview
         if LexingRule.URL_WITH_PREVIEW in current_token:
             yield Node(
@@ -316,21 +318,14 @@ def parse_tokens_generator(
                 tokens[i + 1 :], [current_token.lexing_rule]
             )
             if children_token is not None:
-                children_content = ""
-                # treat all children token as inline text
-                for child_token in children_token:
-                    children_content += child_token.value
-
+                children_content = "".join(child_token.value for child_token in children_token)
                 # check for a language specifier
                 lines = children_content.split("\n")
-                # there must be at least one other non-empty line
-                # (the content doesn't matter, there just has to be one)
-                non_empty_line_found = False
                 lang = None
-                for line_index in range(1, len(lines)):
-                    if len(lines[line_index]) > 0:
-                        non_empty_line_found = True
-                        break
+                non_empty_line_found = any(
+                    len(lines[line_index]) > 0
+                    for line_index in range(1, len(lines))
+                )
                 if non_empty_line_found:
                     match = LANG_SPEC.fullmatch(lines[0])
                     # if there is any behind the lang spec, then it is normal text
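
Both rewrites here are behavior-preserving: repeated string concatenation in a loop becomes one `str.join` over a generator, and the for/break search for a non-empty continuation line becomes a short-circuiting `any()`. A standalone sketch of the reshaped language-specifier check; `LANG_SPEC` is an assumed stand-in pattern, since the real one is defined elsewhere in parser.py:

import re
from typing import Optional

# Assumed stand-in for parser.py's LANG_SPEC; the real pattern may differ.
LANG_SPEC = re.compile(r"[a-zA-Z0-9+_-]*")

def code_block_lang(children_content: str) -> Optional[str]:
    lines = children_content.split("\n")
    # The first line only counts as a language specifier if at least one
    # later line is non-empty; any() short-circuits on the first hit,
    # exactly like the old for/break loop.
    non_empty_line_found = any(
        len(lines[line_index]) > 0 for line_index in range(1, len(lines))
    )
    if non_empty_line_found and LANG_SPEC.fullmatch(lines[0]):
        return lines[0]
    return None

assert code_block_lang("python\nprint('hi')") == "python"
assert code_block_lang("print('hi')") is None  # single line: no specifier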
@@ -382,7 +377,7 @@
                 i = len(tokens)  # move to the end
                 break
 
-        if len(children_token_in_quote_block) > 0:
+        if children_token_in_quote_block:
             # tell the inner parse function that it's now inside a quote block
             children_nodes = list(
                 parse_tokens_generator(
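
The final change swaps `len(x) > 0` for plain truthiness, the idiomatic emptiness test for Python sequences (and the form PEP 8 recommends). For lists the two conditions are exactly equivalent:

children_token_in_quote_block = []
assert bool(children_token_in_quote_block) == (len(children_token_in_quote_block) > 0)

children_token_in_quote_block.append("token")
assert bool(children_token_in_quote_block) == (len(children_token_in_quote_block) > 0)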