Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 7 additions & 14 deletions discord_markdown_ast_parser/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,7 +233,7 @@ def parse_tokens_generator(
yield Node(NodeType.URL_WITHOUT_PREVIEW_EMBEDDED, url=current_token.groups[1], content=current_token.groups[0])
i += 1
continue

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Function parse_tokens_generator refactored with the following changes:

This removes the following comments (why?):

# there must be at least one other non-empty line
# treat all children token as inline text
# (the content doesn't matter, there just has to be one)

# URL with preview
if LexingRule.URL_WITH_PREVIEW in current_token:
yield Node(NodeType.URL_WITH_PREVIEW, url=current_token.value)
Expand Down Expand Up @@ -297,21 +297,14 @@ def parse_tokens_generator(
tokens[i + 1 :], [current_token.lexing_rule]
)
if children_token is not None:
children_content = ""
# treat all children token as inline text
for child_token in children_token:
children_content += child_token.value

children_content = "".join(child_token.value for child_token in children_token)
# check for a language specifier
lines = children_content.split("\n")
# there must be at least one other non-empty line
# (the content doesn't matter, there just has to be one)
non_empty_line_found = False
lang = None
for line_index in range(1, len(lines)):
if len(lines[line_index]) > 0:
non_empty_line_found = True
break
non_empty_line_found = any(
len(lines[line_index]) > 0
for line_index in range(1, len(lines))
)
if non_empty_line_found:
match = LANG_SPEC.fullmatch(lines[0])
# if there is anything after the lang spec, then it is normal text
Expand Down Expand Up @@ -363,7 +356,7 @@ def parse_tokens_generator(
i = len(tokens) # move to the end
break

if len(children_token_in_quote_block) > 0:
if children_token_in_quote_block:
# tell the inner parse function that it's now inside a quote block
children_nodes = list(
parse_tokens_generator(
Expand Down