@attr.s(slots=True)
class Delimiter:
    """Information about one emphasis-like delimiter run member.

    One ``Delimiter`` is recorded per marker character; pairing rules later
    match openers to closers using these fields.
    """

    # Char code of the starting marker (number).
    marker: int = attr.ib()

    # Total length of these series of delimiters.
    length: int = attr.ib()

    # An amount of characters before this one that's equivalent to
    # current one. In plain English: if this delimiter does not open
    # an emphasis, neither do previous `jump` characters.
    #
    # Used to skip sequences like "*****" in one step, for 1st asterisk
    # value will be 0, for 2nd it's 1 and so on.
    jump: int = attr.ib()

    # A position of the token this delimiter corresponds to.
    token: int = attr.ib()

    # If this delimiter is matched as a valid opener, `end` will be
    # equal to its position, otherwise it's `-1`.
    end: int = attr.ib()

    # Boolean flags that determine if this delimiter could open or close
    # an emphasis.
    open: bool = attr.ib()
    close: bool = attr.ib()

    # Defaults to None, so the annotation is Optional[bool] rather than
    # the bare `bool` the original carried.
    # NOTE(review): no visible reader of this field in this chunk —
    # semantics of `level` unconfirmed here.
    level: Optional[bool] = attr.ib(default=None)
class StateInline(StateBase):
    def __init__(
        self,
        src: str,
        md: "MarkdownIt",
        env: MutableMapping,
        outTokens: List[Token],
    ):
        """Initialise inline-parsing state over the text *src*.

        Tokens are appended to *outTokens*; per-token metadata is kept in a
        parallel ``tokens_meta`` list, pre-filled with ``None`` entries.
        """
        self.src = src
        self.md = md
        self.env = env
        self.tokens = outTokens
        self.tokens_meta: List[Optional[dict]] = [None] * len(outTokens)

        # Scan cursor and upper bound over `src`.
        self.pos = 0
        self.posMax = len(self.src)

        # Current nesting level plus not-yet-flushed literal text.
        self.level = 0
        self.pending = ""
        self.pendingLevel = 0

        # Stores { start: end } pairs. Useful for backtrack
        # optimization of pairs parse (emphasis, strikes).
        self.cache: Dict[int, int] = {}

        # List of emphasis-like delimiters for current tag.
        self.delimiters: List[Delimiter] = []

        # Stack of delimiter lists for upper level tags.
        self._prev_delimiters: List[List[Delimiter]] = []

        # backtick length => last seen position
        self.backticks: Dict[int, int] = {}
        self.backticksScanned = False

    def __repr__(self):
        """Short debugging summary: cursor position and token count."""
        name = self.__class__.__name__
        return f"{name}(pos=[{self.pos} of {self.posMax}], token={len(self.tokens)})"
[文档]defpush(self,ttype,tag,nesting):"""Push new token to "stream". If pending text exists - flush it as text token """ifself.pending:self.pushPending()token=Token(ttype,tag,nesting)token_meta=Noneifnesting<0:# closing tagself.level-=1self.delimiters=self._prev_delimiters.pop()token.level=self.levelifnesting>0:# opening tagself.level+=1self._prev_delimiters.append(self.delimiters)self.delimiters=[]token_meta={"delimiters":self.delimiters}self.pendingLevel=self.levelself.tokens.append(token)self.tokens_meta.append(token_meta)returntoken
[文档]defscanDelims(self,start,canSplitWord):""" Scan a sequence of emphasis-like markers, and determine whether it can start an emphasis sequence or end an emphasis sequence. - start - position to scan from (it should point at a valid marker); - canSplitWord - determine if these markers can be found inside a word """pos=startleft_flanking=Trueright_flanking=Truemaximum=self.posMaxmarker=self.srcCharCode[start]# treat beginning of the line as a whitespacelastChar=self.srcCharCode[start-1]ifstart>0else0x20whilepos<maximumandself.srcCharCode[pos]==marker:pos+=1count=pos-start# treat end of the line as a whitespacenextChar=self.srcCharCode[pos]ifpos<maximumelse0x20isLastPunctChar=isMdAsciiPunct(lastChar)orisPunctChar(chr(lastChar))isNextPunctChar=isMdAsciiPunct(nextChar)orisPunctChar(chr(nextChar))isLastWhiteSpace=isWhiteSpace(lastChar)isNextWhiteSpace=isWhiteSpace(nextChar)ifisNextWhiteSpace:left_flanking=FalseelifisNextPunctChar:ifnot(isLastWhiteSpaceorisLastPunctChar):left_flanking=FalseifisLastWhiteSpace:right_flanking=FalseelifisLastPunctChar:ifnot(isNextWhiteSpaceorisNextPunctChar):right_flanking=FalseifnotcanSplitWord:can_open=left_flankingand((notright_flanking)orisLastPunctChar)can_close=right_flankingand((notleft_flanking)orisNextPunctChar)else:can_open=left_flankingcan_close=right_flankingreturnScanned(can_open,can_close,count)