@@ -682,6 +682,7 @@ def tokeniter(self, source, name, filename=None, state=None):
         balancing_stack = []
         lstrip_unless_re = self.lstrip_unless_re
         newlines_stripped = 0
+        line_starting = True

         while 1:
             # tokenizer loop
@@ -731,11 +732,11 @@ def tokeniter(self, source, name, filename=None, state=None):
                     ):
                         # The start of text between the last newline and the tag.
                         l_pos = text.rfind("\n") + 1
-
-                        # If there's only whitespace between the newline and the
-                        # tag, strip it.
-                        if not lstrip_unless_re.search(text, l_pos):
-                            groups = (text[:l_pos],) + groups[1:]
+                        if l_pos > 0 or line_starting:
+                            # If there's only whitespace between the newline and the
+                            # tag, strip it.
+                            if not lstrip_unless_re.search(text, l_pos):
+                                groups = (text[:l_pos],) + groups[1:]

                 for idx, token in enumerate(tokens):
                     # failure group
@@ -794,6 +795,8 @@ def tokeniter(self, source, name, filename=None, state=None):
                     yield lineno, tokens, data
                     lineno += data.count("\n")

+                line_starting = m.group()[-1:] == "\n"
+
                 # fetch new position into new variable so that we can check
                 # if there is a internal parsing error which would result
                 # in an infinite loop
0 commit comments