diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl1.rst b/docs/source/doc/kaleidoscope/PythonLangImpl1.rst index 7b1c3a6..00b16fb 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl1.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl1.rst @@ -115,13 +115,13 @@ following simple example computes `Fibonacci numbers `_: -.. code-block:: python +.. code-block:: text # Compute the x'th fibonacci number. - def fib(x): - if x < 3: + def fib(x) + if x < 3 return 1 - else: + else return fib(x-1)+fib(x-2) # This expression will compute the 40th number. @@ -226,7 +226,7 @@ the line. # Regular expressions that tokens and comments of our language. REGEX_NUMBER = re.compile('[0-9]+(?:.[0-9]+)?') - REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9]\ *') + REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9]*') REGEX_COMMENT = re.compile('#.*') diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl2.rst b/docs/source/doc/kaleidoscope/PythonLangImpl2.rst index 8c915db..a375512 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl2.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl2.rst @@ -772,7 +772,7 @@ Lexer # Regular expressions that tokens and comments of our language. REGEX_NUMBER = re.compile('[0-9]+(?:\.[0-9]+)?') - REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9] *') + REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9]*') REGEX_COMMENT = re.compile('#.*') def Tokenize(string): @@ -811,7 +811,7 @@ Lexer yield CharacterToken(string[0]) string = string[1:] - yield EOFToken() + yield EOFToken() diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl3.rst b/docs/source/doc/kaleidoscope/PythonLangImpl3.rst index 5719bb2..15ec212 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl3.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl3.rst @@ -1058,7 +1058,7 @@ Main driver code. '+': 20, '-': 20, '*': 40 - } + } # Run the main "interpreter loop". 
while True: diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl6.rst b/docs/source/doc/kaleidoscope/PythonLangImpl6.rst index 2e98cef..8ff31d4 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl6.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl6.rst @@ -847,7 +847,7 @@ Lexer # Regular expressions that tokens and comments of our language. REGEX_NUMBER = re.compile('[0-9]+(?:\.[0-9]+)?') - REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9] *') + REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9]*') REGEX_COMMENT = re.compile('#.*') def Tokenize(string): diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl7.rst b/docs/source/doc/kaleidoscope/PythonLangImpl7.rst index d3dfb74..629608c 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl7.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl7.rst @@ -1000,7 +1000,7 @@ Lexer # Regular expressions that tokens and comments of our language. REGEX_NUMBER = re.compile('[0-9]+(?:\.[0-9]+)?') - REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9] *') + REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9]*') REGEX_COMMENT = re.compile('#.*') def Tokenize(string):