From f9376bf2d0d8c969eed58cec68b7b782b2642827 Mon Sep 17 00:00:00 2001 From: John Szakmeister Date: Fri, 30 Nov 2012 03:29:40 -0500 Subject: [PATCH 1/4] Fix an error in the full listing for the kaleidoscope lexer. --- docs/source/doc/kaleidoscope/PythonLangImpl2.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl2.rst b/docs/source/doc/kaleidoscope/PythonLangImpl2.rst index 8c915db..d5d6efc 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl2.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl2.rst @@ -811,7 +811,7 @@ Lexer yield CharacterToken(string[0]) string = string[1:] - yield EOFToken() + yield EOFToken() From 7c7c198953b851711a99928303acd5165f2f6f30 Mon Sep 17 00:00:00 2001 From: John Szakmeister Date: Fri, 30 Nov 2012 04:01:04 -0500 Subject: [PATCH 2/4] Mark a block of code as text, and remove the python-isms. The kaleidoscope language being implemented looks similar to Python, but is not the same. --- docs/source/doc/kaleidoscope/PythonLangImpl1.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl1.rst b/docs/source/doc/kaleidoscope/PythonLangImpl1.rst index 7b1c3a6..90943c1 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl1.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl1.rst @@ -115,13 +115,13 @@ following simple example computes `Fibonacci numbers `_: -.. code-block:: python +.. code-block:: text # Compute the x'th fibonacci number. - def fib(x): - if x < 3: + def fib(x) + if x < 3 return 1 - else: + else return fib(x-1)+fib(x-2) # This expression will compute the 40th number. From 78f81fec73f69ec14cf894ebdb9c32b2b5aa2631 Mon Sep 17 00:00:00 2001 From: John Szakmeister Date: Fri, 30 Nov 2012 04:04:14 -0500 Subject: [PATCH 3/4] Fix the id matching in the kaleidoscope tutorial. 
--- docs/source/doc/kaleidoscope/PythonLangImpl1.rst | 2 +- docs/source/doc/kaleidoscope/PythonLangImpl2.rst | 2 +- docs/source/doc/kaleidoscope/PythonLangImpl6.rst | 2 +- docs/source/doc/kaleidoscope/PythonLangImpl7.rst | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl1.rst b/docs/source/doc/kaleidoscope/PythonLangImpl1.rst index 90943c1..00b16fb 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl1.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl1.rst @@ -226,7 +226,7 @@ the line. # Regular expressions that tokens and comments of our language. REGEX_NUMBER = re.compile('[0-9]+(?:.[0-9]+)?') - REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9]\ *') + REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9]*') REGEX_COMMENT = re.compile('#.*') diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl2.rst b/docs/source/doc/kaleidoscope/PythonLangImpl2.rst index d5d6efc..a375512 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl2.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl2.rst @@ -772,7 +772,7 @@ Lexer # Regular expressions that tokens and comments of our language. REGEX_NUMBER = re.compile('[0-9]+(?:\.[0-9]+)?') - REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9] *') + REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9]*') REGEX_COMMENT = re.compile('#.*') def Tokenize(string): diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl6.rst b/docs/source/doc/kaleidoscope/PythonLangImpl6.rst index 2e98cef..8ff31d4 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl6.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl6.rst @@ -847,7 +847,7 @@ Lexer # Regular expressions that tokens and comments of our language. 
    REGEX_NUMBER = re.compile('[0-9]+(?:\.[0-9]+)?') - REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9] *') + REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9]*') REGEX_COMMENT = re.compile('#.*') def Tokenize(string): diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl7.rst b/docs/source/doc/kaleidoscope/PythonLangImpl7.rst index d3dfb74..629608c 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl7.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl7.rst @@ -1000,7 +1000,7 @@ Lexer # Regular expressions that tokens and comments of our language. REGEX_NUMBER = re.compile('[0-9]+(?:\.[0-9]+)?') - REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9] *') + REGEX_IDENTIFIER = re.compile('[a-zA-Z][a-zA-Z0-9]*') REGEX_COMMENT = re.compile('#.*') def Tokenize(string): From b3a04c4a34213e29b27245c91ab2998a4bed407d Mon Sep 17 00:00:00 2001 From: John Szakmeister Date: Fri, 30 Nov 2012 04:45:30 -0500 Subject: [PATCH 4/4] Move the closing curly brace to line up with the rest of the code. --- docs/source/doc/kaleidoscope/PythonLangImpl3.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/doc/kaleidoscope/PythonLangImpl3.rst b/docs/source/doc/kaleidoscope/PythonLangImpl3.rst index 5719bb2..15ec212 100644 --- a/docs/source/doc/kaleidoscope/PythonLangImpl3.rst +++ b/docs/source/doc/kaleidoscope/PythonLangImpl3.rst @@ -1058,7 +1058,7 @@ Main driver code. '+': 20, '-': 20, '*': 40 - } + } # Run the main "interpreter loop". while True: