Merge pull request #1296 from wcandillon/master
XQuery Lexer Improvements
commit 643b4fdbd5
3 changed files with 2884 additions and 2876 deletions

@@ -61,7 +61,8 @@ define(function(require, exports, module){
});
};
};
var keys = "after|all|allowing|ancestor|ancestor-or-self|and|any|append|array|as|ascending|at|attribute|base-uri|before|boundary-space|break|by|case|cast|castable|catch|check|child|collation|collection|comment|constraint|construction|contains|content|context|continue|copy|copy-namespaces|count|decimal-format|decimal-separator|declare|default|delete|descendant|descendant-or-self|descending|diacritics|different|digit|distance|div|document|document-node|element|else|empty|empty-sequence|encoding|end|entire|eq|every|exactly|except|exit|external|first|following|following-sibling|for|foreach|foreign|from|ft-option|ftand|ftnot|ftor|function|ge|greatest|group|grouping-separator|gt|idiv|if|import|in|index|infinity|inherit|insensitive|insert|instance|integrity|intersect|into|is|item|json|json-item|key|language|last|lax|le|least|let|levels|loop|lowercase|lt|minus-sign|mod|modify|module|most|namespace|namespace-node|ne|next|no|no-inherit|no-preserve|node|nodes|not|object|occurs|of|on|only|option|or|order|ordered|ordering|paragraph|paragraphs|parent|pattern-separator|per-mille|percent|phrase|position|preceding|preceding-sibling|preserve|previous|processing-instruction|relationship|rename|replace|return|returning|revalidation|same|satisfies|schema|schema-attribute|schema-element|score|self|sensitive|sentence|sentences|skip|sliding|some|stable|start|stemming|stop|strict|strip|structured-item|switch|text|then|thesaurus|times|to|treat|try|tumbling|type|typeswitch|union|unique|unordered|updating|uppercase|using|validate|value|variable|version|weight|when|where|while|wildcards|window|with|without|word|words|xquery|zero-digit".split("|");
var keys = "after|ancestor|ancestor-or-self|and|as|ascending|attribute|before|case|cast|castable|child|collation|comment|copy|count|declare|default|delete|descendant|descendant-or-self|descending|div|document|document-node|element|else|empty|empty-sequence|end|eq|every|except|first|following|following-sibling|for|function|ge|group|gt|idiv|if|import|insert|instance|intersect|into|is|item|last|le|let|lt|mod|modify|module|namespace|namespace-node|ne|node|only|or|order|ordered|parent|preceding|preceding-sibling|processing-instruction|rename|replace|return|satisfies|schema-attribute|schema-element|self|some|stable|start|switch|text|to|treat|try|typeswitch|union|unordered|validate|where|with|xquery|contains|paragraphs|sentences|times|words|by|collectionreturn|variable|version|option|when|encoding|toswitch|catch|tumbling|sliding|window|at|using|stemming|collection|schema|while|on|nodes|index|external|then|in|updating|value|of|containsbreak|loop|continue|exit|returning|append|json|position|strict".split("|");
var keywords = keys.map(
function(val) { return { name: "'" + val + "'", token: "keyword" }; }
);
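
For context, the keyword list above is turned into one lexer rule per keyword by the keys.map call. A minimal sketch of how such a rule table can be built and queried; this is not the actual Ace lexer, and the classify helper is hypothetical:

    // Build one { name, token } rule per keyword, mirroring the keys.map call above.
    var keys = "if|then|else|for|let|return|where|order|by".split("|");
    var keywords = keys.map(
        function(val) { return { name: "'" + val + "'", token: "keyword" }; }
    );

    // Hypothetical helper: look up the token type for a raw word.
    function classify(word) {
        var rule = keywords.filter(function(k) { return k.name === "'" + word + "'"; })[0];
        return rule ? rule.token : "text";
    }

    console.log(classify("return")); // "keyword"
    console.log(classify("foo"));    // "text"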

@@ -150,6 +151,7 @@ define(function(require, exports, module){
],
Pragma: [
{ name: "PragmaContents", token: pragma },
{ name: "'#'", token: pragma },
{ name: "'#)'", token: pragma, next: function(stack){ stack.pop(); } }
],
Comment: [

@@ -173,8 +175,7 @@ define(function(require, exports, module){
],
PI: [
{ name: "DirPIContents", token: pi },
{ name: "PITarget", token: pi },
{ name: "S", token: pi },
{ name: "'?'", token: pi },
{ name: "'?>'", token: pi, next: function(stack){ stack.pop(); } }
],
AposString: [
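
The next callbacks on the closing tokens drive a stack of lexical states: entering a pragma or processing instruction pushes a state, and '#)' or '?>' pops back to the enclosing one. A toy illustration with assumed state names, not the real rule table:

    // Toy state stack in the style of the rules above (names are illustrative).
    var stack = ["XQuery"];
    var pragmaClose = {
        name: "'#)'",
        token: "pragma",
        next: function(stack) { stack.pop(); }
    };

    stack.push("Pragma");                  // on "(#": enter the Pragma state
    console.log(stack[stack.length - 1]);  // "Pragma"

    pragmaClose.next(stack);               // on "#)": return to the outer state
    console.log(stack[stack.length - 1]);  // "XQuery"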

@@ -231,6 +232,7 @@ exports.XQueryLexer = function() {
}

if(token.name === "EOF") { break; }
if(token.value === "") { throw "Encountered empty string lexical rule."; }

tokens.push({
type: info === null ? "text" : (typeof(info.token) === "function" ? info.token(token.value) : info.token),

@@ -239,7 +241,7 @@ exports.XQueryLexer = function() {
if(info && info.next) {
info.next(stack);
}
}

} catch(e) {
if(e instanceof tokenizer.ParseException) {
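
Read together, the loop shown in these two hunks maps each raw token to a { type, value } pair, stops at EOF, rejects empty matches, and lets a rule's next callback adjust the state stack. A simplified, self-contained rendering of that flow; the emit function and its inputs are assumptions, not the exported API:

    function emit(rawTokens, rules, stack) {
        var tokens = [];
        for (var i = 0; i < rawTokens.length; i++) {
            var token = rawTokens[i];
            if (token.name === "EOF") { break; }
            if (token.value === "") { throw "Encountered empty string lexical rule."; }
            var info = rules[token.name] || null;
            tokens.push({
                type: info === null ? "text" : (typeof(info.token) === "function" ? info.token(token.value) : info.token),
                value: token.value
            });
            if (info && info.next) { info.next(stack); }
        }
        return tokens;
    }

    var out = emit(
        [{ name: "'return'", value: "return" }, { name: "EOF", value: "" }],
        { "'return'": { token: "keyword" } },
        ["start"]
    );
    console.log(out); // [ { type: "keyword", value: "return" } ]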

@@ -92,10 +92,10 @@ XMLComment
::= DirCommentContents | '-->' | EOF
/* ws: explicit */

PI ::= DirPIContents | '?>' | EOF
PI ::= DirPIContents | '?' | '?>' | EOF
/* ws: explicit */

Pragma ::= PragmaContents | '#)' | EOF
Pragma ::= PragmaContents | '#' | '#)' | EOF
/* ws: explicit */

Comment ::= ':)' | '(:' | CommentContents | EOF
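
The practical effect of listing '?' (and, for pragmas, '#') as an alternative is that a lone '?' inside a processing instruction no longer falls outside every rule. A rough sketch of a scanner that follows the updated PI production; this is hypothetical code, not the generated tokenizer:

    // Runs without '?' become DirPIContents, '?>' closes the PI,
    // and a lone '?' is a token of its own.
    function scanPI(body) {
        var tokens = [], i = 0;
        while (i < body.length) {
            if (body.slice(i, i + 2) === "?>") { tokens.push({ name: "'?>'", value: "?>" }); break; }
            if (body[i] === "?") { tokens.push({ name: "'?'", value: "?" }); i += 1; continue; }
            var j = i;
            while (j < body.length && body[j] !== "?") { j++; }
            tokens.push({ name: "DirPIContents", value: body.slice(i, j) });
            i = j;
        }
        return tokens;
    }

    console.log(scanPI("sum 1 ? 2 ?>"));
    // [ { name: "DirPIContents", value: "sum 1 " },
    //   { name: "'?'", value: "?" },
    //   { name: "DirPIContents", value: " 2 " },
    //   { name: "'?>'", value: "?>" } ]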

@@ -406,7 +406,7 @@ PragmaContents
DirCommentContents
::= ( ( Char - '-' ) | '-' ( Char - '-' ) )+
DirPIContents
::= ( Char* - ( Char* '?' Char* ) )
::= ( Char* - ( Char* '?' Char* ) )+
CDataSectionContents
::= ( Char+ - ( Char* ']]>' Char* ) ) & ']]'
| ( Char+ - ( Char* ']]>' Char* ) ) & $
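
The trailing '+' on DirPIContents requires at least one character per match, which lines up with the lexer's guard against empty lexical rules shown earlier. The difference is the same as between a '*' and a '+' quantifier; the regexes below are an illustrative reading of the EBNF, not the generated code:

    console.log(/^[^?]*$/.test(""));  // true:  the '*' form admits an empty match
    console.log(/^[^?]+$/.test(""));  // false: the '+' form needs at least one character
    console.log(/^[^?]+$/.test("x")); // true
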
File diff suppressed because it is too large