Castle: The best Real-Time/Embedded/HighTech language EVER. Attempt 2
Revision | f503dd15f787934c19b6092099d4bc39d6cf747b (tree) |
---|---|
Date | 2021-11-28 06:40:49 |
Author | Albert Mietus < albert AT mietus DOT nl > |
Committer | Albert Mietus < albert AT mietus DOT nl > |
@@ -0,0 +1,5 @@ | ||
1 | +.DS_Store | |
2 | +__pycache__ | |
3 | +.*\.pyc | |
4 | +.pytest_cache | |
5 | +.*\.dot | |
\ No newline at end of file |
@@ -0,0 +1,50 @@ | ||
1 | +from arpeggio import Optional, ZeroOrMore, OneOrMore, EOF | |
2 | +from arpeggio import RegExMatch as _ | |
3 | +from arpeggio import ParserPython | |
4 | + | |
5 | +def peg_grammar(): return OneOrMore(rule), EOF | |
6 | +def rule(): return rule_name, '<-', ordered_choice, ";" | |
7 | +def ordered_choice(): return sequence, ZeroOrMore('|', sequence) | |
8 | +def sequence(): return ZeroOrMore(prefix) | |
9 | +def prefix(): return Optional(['&','!']), suffix | |
10 | +def suffix(): return expression, Optional([ '?' , '*' , '+' , '#' ]) | |
11 | +def expression(): return [ regex_term, str_term, ('(', ordered_choice, ')'), rule_crossref] | |
12 | +def rule_crossref(): return rule_name | |
13 | +def rule_name(): return ID | |
14 | +def regex_term(): return [\ | |
15 | + (RE, re_no_slash, RE), | |
16 | + (REs3, str_no_s3, S3), | |
17 | + (REd3, str_no_d3, D3), | |
18 | + (REs1, str_no_s1, S1), | |
19 | + (REd1, str_no_d1, D1) | |
20 | + ] | |
21 | +def str_term(): return [\ | |
22 | + (S3, str_no_s3, S3), | |
23 | + (D3, str_no_d3, D3), | |
24 | + (S1, str_no_s1, S1), | |
25 | + (D1, str_no_d1, D1) | |
26 | + ] | |
27 | + | |
28 | +def ID(): return _(r"[a-zA-Z_]([a-zA-Z_]|[0-9])*") | |
29 | + | |
30 | +def re_no_slash(): return _(r"((\\/)|[^\/])*") | |
31 | + | |
32 | +def str_no_s1(): return _(r"((\\')|[^'\n])*") # Does NOT match multiline -- note 'multiline=False' is not the same | |
33 | +def str_no_d1(): return _(r'((\\")|[^"\n])*') # idem | |
34 | +def str_no_s3(): return _(r"([^']|('[^'])|(''[^']))*") # ALLOW multiline | |
35 | +def str_no_d3(): return _(r'''([^"]|("[^"])|(""[^"]))*''') # idem | |
36 | + | |
37 | +S1 = "'" | |
38 | +D1 = '"' | |
39 | +S3 = "'''" | |
40 | +D3 = '"""' | |
41 | +RE = '/' | |
42 | +REs1 = _(r"[rR]'") | |
43 | +REd1 = _(r'[rR]"') | |
44 | +REs3 = _(r"[rR]'''") | |
45 | +REd3 = _(r'[rR]"""') | |
46 | + | |
47 | + | |
48 | + | |
49 | +def comment(): return "//", _(".*\n") | |
50 | + |
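A minimal usage sketch (added commentary, not part of the commit): assuming the module above is saved as grammar.py (which matches the 'from grammar import *' in the test files below), the rule functions can be fed to arpeggio's ParserPython directly. The sample rule text and the multiline check mirror what the pytest files exercise.

    from arpeggio import ParserPython, NoMatch
    from grammar import peg_grammar, str_term, comment

    # Parse a tiny two-rule PEG grammar with the hand-written rule functions.
    parser = ParserPython(peg_grammar, comment, debug=False)
    tree = parser.parse("greeting <- 'hello' name ; name <- /[a-z]+/ ;")
    print(tree.tree_str())

    # Single-quoted terms stop at a newline; only triple-quoted ones may span lines.
    str_parser = ParserPython(str_term, comment, debug=False)
    str_parser.parse("'''two\nlines'''")      # accepted: triple-quoted, multiline allowed
    try:
        str_parser.parse("'two\nlines'")      # rejected, as test_multiline_only3 asserts
    except NoMatch:
        print("single-quoted terms are single-line only")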
@@ -0,0 +1,37 @@ | ||
1 | +// This is a fragment from ../examples/My_GrammarParser.Castle | |
2 | + | |
3 | + peg_grammar <- rule+ ; | |
4 | + rule <- rule_name '<-' ordered_choice ';' ; | |
5 | + ordered_choice <- sequence ('|' sequence)* ; | |
6 | + sequence <- prefix* ; | |
7 | + prefix <- ( AND | NOT )? suffix ; | |
8 | + suffix <- expression ( '?' | '*' | '+' | '#' )? ; | |
9 | + expression <- regex_term | |
10 | + | rule_crossref | |
11 | + | '(' ordered_choice ')' | |
12 | + | str_term | |
13 | + ; | |
14 | + rule_crossref <- rule_name; | |
15 | + rule_name <- ID ; | |
16 | + str_term <- "'" str_no_s1 "'" | |
17 | + | '"' str_no_d1 '"' | |
18 | + | "'''" str_no_s3 "'''" | |
19 | + | '"""' str_no_d3 '"""' | |
20 | + ; | |
21 | + regex_term <- '/' str_no_slash '/' | |
22 | + | "r'" str_no_s1 "'" | |
23 | + | 'r"' str_no_d1 '"' | |
24 | + | "r'''" str_no_s3 "'''" | |
25 | + | 'r"""' str_no_d3 '"""' | |
26 | + | "R'" str_no_s1 "'" | |
27 | + | 'R"' str_no_d1 '"' | |
28 | + | "R'''" str_no_s3 "'''" | |
29 | + | 'R"""' str_no_d3 '"""' | |
30 | + ; | |
31 | + str_no_slash <- /((\\/)|[^\/])*/ ; | |
32 | + str_no_XXX <- XXX | |
33 | + | ETC | |
34 | + ; | |
35 | + AND <- '&' ; | |
36 | + NOT <- '!' ; | |
37 | + |
@@ -0,0 +1,38 @@ | ||
1 | +import pytest | |
2 | +from grammar import * | |
3 | + | |
4 | +import arpeggio | |
5 | +RE, S = arpeggio.RegExMatch, arpeggio.StrMatch # shortcut | |
6 | + | |
7 | +show_dot=True | |
8 | + | |
9 | +def parse_regex(txt, pattern=None): | |
10 | + #print(f'\nXXX >>{txt}<<') | |
11 | + parser = ParserPython(regex_term, comment, debug=show_dot) | |
12 | + tree = parser.parse(txt) | |
13 | + assert tree.position_end == len(txt) , f"Not parsed whole input; Only: >>{txt[tree.position: tree.position_end]}<<; Not: >>{txt[tree.position_end:]}<<." | |
14 | + | |
15 | + assert isinstance(tree.rule, arpeggio.OrderedChoice) and tree.rule_name == "regex_term" | |
16 | + assert len(tree) == 3, "regex_term is an Ordered_Choice of always 3 Terminals" | |
17 | + | |
18 | + for e in tree: assert isinstance(e, arpeggio.Terminal) | |
19 | + if pattern: | |
20 | + for e,T in zip(tree,pattern): | |
21 | + if T is not None: assert isinstance(e.rule, T), f"{type(e.rule).__name__} doesn't match {T.__name__}" | |
22 | + | |
23 | + return tree | |
24 | + | |
25 | +def test_slash_simple(): parse_regex(r"/ABC/", pattern=[S,RE,S]) | |
26 | +def test_slash_slashonly(): parse_regex(r"/\//", pattern=[S,RE,S]) | |
27 | +def test_slash_withslash_r(): parse_regex(r"/ab\/c/", pattern=[S,RE,S]) | |
28 | + | |
29 | +def test_Rs1_simple(): parse_regex(r"R'ABC'", pattern=[RE,RE,S]) | |
30 | +def test_rs2_simple(): parse_regex(r"r'abc'", pattern=[RE,RE,S]) | |
31 | +def test_Rd1_simple(): parse_regex(r'R"ABC"', pattern=[RE,RE,S]) | |
32 | +def test_rd2_simple(): parse_regex(r'r"abc"', pattern=[RE,RE,S]) | |
33 | + | |
34 | +def test_grammar_re_no_slash(): parse_regex(r"/((\\/)|[^\/])*/") | |
35 | +def test_grammar_auto_re_no_slash(): parse_regex("/" + re_no_slash().to_match + "/")  # Same as above (unless the grammar changes) |
36 | + | |
37 | + | |
38 | + |
@@ -0,0 +1,52 @@ | ||
1 | +import pytest | |
2 | + | |
3 | +from grammar import * | |
4 | +import arpeggio | |
5 | +RE, S = arpeggio.RegExMatch, arpeggio.StrMatch # shortcut | |
6 | + | |
7 | +show_dot=True | |
8 | + | |
9 | +def parse_str(txt, pattern=[S, RE, S]): |
10 | + #print(f'\nXXX >>{txt}<<') |
11 | + parser = ParserPython(str_term, comment, debug=show_dot) |
12 | + tree = parser.parse(txt) |
13 | + assert tree.position_end == len(txt) , f"Not parsed whole input; Only: >>{txt[tree.position: tree.position_end]}<<; Not: >>{txt[tree.position_end:]}<<." |
14 | + | |
15 | + assert isinstance(tree.rule, arpeggio.OrderedChoice) and tree.rule_name == "str_term" | |
16 | + assert len(tree) == 3, "str_term is an Ordered_Choice of always 3 Terminals" |
17 | + | |
18 | + for e in tree: assert isinstance(e, arpeggio.Terminal) | |
19 | + if pattern: | |
20 | + for e,T in zip(tree,pattern): | |
21 | + if T is not None: assert isinstance(e.rule, T), f"{type(e.rule).__name__} doesn't match {T.__name__}" | |
22 | + | |
23 | + | |
24 | + return tree | |
25 | + | |
26 | + | |
27 | +def test_s1(): parse_str("'single 1'") | |
28 | +def test_d1(): parse_str('"double 1"') | |
29 | +def test_s3(): parse_str(r"'''single 3 b'''") | |
30 | +def test_d3(): parse_str('"""double 3"""') | |
31 | + | |
32 | +def test_multiline_s(): parse_str("""''' | |
33 | +line 1 | |
34 | +line 2 | |
35 | +'''""") | |
36 | +def test_multiline_d(): parse_str('''""" | |
37 | +line 1 | |
38 | +line 2 | |
39 | +"""''') | |
40 | + | |
41 | +def test_multiline_only3(): | |
42 | + import arpeggio | |
43 | + with pytest.raises(arpeggio.NoMatch): | |
44 | + parse_str("""' | |
45 | + line 1 | |
46 | + line 2 | |
47 | + '""") | |
48 | + with pytest.raises(arpeggio.NoMatch): | |
49 | + parse_str('''" | |
50 | + line 1 | |
51 | + line 2 | |
52 | + "''') |
@@ -0,0 +1,49 @@ | ||
1 | +import pytest | |
2 | +from grammar import * | |
3 | + | |
4 | +import arpeggio | |
5 | + | |
6 | +R, S, X = regex_term.__name__, str_term.__name__, rule_crossref.__name__ # shortcut in grammar | |
7 | + | |
8 | +show_dot=True | |
9 | +def parse_sequence(txt, pattern=None, show=False): | |
10 | + parser = ParserPython(sequence, comment, debug=show_dot) | |
11 | + tree = parser.parse(txt) | |
12 | + if show: print(f'\nPARSE_SEQUENCE >>{txt}<<\n{tree.tree_str()}') |
13 | + | |
14 | + assert tree.position_end == len(txt) , f"Not parsed whole input; Only: >>{txt[tree.position: tree.position_end]}<<; Not: >>{txt[tree.position_end:]}<<." | |
15 | + assert isinstance(tree.rule, arpeggio.ZeroOrMore) and tree.rule_name == "sequence" | |
16 | + | |
17 | + if pattern: validate_pattern(tree,pattern=pattern, show=show) | |
18 | + return tree | |
19 | + | |
20 | +def validate_pattern(tree, pattern=None, sub=0, show=False): | |
21 | + if show: print(f'\nVALIDATE_PATTERN pattern={pattern} sublevel={sub}\n{tree.tree_str()}') | |
22 | + assert len(tree) == len(pattern), f"Incorrect number of elements at sublevel={sub}" |
23 | + | |
24 | + exs=tree.prefix.suffix.expression | |
25 | + #if show: print("EXS", type(exs), "\n"+exs.tree_str()) | |
26 | + for ex,p in zip(exs, pattern): | |
27 | + if p is not None: | |
28 | + if not isinstance(p, (tuple, list)): | |
29 | + assert ex[0].rule_name == p, f"{ex} doesn't match given {p} sublevel={sub}" | |
30 | + else: | |
31 | + #print('XXX1\n', ex[1][0].tree_str()) | |
32 | + validate_pattern(tree=ex[1][0], pattern=p, sub=sub+1, show=show) | |
33 | + | |
34 | + | |
35 | + | |
36 | +def test_simple_1(): parse_sequence(r"abc", pattern=[X]) | |
37 | +def test_simple_2(): parse_sequence(r'A Bc', pattern=[X, X]) | |
38 | +def test_mix(): parse_sequence(r'/regex/ "string" crossref crossref', pattern=[R,S, X, X]) | |
39 | + | |
40 | +def test_sub(): parse_sequence(r'( A B )', pattern=[(X, X)]) | |
41 | +def test_mix_nosub(): parse_sequence(r'/regex/ "string" ( A B ) crossref', pattern=[R,S, None, X]) | |
42 | +def test_mix_sub(): parse_sequence(r'/regex/ "string" ( A B ) crossref', pattern=[R,S, (X, X), X]) | |
43 | + | |
44 | +def test_sub_sub(): parse_sequence(r'level0 ( level1_1 (level2a level2b ) level1_2) level0', pattern=[X, (X, (X,X), X), X]) | |
45 | +def test_sub_sub2(): parse_sequence(r'level0 ( level1_1 (level2a level2b ) level1_2) level0', pattern=[X, [X, [X,X], X], X]) | |
46 | + | |
47 | + | |
48 | + | |
49 | + |
@@ -0,0 +1,30 @@ | ||
1 | +import pytest | |
2 | +from grammar import * | |
3 | + | |
4 | +import arpeggio | |
5 | +RE, S = arpeggio.RegExMatch, arpeggio.StrMatch # shortcut | |
6 | + | |
7 | +show_dot=True | |
8 | + | |
9 | +def parse_rule(txt, pattern=None): | |
10 | + print(f'\nXXX >>{txt}<<') | |
11 | + parser = ParserPython(rule, comment, debug=show_dot) | |
12 | + tree = parser.parse(txt) | |
13 | + assert tree.position_end == len(txt) , f"Not parsed whole input; Only: >>{txt[tree.position: tree.position_end]}<<; Not: >>{txt[tree.position_end:]}<<." | |
14 | + | |
15 | + print(f'\nTREE\n{tree.tree_str()}') | |
16 | + | |
17 | +# assert len(tree) == 3, "txt_term is an Ordered_Choice of always 3 Terminals" | |
18 | +# assert isinstance(tree.txt, arpeggio.OrderedChoice) and tree.txt_name == "rule_term" | |
19 | + | |
20 | + | |
21 | +# for e in tree: assert isinstance(e, arpeggio.Terminal) | |
22 | +# if pattern: | |
23 | +# for e,T in zip(tree,pattern): | |
24 | +# if T is not None: assert isinstance(e.rule, T), f"{type(e.rule).__name__} doesn't match {T.__name__}" | |
25 | + | |
26 | + return tree | |
27 | + | |
28 | +def test_simple(): parse_rule(r"R <- A B C ;") | |
29 | + | |
30 | + |
@@ -0,0 +1,17 @@ | ||
1 | +import pytest | |
2 | + | |
3 | +from grammar import * | |
4 | + | |
5 | +show_dot=True | |
6 | + | |
7 | +def parse_file(filename, dir="pytst"): | |
8 | + with open(dir +"/" + filename) as f: | |
9 | + txt = f.read() | |
10 | + | |
11 | + parser = ParserPython(peg_grammar, comment, debug=show_dot) | |
12 | + tree = parser.parse(txt) | |
13 | + | |
14 | + assert tree.position_end == len(txt) , f"Not parsed whole input; Only: >>{txt[tree.position: tree.position_end]}<<; Not: >>{txt[tree.position_end:]}<<." |
15 | + return tree | |
16 | + | |
17 | +def test_grammar(): parse_file("grammar.peg") |
@@ -0,0 +1,81 @@ | ||
1 | +/* (C) Albert Mietus. Demo and test */ | |
2 | + | |
3 | +from My_ExpressionParser import Fixed_ExpressionParser, Init_ExpressionParser, Port_ExpressionParser; | |
4 | + | |
5 | +from std.data.grammars import Grammer, Rule, ID; | |
6 | +from std.algorithms.parser import PEG; | |
7 | + | |
8 | +implement ParserDemo.Base_ExpressionParser { | |
9 | +exp on self.text { | |
10 | + """Just call parse the input --using the parser-- and output it.""" | |
11 | + self.tree := self.parser.parse(exp); | |
12 | +} | |
13 | +} //End Base_ExpressionParser | |
14 | +/*****************************************************************************************************/ | |
15 | + | |
16 | + | |
17 | +implement ParserDemo.Fixed_ExpressionParser { | |
18 | + | |
19 | +- Grammar: my_expression(){ | |
20 | + // ToDo: logic expressions (&&, etc.) |
21 | + | |
22 | + Rule: expr; | |
23 | + Rule: atom_value, unary, exponentiation, multy_divide, add_minus; | |
24 | + Rule: literal_value, literal_number, literal_str; | |
25 | + Rule: named_value, variable, funcall, parms; |
26 | + | |
27 | + atom_value <- literal_value | |
28 | + | '(' expr ')' | |
29 | + | named_value | |
30 | + ; | |
31 | + literal_value <- literal_number | |
32 | + | literal_str | |
33 | + //| Booleans etc. | |
34 | + ; | |
35 | + literal_number <- [:digit:]+ | |
36 | + //| ToDo: Float, Complex, etc. |
37 | + ; | |
38 | + literal_str <- '"' ( [^\'\n]* ) '"' // Note: \'==='; but highlighting is better | |
39 | + | "'" ( [^\"\n]* ) "'" | |
40 | + | '"""' ( [.\n]*? ) '"""' | |
41 | + | "'''" ( [.\n]*? ) "'''" | |
42 | + ; | |
43 | + named_value <- variable |
44 | + | funcall | |
45 | + ; | |
46 | + funcall <- ID '(' parms ')'; | |
47 | + variable <- ID; | |
48 | + parms <- expr (',' expr)* ; // Is this correct wrt recursion? |
49 | + | |
50 | + unary <- ('+' | '-')? atom_value; | |
51 | + exponentiation <- unary ('**' expr)?; | |
52 | + multy_divide <- exponentiation (('*' | '/' | '%') expr)?; | |
53 | + add_minus <- multy_divide (('+'|'-') expr)?; | |
54 | + expr <- add_minus; |
55 | + | |
56 | + return expr; | |
57 | +} | |
58 | + | |
59 | +- init() { | |
60 | + Grammar g = self.my_expression(); |
61 | + self.parser = PEG(g); | |
62 | +} | |
63 | + | |
64 | +} //End Fixed_ExpressionParser | |
65 | +/*****************************************************************************************************/ | |
66 | + | |
67 | + | |
68 | +implement ParserDemo.Init_ExpressionParser { | |
69 | +- init(Grammer:g) { | |
70 | + self.parser = PEG(g); | |
71 | +} | |
72 | +} //End Init_ExpressionParser | |
73 | +/*****************************************************************************************************/ | |
74 | + | |
75 | + | |
76 | +implement ParserDemo.Port_ExpressionParser { | |
77 | +g on expression | |
78 | +{ | |
79 | + self.parser = PEG(g); | |
80 | +} | |
81 | +} //End Port_ExpressionParser | |
\ No newline at end of file |
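The expression grammar in Fixed_ExpressionParser encodes operator priority purely by rule layering (atom_value, unary, exponentiation, multy_divide, add_minus): each level refers only to the next-higher-priority level plus an optional right-recursive tail, so the PEG never contains left recursion. A rough arpeggio sketch of that same layering (an illustration added here, not part of the commit; the rule names are borrowed from the Castle fragment):

    from arpeggio import Optional, EOF, ParserPython
    from arpeggio import RegExMatch as _

    def literal_number(): return _(r"\d+")
    def atom_value():     return [literal_number, ("(", expr, ")")]
    def unary():          return Optional(["+", "-"]), atom_value
    def exponentiation(): return unary, Optional("**", expr)
    def multy_divide():   return exponentiation, Optional(["*", "/", "%"], expr)
    def add_minus():      return multy_divide, Optional(["+", "-"], expr)
    def expr():           return add_minus
    def calc():           return expr, EOF

    parser = ParserPython(calc)
    print(parser.parse("1 + 2 * (3 - 4) ** 5").tree_str())

Note that the right-recursive tails make the binary operators group to the right (1-2-3 parses as 1-(2-3)); the "// Is this correct wrt recursion?" remark in the Castle source points at the same open question.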
@@ -0,0 +1,23 @@ | ||
1 | +/* (C) Albert Mietus. Demo and test */ | |
2 | + | |
3 | +from ParserDemo import Abstract_Parser, Base_ChunkParser; | |
4 | +from std.data.grammars import Grammer; |
5 | + | |
6 | + | |
7 | +component Fixed_ExpressionParser : Abstract_Parser, Base_ChunkParser { | |
8 | + """The Fixed_ExpressionParser has the grammer build-in; unlike the :ref:`Init_ExpressionParser` and :ref:`Port_ExpressionParser`""" | |
9 | +} | |
10 | + | |
11 | + | |
12 | + | |
13 | +component Init_ExpressionParser : Base_ExpressionParser { | |
14 | + """The Init_ExpressionParser reads the grammer when it is initialized""" | |
15 | +- init(Grammer:g) | |
16 | +} | |
17 | + | |
18 | + | |
19 | + | |
20 | +component Port_ExpressionParser : Base_ExpressionParser { | |
21 | + """The Port_ExpressionParser reads the grammer via a port (unlike the :ref:`Fixed_ExpressionParser`.""" | |
22 | + port Grammar<in> expression; | |
23 | +} |
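A loose Python analogy (added for illustration; not Castle, and not part of this commit) of the three component variants declared above. The grammar is either built in, injected at init, or supplied later via a port:

    GRAMMAR_TEXT = "expr <- term (('+'|'-') term)* ;"   # stand-in for a real Grammar object

    class FixedParser:                      # ~ Fixed_ExpressionParser: grammar is built in
        def __init__(self):
            self.grammar = GRAMMAR_TEXT

    class InitParser:                       # ~ Init_ExpressionParser: grammar handed to init()
        def __init__(self, grammar):
            self.grammar = grammar

    class PortParser:                       # ~ Port_ExpressionParser: grammar arrives on a port
        def __init__(self):
            self.grammar = None
        def on_expression(self, grammar):   # ~ 'port Grammar<in> expression'
            self.grammar = grammar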
@@ -0,0 +1,67 @@ | ||
1 | +/* (C) Albert Mietus. Demo and test */ | |
2 | + | |
3 | +from My_GrammarParser import GrammarParser; | |
4 | +from std.data.grammars import Grammer, Rule, ID, STR, REGEXP; | |
5 | +from std.algorithms.parser import PEG; | |
6 | + | |
7 | +implement GrammerParser{ | |
8 | +- init() { | |
9 | + Grammar g = self.my_grammar(); |
10 | + self.parser = PEG(g); | |
11 | +} | |
12 | + | |
13 | +- Grammar: my_grammar(){ | |
14 | + """Design: we use more-or-less the same names for non-terminals as Arpeggio uses. | |
15 | + | |
16 | + .. seealso:: | |
17 | + | |
18 | + * http://textx.github.io/Arpeggio/stable/grammars/#grammars-written-in-peg-notations | |
19 | + * https://github.com/textX/Arpeggio/blob/master/arpeggio/peg.py | |
20 | + """ | |
21 | + | |
22 | + Rule: peg_grammar, rule, rule_name, rule_crossref; | |
23 | + Rule: ordered_choice, sequence, prefix, suffix, expression; | |
24 | + Rule: regex_term, str_term; |
25 | + Rule: AND, NOT, LEFT_ARROW, ORDERED_CHOICE; | |
26 | + | |
27 | + // ToDo Comment, str_match; | |
28 | + peg_grammar <- rule+; | |
29 | + rule <- rule_name '<-' ordered_choice ';' ; | |
30 | + ordered_choice <- sequence ('|' sequence)* ; | |
31 | + sequence <- prefix* ; | |
32 | + prefix <- ( AND | NOT )? suffix ; | |
33 | + suffix <- expression ( '?' | '*' | '+' | '#' )? ; |
34 | + expression <- regex_term | |
35 | + | rule_crossref | |
36 | + | '(' ordered_choice ')' | |
37 | + | str_term | |
38 | + ; | |
39 | + rule_crossref <- rule_name; | |
40 | + rule_name <- ID ; | |
41 | + regex_term <- '/' str_no_slash '/' | |
42 | + | "r'" str_no_s1 "'" | |
43 | + | 'r"' str_no_d1 '"' | |
44 | + | "r'''" str_no_s3 "'''" | |
45 | + | 'r"""' str_no_d3 '"""' | |
46 | + | "R'" str_no_s1 "'" | |
47 | + | 'R"' str_no_d1 '"' | |
48 | + | "R'''" str_no_s3 "'''" | |
49 | + | 'R"""' str_no_d3 '"""' | |
50 | + ; | |
51 | + str_term <- "'" str_no_s1 "'" | |
52 | + | '"' str_no_d1 '"' | |
53 | + | "'''" str_no_s3 "'''" | |
54 | + | '"""' str_no_d3 '"""' | |
55 | + str_no_slash <- /[^/\\]*(?:\\.[^/\\]*)*/ ; | |
56 | + str_no_XXX <- XXX | |
57 | + | ETC | |
58 | + ; | |
59 | + AND <- '&' ; | |
60 | + NOT <- '!' ; | |
61 | + | |
62 | + return peg_grammar; | |
63 | + | |
64 | +} | |
65 | +} //End of GrammerParser | |
66 | +/*****************************************************************************************************/ | |
67 | + |
@@ -0,0 +1,6 @@ | ||
1 | +/* (C) Albert Mietus. Demo and test */ | |
2 | + | |
3 | +from ParserDemo import Abstract_Parser, Base_ChunkParser; | |
4 | + | |
5 | +component GrammerParser : Abstract_Parser, Base_ChunkParser { | |
6 | +} | |
\ No newline at end of file |
@@ -0,0 +1,43 @@ | ||
1 | +/* (C) Albert Mietus. Demo and test */ | |
2 | + | |
3 | +from std.data.grammars import Grammer; | |
4 | + | |
5 | + | |
6 | +component Abstract_Parser : Component { | |
7 | + """A *Parser* parses a string, using a (formal) grammar, and outputs the ParseTree. | |
8 | + | |
9 | + Typically, the string is read via the in-port, which triggers processing, and results in a new ParseTree (or: | |
10 | + 'concrete syntax tree') dispatched via the output-port. | |
11 | + | |
12 | + The grammar can be *hardcoded* in a final component or passed in via a port or during initialization. | |
13 | + | |
14 | + .. seealso:: Some example components | |
15 | + | |
16 | + * :ref:`Fixed_ExpressionParser` |
17 | + * :ref:`Init_ExpressionParser` |
18 | + * :ref:`Port_ExpressionParser` |
19 | + """ | |
20 | + | |
21 | + port str<in> text; | |
22 | + port tree<out> parse_tree; | |
23 | +} | |
24 | + | |
25 | + | |
26 | + | |
27 | +component Base_ChunkParser: Abstract_Parser { | |
28 | + """A *ChunkParser* typically parses a sub-string of a bigger text; on behalve of the main-parser. | |
29 | + | |
30 | + For example: the main parser may recognize an expression as a whole and transfer that text to an (Expression) |
31 | + ChunkParser, which parses the details. The resulting ParseTree (of the expression) is then integrated into the |
32 | + main ParseTree. |
33 | + | |
34 | + By using a *ChunkParser*, the main parser does not need to be aware of details like precedence and |
35 | + associativity of the operators. And so, the *main grammar* is simpler; there is no need to handle the (typical) |
36 | + `left-recursion <https://en.wikipedia.org/wiki/Left_recursion>`_ of expressions, nor other pitfalls. All that |
37 | + work is left to the "chunk". |
38 | + """ | |
39 | +} | |
40 | + | |
41 | + | |
42 | + | |
43 | + |
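To make the ChunkParser idea above concrete, a rough Python/arpeggio sketch (added commentary, not part of the commit): the main grammar swallows the right-hand side of a statement as one opaque chunk, and a second parser, the chunk parser, handles the expression details; its subtree could then be grafted back into the main tree.

    from arpeggio import ZeroOrMore, EOF, ParserPython
    from arpeggio import RegExMatch as _

    # Main grammar: the right-hand side is kept opaque ("the chunk").
    def name():      return _(r"[a-zA-Z_]\w*")
    def chunk():     return _(r"[^;]+")                # swallowed as-is; parsed later
    def statement(): return name, "=", chunk, ";", EOF

    # Chunk grammar: only here do precedence and associativity matter.
    def number():    return _(r"\d+")
    def product():   return number, ZeroOrMore(["*", "/"], number)
    def total():     return product, ZeroOrMore(["+", "-"], product), EOF

    main_parser  = ParserPython(statement)
    chunk_parser = ParserPython(total)

    stmt = main_parser.parse("answer = 6 * 7 + 1 ;")
    expression_text = stmt[2].value          # the opaque chunk, still plain text
    sub_tree = chunk_parser.parse(expression_text)
    print(sub_tree.tree_str())               # this subtree would be grafted into stmt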