From 8898ad9b25fca6afe2374d293a981db02a83d7e9 Mon Sep 17 00:00:00 2001
From: "FWoltermann@gmail.com"
Date: Thu, 31 May 2012 14:46:27 +0000
Subject: Committing the documentation to svn to have it accessible online

---
 Doc/doxygen/html/_parser_8cpp_source.html | 426 ++++++++++++++++++++++++++++++
 1 file changed, 426 insertions(+)
 create mode 100644 Doc/doxygen/html/_parser_8cpp_source.html

(limited to 'Doc/doxygen/html/_parser_8cpp_source.html')

diff --git a/Doc/doxygen/html/_parser_8cpp_source.html b/Doc/doxygen/html/_parser_8cpp_source.html
new file mode 100644
index 0000000..8568c49
--- /dev/null
+++ b/Doc/doxygen/html/_parser_8cpp_source.html
@@ -0,0 +1,426 @@

Starshatter_Open: D:/SRC/StarshatterSVN/Parser/Parser.cpp Source File
Parser.cpp

/*  Project STARS
    John DiCamillo Software Consulting
    Copyright © 1997-2004. All Rights Reserved.

    SUBSYSTEM:    parser
    FILE:         parser.cpp
    AUTHOR:       John DiCamillo


    OVERVIEW
    ========
    Implementation of the generic Parser class
*/

#include "MemDebug.h"
#include "reader.h"
#include "token.h"
#include "parser.h"
#include <stdio.h>
#include <stdlib.h>
void Print(const char* fmt, ...);

static int dump_tokens = 0;

// +-------------------------------------------------------------------+

Term* error(char* msg, const Token& token)
{
    static char buf[1024];
    sprintf_s(buf, " near '%s' in line %d.", (const char*) token.symbol(), token.line());

    return error(msg, buf);
}

// +-------------------------------------------------------------------+
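// Note: the overload above only builds a " near '<symbol>' in line <n>" suffix
// and forwards it to a two-argument error(msg, detail) overload that is
// declared elsewhere in this subsystem; only this Token-aware wrapper is
// defined in this file.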
Parser::Parser(Reader* r)
{
    reader = r ? r : new(__FILE__, __LINE__) ConsoleReader;
    lexer  = new(__FILE__, __LINE__) Scanner(reader);

    Token::addKey("true",  KEY_TRUE);
    Token::addKey("false", KEY_FALSE);
    Token::addKey(":",     KEY_DEF);
    Token::addKey("-",     KEY_MINUS);
}

Parser::~Parser()
{
    delete lexer;
    delete reader;
    //Token::close();
}
Term*
Parser::ParseTerm()
{
    Term* t = ParseTermBase();
    if (t == 0) return t;

    Term* t2 = ParseTermRest(t);

    return t2;
}
Term*
Parser::ParseTermRest(Term* base)
{
    Token t = lexer->Get();

    switch (t.type()) {
    default:
        lexer->PutBack();
        return base;

    case Token::StringLiteral: {
        // concatenate adjacent string literal tokens:
        TermText* text = base->isText();
        if (text) {
            TermText* base2 = new(__FILE__, __LINE__) TermText(text->value() + t.symbol()(1, t.symbol().length()-2));
            delete base;
            return ParseTermRest(base2);
        }
        else {
            lexer->PutBack();
        }
    }
    break;

    case Token::Keyword:
        switch (t.key()) {
        case KEY_DEF:
            if (base->isText())
                return new(__FILE__, __LINE__) TermDef(base->isText(), ParseTerm());
            else
                return error("(Parse) illegal lhs in def", t);

        default:
            lexer->PutBack();
            return base;
        }
        break;
    }

    return base;
}
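// ParseTermRest() handles the two constructs that may follow a completed
// term: adjacent string literals, which are concatenated into a single
// TermText, and the ':' keyword, which turns a preceding text term into a
// named definition (TermDef) whose value is the next parsed term.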
static int xtol(const char* p)
{
    int n = 0;

    while (*p) {
        char digit = *p++;
        n *= 16;

        if (digit >= '0' && digit <= '9')
            n += digit - '0';

        else if (digit >= 'a' && digit <= 'f')
            n += digit - 'a' + 10;

        else if (digit >= 'A' && digit <= 'F')
            n += digit - 'A' + 10;
    }

    return n;
}
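// xtol() converts a string of hex digits to an integer, e.g. xtol("1c") == 28.
// It expects the "0x" prefix to have been stripped already; ParseTermBase()
// passes nstr+2 for exactly that reason.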
Term*
Parser::ParseTermBase()
{
    Token  t = lexer->Get();
    int    n = 0;
    double d = 0.0;

    switch (t.type()) {
    case Token::IntLiteral: {
        if (dump_tokens)
            Print("%s", t.symbol().data());

        char nstr[256], *p = nstr;
        for (int i = 0; i < (int) t.symbol().length(); i++)
            if (t.symbol()[i] != '_')
                *p++ = t.symbol()[i];
        *p++ = '\0';

        // handle hex notation:
        if (nstr[1] == 'x')
            n = xtol(nstr+2);

        else
            n = atol(nstr);

        return new(__FILE__, __LINE__) TermNumber(n);
    }

    case Token::FloatLiteral: {
        if (dump_tokens)
            Print("%s", t.symbol().data());

        char nstr[256], *p = nstr;
        for (int i = 0; i < (int) t.symbol().length(); i++)
            if (t.symbol()[i] != '_')
                *p++ = t.symbol()[i];
        *p++ = '\0';

        d = atof(nstr);
        return new(__FILE__, __LINE__) TermNumber(d);
    }

    case Token::StringLiteral:
        if (dump_tokens)
            Print("%s", t.symbol().data());

        return new(__FILE__, __LINE__) TermText(t.symbol()(1, t.symbol().length()-2));

    case Token::AlphaIdent:
        if (dump_tokens)
            Print("%s", t.symbol().data());

        return new(__FILE__, __LINE__) TermText(t.symbol());

    case Token::Keyword:
        if (dump_tokens)
            Print("%s", t.symbol().data());

        switch (t.key()) {
        case KEY_FALSE: return new(__FILE__, __LINE__) TermBool(0);
        case KEY_TRUE:  return new(__FILE__, __LINE__) TermBool(1);

        case KEY_MINUS: {
            Token next = lexer->Get();
            if (next.type() == Token::IntLiteral) {
                if (dump_tokens)
                    Print("%s", next.symbol().data());

                char nstr[256], *p = nstr;
                for (int i = 0; i < (int) next.symbol().length(); i++)
                    if (next.symbol()[i] != '_')
                        *p++ = next.symbol()[i];
                *p++ = '\0';

                n = -1 * atol(nstr);
                return new(__FILE__, __LINE__) TermNumber(n);
            }
            else if (next.type() == Token::FloatLiteral) {
                if (dump_tokens)
                    Print("%s", next.symbol().data());

                char nstr[256], *p = nstr;
                for (int i = 0; i < (int) next.symbol().length(); i++)
                    if (next.symbol()[i] != '_')
                        *p++ = next.symbol()[i];
                *p++ = '\0';

                d = -1.0 * atof(nstr);
                return new(__FILE__, __LINE__) TermNumber(d);
            }
            else {
                lexer->PutBack();
                return error("(Parse) illegal token '-': number expected", next);
            }
        }
        break;

        default:
            lexer->PutBack();
            return 0;
        }

    case Token::LParen:  return ParseArray();

    case Token::LBrace:  return ParseStruct();

    case Token::CharLiteral:
        return error("(Parse) illegal token ", t);

    default:
        lexer->PutBack();
        return 0;
    }
}
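// ParseTermBase() recognizes the leading token of a term: numeric literals
// (with '_' digit separators stripped and "0x..." hex handled via xtol),
// quoted strings and identifiers (TermText), the true/false keywords
// (TermBool), a unary minus applied to a numeric literal, '(' opening an
// array, and '{' opening a struct.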
TermArray*
Parser::ParseArray()
{
    TermList* elems = ParseTermList(0);
    Token end = lexer->Get();

    if (end.type() != Token::RParen)
        return (TermArray*) error("(Parse) ')' missing in array-decl", end);

    return new(__FILE__, __LINE__) TermArray(elems);
}
TermStruct*
Parser::ParseStruct()
{
    TermList* elems = ParseTermList(1);
    Token end = lexer->Get();

    if (end.type() != Token::RBrace)
        return (TermStruct*) error("(Parse) '}' missing in struct", end);

    return new(__FILE__, __LINE__) TermStruct(elems);
}
TermList*
Parser::ParseTermList(int for_struct)
{
    TermList* tlist = new(__FILE__, __LINE__) TermList;

    Term* term = ParseTerm();
    while (term) {
        if (for_struct && !term->isDef()) {
            return (TermList*) error("(Parse) non-definition term in struct");
        }
        else if (!for_struct && term->isDef()) {
            return (TermList*) error("(Parse) illegal definition in array");
        }

        tlist->append(term);
        Token t = lexer->Get();

        /*** OLD WAY: COMMA SEPARATORS REQUIRED ***
        if (t.type() != Token::Comma) {
            lexer->PutBack();
            term = 0;
        }
        else
            term = ParseTerm();
        /*******************************************/

        // NEW WAY: COMMA SEPARATORS OPTIONAL:
        if (t.type() != Token::Comma) {
            lexer->PutBack();
        }

        term = ParseTerm();
    }

    return tlist;
}
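For orientation, here is a minimal usage sketch (not part of the committed file). It assumes only what the listing shows: a Parser(Reader*) constructor that falls back to a ConsoleReader when given 0, a public ParseTerm() that returns 0 at end of input, and caller-side ownership of the returned terms (that last point is an assumption).

#include "reader.h"
#include "parser.h"
#include <stdio.h>

// Parse every top-level term from the given reader and report how many
// were found.  Passing 0 makes the Parser fall back to a ConsoleReader.
void ParseAll(Reader* rdr)
{
    Parser parser(rdr);

    int   count = 0;
    Term* term  = parser.ParseTerm();

    while (term) {                 // ParseTerm() returns 0 at end of input
        count++;
        delete term;               // assumption: the caller owns parsed terms
        term = parser.ParseTerm();
    }

    printf("parsed %d top-level terms\n", count);
}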
--
cgit v1.1