diff options
author | Aki <please@ignore.pl> | 2022-03-27 17:42:26 +0200 |
---|---|---|
committer | Aki <please@ignore.pl> | 2022-03-27 17:42:26 +0200 |
commit | a12e588079700d55a0b788fea2df7727c2e41f52 (patch) | |
tree | 690072569624c73af5043b3619acaab4d298b1fa /Stars45/Parser.cpp | |
parent | 72bb517271dad40a440533ad0796a88247011199 (diff) | |
download | starshatter-a12e588079700d55a0b788fea2df7727c2e41f52.zip starshatter-a12e588079700d55a0b788fea2df7727c2e41f52.tar.gz starshatter-a12e588079700d55a0b788fea2df7727c2e41f52.tar.bz2 |
Removed MemDebug from FoundationEx
Diffstat (limited to 'Stars45/Parser.cpp')
-rw-r--r-- | Stars45/Parser.cpp | 31 |
1 file changed, 15 insertions, 16 deletions
diff --git a/Stars45/Parser.cpp b/Stars45/Parser.cpp index b1c9ef5..09827cf 100644 --- a/Stars45/Parser.cpp +++ b/Stars45/Parser.cpp @@ -11,7 +11,6 @@ Implementation of the generic Parser class */ -#include "MemDebug.h" #include "Reader.h" #include "Token.h" #include "Parser.h" @@ -38,8 +37,8 @@ Term* error(char* msg, const Token& token) Parser::Parser(Reader* r) { - reader = r ? r : new(__FILE__, __LINE__) ConsoleReader; - lexer = new(__FILE__, __LINE__) Scanner(reader); + reader = r ? r : new ConsoleReader; + lexer = new Scanner(reader); Token::addKey("true", KEY_TRUE); Token::addKey("false", KEY_FALSE); @@ -79,7 +78,7 @@ Parser::ParseTermRest(Term* base) // concatenate adjacent string literal tokens: TermText* text = base->isText(); if (text) { - TermText* base2 = new(__FILE__, __LINE__) TermText(text->value() + t.symbol()(1, t.symbol().length()-2)); + TermText* base2 = new TermText(text->value() + t.symbol()(1, t.symbol().length()-2)); delete base; return ParseTermRest(base2); } @@ -93,7 +92,7 @@ Parser::ParseTermRest(Term* base) switch (t.key()) { case KEY_DEF: if (base->isText()) - return new(__FILE__, __LINE__) TermDef(base->isText(), ParseTerm()); + return new TermDef(base->isText(), ParseTerm()); else return error("(Parse) illegal lhs in def", t); @@ -153,7 +152,7 @@ Parser::ParseTermBase() else n = atol(nstr); - return new(__FILE__, __LINE__) TermNumber(n); + return new TermNumber(n); } case Token::FloatLiteral: { @@ -167,28 +166,28 @@ Parser::ParseTermBase() *p++ = '\0'; d = atof(nstr); - return new(__FILE__, __LINE__) TermNumber(d); + return new TermNumber(d); } case Token::StringLiteral: if (dump_tokens) Print("%s", t.symbol().data()); - return new(__FILE__, __LINE__) TermText(t.symbol()(1, t.symbol().length()-2)); + return new TermText(t.symbol()(1, t.symbol().length()-2)); case Token::AlphaIdent: if (dump_tokens) Print("%s", t.symbol().data()); - return new(__FILE__, __LINE__) TermText(t.symbol()); + return new TermText(t.symbol()); case 
Token::Keyword: if (dump_tokens) Print("%s", t.symbol().data()); switch (t.key()) { - case KEY_FALSE: return new(__FILE__, __LINE__) TermBool(0); - case KEY_TRUE: return new(__FILE__, __LINE__) TermBool(1); + case KEY_FALSE: return new TermBool(0); + case KEY_TRUE: return new TermBool(1); case KEY_MINUS: { Token next = lexer->Get(); @@ -203,7 +202,7 @@ Parser::ParseTermBase() *p++ = '\0'; n = -1 * atol(nstr); - return new(__FILE__, __LINE__) TermNumber(n); + return new TermNumber(n); } else if (next.type() == Token::FloatLiteral) { if (dump_tokens) @@ -216,7 +215,7 @@ Parser::ParseTermBase() *p++ = '\0'; d = -1.0 * atof(nstr); - return new(__FILE__, __LINE__) TermNumber(d); + return new TermNumber(d); } else { lexer->PutBack(); @@ -252,7 +251,7 @@ Parser::ParseArray() if (end.type() != Token::RParen) return (TermArray*) error("(Parse) ')' missing in array-decl", end); - return new(__FILE__, __LINE__) TermArray(elems); + return new TermArray(elems); } TermStruct* @@ -264,13 +263,13 @@ Parser::ParseStruct() if (end.type() != Token::RBrace) return (TermStruct*) error("(Parse) '}' missing in struct", end); - return new(__FILE__, __LINE__) TermStruct(elems); + return new TermStruct(elems); } TermList* Parser::ParseTermList(int for_struct) { - TermList* tlist = new(__FILE__, __LINE__) TermList; + TermList* tlist = new TermList; Term* term = ParseTerm(); while (term) { |