diff --git a/modcc/blocks.hpp b/modcc/blocks.hpp index a2c098aa5e81cbc0dd6df3ec5bb64243a5126c27..df77a04939160b2a8dbbb4bb7186e376195abdc5 100644 --- a/modcc/blocks.hpp +++ b/modcc/blocks.hpp @@ -23,44 +23,42 @@ struct IonDep { return writes_variable(name) || reads_variable(name); }; bool uses_current() const { - return has_variable("i"+name); + return has_variable("i" + name); }; bool uses_rev_potential() const { - return has_variable("e"+name); + return has_variable("e" + name); }; bool uses_concentration_int() const { - return has_variable(name+"i"); + return has_variable(name + "i"); }; bool uses_concentration_ext() const { - return has_variable(name+"o"); + return has_variable(name + "o"); }; bool writes_current() const { - return writes_variable("i"+name); + return writes_variable("i" + name); }; bool writes_concentration_int() const { - return writes_variable(name+"i"); + return writes_variable(name + "i"); }; bool writes_concentration_ext() const { - return writes_variable(name+"o"); + return writes_variable(name + "o"); }; bool writes_rev_potential() const { - return writes_variable("e"+name); + return writes_variable("e" + name); }; bool uses_valence() const { - return valence_var.type==tok::identifier; + return valence_var.type == tok::identifier; } bool verifies_valence() const { return has_valence_expr && !uses_valence(); } bool reads_variable(const std::string& name) const { - return std::find_if(read.begin(), read.end(), - [&name](const Token& t) {return t.spelling==name;}) != read.end(); + return std::find_if(read.begin(), read.end(), [&name](const Token& t) { return t.spelling == name; }) != read.end(); } bool writes_variable(const std::string& name) const { - return std::find_if(write.begin(), write.end(), - [&name](const Token& t) {return t.spelling==name;}) != write.end(); + return std::find_if(write.begin(), write.end(), [&name](const Token& t) { return t.spelling == name; }) != write.end(); } }; @@ -71,11 +69,12 @@ struct Id { // string == no value unit_tokens units; - std::pair<Token, Token> range; // empty component => no range set + std::pair<std::string, std::string> range; // empty component => no range set - Id(Token const& t, std::string const& v, unit_tokens const& u) - : token(t), value(v), units(u) - {} + Id(Token const& t, std::string const& v, unit_tokens const& u): + token(t), + value(v), + units(u) {} Id() {} @@ -84,7 +83,7 @@ struct Id { } bool has_range() const { - return !range.first.spelling.empty(); + return !range.first.empty(); } std::string unit_string() const { @@ -113,7 +112,7 @@ struct NeuronBlock { std::vector<Token> globals; Token nonspecific_current; bool has_nonspecific_current() const { - return nonspecific_current.spelling.size()>0; + return nonspecific_current.spelling.size() > 0; } }; diff --git a/modcc/parser.cpp b/modcc/parser.cpp index 753b0b80c3892c71f810f48e73176d1773ebee25..d52f9416899074dfaa937b9148fa154d1888c882 100644 --- a/modcc/parser.cpp +++ b/modcc/parser.cpp @@ -10,37 +10,35 @@ // specialize on const char* for lazy evaluation of compile time strings bool Parser::expect(tok tok, const char* str) { - if(tok==token_.type) { + if (tok == token_.type) { return true; } error( - strlen(str)>0 ? - str - : std::string("unexpected token ")+yellow(token_.spelling)); + strlen(str) > 0 ? str + : std::string("unexpected token ") + yellow(token_.spelling)); return false; } bool Parser::expect(tok tok, std::string const& str) { - if(tok==token_.type) { + if (tok == token_.type) { return true; } error( - str.size()>0 ? 
- str - : std::string("unexpected token ")+yellow(token_.spelling)); + str.size() > 0 ? str + : std::string("unexpected token ") + yellow(token_.spelling)); return false; } void Parser::error(std::string msg) { std::string location_info = pprintf( - "%:% ", module_ ? module_->source_name() : "", token_.location); - if(status_==lexerStatus::error) { + "%:% ", module_ ? module_->source_name() : "", token_.location); + if (status_ == lexerStatus::error) { // append to current string - error_string_ += "\n" + white(location_info) + "\n " +msg; + error_string_ += "\n" + white(location_info) + "\n " + msg; } else { error_string_ = white(location_info) + "\n " + msg; @@ -50,8 +48,8 @@ void Parser::error(std::string msg) { void Parser::error(std::string msg, Location loc) { std::string location_info = pprintf( - "%:% ", module_ ? module_->source_name() : "", loc); - if(status_==lexerStatus::error) { + "%:% ", module_ ? module_->source_name() : "", loc); + if (status_ == lexerStatus::error) { // append to current string error_string_ += "\n" + green(location_info) + msg; } @@ -61,22 +59,20 @@ void Parser::error(std::string msg, Location loc) { } } -Parser::Parser(Module& m, bool advance) -: Lexer(m.buffer()), - module_(&m) -{ +Parser::Parser(Module& m, bool advance): + Lexer(m.buffer()), + module_(&m) { // prime the first token get_token(); - if(advance) { + if (advance) { parse(); } } -Parser::Parser(std::string const& buf) -: Lexer(buf), - module_(nullptr) -{ +Parser::Parser(std::string const& buf): + Lexer(buf), + module_(nullptr) { // prime the first token get_token(); } @@ -84,56 +80,52 @@ Parser::Parser(std::string const& buf) bool Parser::parse() { // perform first pass to read the descriptive blocks and // record the location of the verb blocks - while(token_.type!=tok::eof) { - switch(token_.type) { - case tok::title : - parse_title(); - break; - case tok::neuron : - parse_neuron_block(); - break; - case tok::state : - parse_state_block(); - break; - case tok::units : - parse_units_block(); - break; - case tok::constant : - parse_constant_block(); - break; - case tok::parameter : - parse_parameter_block(); - break; - case tok::assigned : - parse_assigned_block(); - break; - // INITIAL, KINETIC, DERIVATIVE, PROCEDURE, NET_RECEIVE and BREAKPOINT blocks - // are all lowered to ProcedureExpression - case tok::net_receive: - case tok::breakpoint : - case tok::initial : - case tok::kinetic : - case tok::linear : - case tok::derivative : - case tok::procedure : - { - auto p = parse_procedure(); - if(!p) break; - module_->add_callable(std::move(p)); - } - break; - case tok::function : - { - auto f = parse_function(); - if(!f) break; - module_->add_callable(std::move(f)); - } - break; - default : - error(pprintf("expected block type, found '%'", token_.spelling)); - break; + while (token_.type != tok::eof) { + switch (token_.type) { + case tok::title: + parse_title(); + break; + case tok::neuron: + parse_neuron_block(); + break; + case tok::state: + parse_state_block(); + break; + case tok::units: + parse_units_block(); + break; + case tok::constant: + parse_constant_block(); + break; + case tok::parameter: + parse_parameter_block(); + break; + case tok::assigned: + parse_assigned_block(); + break; + // INITIAL, KINETIC, DERIVATIVE, PROCEDURE, NET_RECEIVE and BREAKPOINT blocks + // are all lowered to ProcedureExpression + case tok::net_receive: + case tok::breakpoint: + case tok::initial: + case tok::kinetic: + case tok::linear: + case tok::derivative: + case tok::procedure: { + auto p = 
parse_procedure(); + if (!p) break; + module_->add_callable(std::move(p)); + } break; + case tok::function: { + auto f = parse_function(); + if (!f) break; + module_->add_callable(std::move(f)); + } break; + default: + error(pprintf("expected block type, found '%'", token_.spelling)); + break; } - if(status() == lexerStatus::error) { + if (status() == lexerStatus::error) { std::cerr << red("error: ") << error_string_ << std::endl; return false; } @@ -154,27 +146,27 @@ std::vector<Token> Parser::comma_separated_identifiers() { std::vector<Token> tokens; int startline = location_.line; // handle is an empty list at the end of a line - if(peek().location.line > startline) { + if (peek().location.line > startline) { // this happens when scanning WRITE below: // USEION k READ a, b WRITE // leave to the caller to decide whether an empty list is an error return tokens; } - while(1) { + while (1) { get_token(); // first check if a new line was encounterd - if(location_.line > startline) { + if (location_.line > startline) { return tokens; } - else if(token_.type == tok::identifier) { + else if (token_.type == tok::identifier) { tokens.push_back(token_); } - else if(is_keyword(token_)) { + else if (is_keyword(token_)) { error(pprintf("found keyword '%', expected a variable name", token_.spelling)); return tokens; } - else if(token_.type == tok::real || token_.type == tok::integer) { + else if (token_.type == tok::real || token_.type == tok::integer) { error(pprintf("found number '%', expected a variable name", token_.spelling)); return tokens; } @@ -185,12 +177,12 @@ std::vector<Token> Parser::comma_separated_identifiers() { // look ahead to check for a comma. This approach ensures that the // first token after the end of the list is not consumed - if( peek().type == tok::comma ) { + if (peek().type == tok::comma) { // load the comma get_token(); // assert that the list can't run off the end of a line - if(peek().location.line > startline) { - error("line can't end with a '"+yellow(",")+"'"); + if (peek().location.line > startline) { + error("line can't end with a '" + yellow(",") + "'"); return tokens; } } @@ -218,9 +210,9 @@ void Parser::parse_neuron_block() { get_token(); // assert that the block starts with a curly brace - if(token_.type != tok::lbrace) { + if (token_.type != tok::lbrace) { error(pprintf("NEURON block must start with a curly brace {, found '%'", - token_.spelling)); + token_.spelling)); return; } @@ -231,138 +223,135 @@ void Parser::parse_neuron_block() { // have to count them we have to get the next token before entering the loop // to handle the case of an empty block {} get_token(); - while(token_.type!=tok::rbrace) { - switch(token_.type) { - case tok::threadsafe : - neuron_block.threadsafe = true; - get_token(); // consume THREADSAFE - break; - - case tok::suffix : - case tok::point_process : - neuron_block.kind = (token_.type==tok::suffix) ? moduleKind::density - : moduleKind::point; - - // set the modul kind - module_->kind(neuron_block.kind); - - get_token(); // consume SUFFIX / POINT_PROCESS - // assert that a valid name for the Neuron has been specified - if(token_.type != tok::identifier) { - error(pprintf("invalid name for SUFFIX, found '%'", token_.spelling)); + while (token_.type != tok::rbrace) { + switch (token_.type) { + case tok::threadsafe: + neuron_block.threadsafe = true; + get_token(); // consume THREADSAFE + break; + + case tok::suffix: + case tok::point_process: + neuron_block.kind = (token_.type == tok::suffix) ? 
moduleKind::density + : moduleKind::point; + + // set the modul kind + module_->kind(neuron_block.kind); + + get_token(); // consume SUFFIX / POINT_PROCESS + // assert that a valid name for the Neuron has been specified + if (token_.type != tok::identifier) { + error(pprintf("invalid name for SUFFIX, found '%'", token_.spelling)); + return; + } + neuron_block.name = token_.spelling; + + get_token(); // consume the name + break; + + // this will be a comma-separated list of identifiers + case tok::global: + // the ranges are a comma-seperated list of identifiers + { + auto identifiers = comma_separated_identifiers(); + // bail if there was an error reading the list + if (status_ == lexerStatus::error) { return; } - neuron_block.name = token_.spelling; - - get_token(); // consume the name - break; - - // this will be a comma-separated list of identifiers - case tok::global : - // the ranges are a comma-seperated list of identifiers - { - auto identifiers = comma_separated_identifiers(); - // bail if there was an error reading the list - if(status_==lexerStatus::error) { - return; - } - for(auto const &id : identifiers) { - neuron_block.globals.push_back(id); - } + for (auto const& id: identifiers) { + neuron_block.globals.push_back(id); } - break; - - // this will be a comma-separated list of identifiers - case tok::range : - // the ranges are a comma-seperated list of identifiers - { - auto identifiers = comma_separated_identifiers(); - if(status_==lexerStatus::error) { // bail if there was an error reading the list - return; - } - for(auto const &id : identifiers) { - neuron_block.ranges.push_back(id); - } + } + break; + + // this will be a comma-separated list of identifiers + case tok::range: + // the ranges are a comma-seperated list of identifiers + { + auto identifiers = comma_separated_identifiers(); + if (status_ == lexerStatus::error) { // bail if there was an error reading the list + return; } - break; + for (auto const& id: identifiers) { + neuron_block.ranges.push_back(id); + } + } + break; - case tok::useion : - { - IonDep ion; - // we have to parse the name of the ion first + case tok::useion: { + IonDep ion; + // we have to parse the name of the ion first + get_token(); + // check this is an identifier token + if (token_.type != tok::identifier) { + error(pprintf("invalid name for an ion chanel '%'", token_.spelling)); + return; + } + + ion.name = token_.spelling; + get_token(); // consume the ion name + + // this loop ensures that we don't gobble any tokens past + // the end of the USEION clause + while (token_.type == tok::read || token_.type == tok::write) { + auto& target = (token_.type == tok::read) ? 
ion.read + : ion.write; + std::vector<Token> identifiers = comma_separated_identifiers(); + // bail if there was an error reading the list + if (status_ == lexerStatus::error) { + return; + } + for (auto const& id: identifiers) { + target.push_back(id); + } + } + + if (token_.type == tok::valence) { + ion.has_valence_expr = true; + + // consume "Valence" + get_token(); + + // take and consume variable name or signed integer + if (token_.type == tok::identifier) { + ion.valence_var = token_; get_token(); - // check this is an identifier token - if(token_.type != tok::identifier) { - error(pprintf("invalid name for an ion chanel '%'", token_.spelling)); - return; - } - - ion.name = token_.spelling; - get_token(); // consume the ion name - - // this loop ensures that we don't gobble any tokens past - // the end of the USEION clause - while(token_.type == tok::read || token_.type == tok::write) { - auto& target = (token_.type == tok::read) ? ion.read - : ion.write; - std::vector<Token> identifiers - = comma_separated_identifiers(); - // bail if there was an error reading the list - if(status_==lexerStatus::error) { - return; - } - for(auto const &id : identifiers) { - target.push_back(id); - } - } - - if(token_.type == tok::valence) { - ion.has_valence_expr = true; - - // consume "Valence" - get_token(); - - // take and consume variable name or signed integer - if(token_.type == tok::identifier) { - ion.valence_var = token_; - get_token(); - } - else { - ion.expected_valence = value_signed_integer(); - } - } - - // add the ion dependency to the NEURON block - neuron_block.ions.push_back(std::move(ion)); } - break; - - case tok::nonspecific_current : - // Assume that there is one non-specific current per mechanism. - // It would be easy to extend this to multiple currents, - // however there are no mechanisms in the CoreNeuron repository - // that do this - { - get_token(); // consume NONSPECIFIC_CURRENT - - auto tok = token_; - - // parse the current name and check for errors - auto id = parse_identifier(); - if(status_==lexerStatus::error) { - return; - } - - // store the token with nonspecific current's name and location - neuron_block.nonspecific_current = tok; + else { + ion.expected_valence = value_signed_integer(); } - break; + } - // the parser encountered an invalid symbol - default : - error(pprintf("there was an invalid statement '%' in NEURON block", - token_.spelling)); - return; + // add the ion dependency to the NEURON block + neuron_block.ions.push_back(std::move(ion)); + } break; + + case tok::nonspecific_current: + // Assume that there is one non-specific current per mechanism. 
+ // It would be easy to extend this to multiple currents, + // however there are no mechanisms in the CoreNeuron repository + // that do this + { + get_token(); // consume NONSPECIFIC_CURRENT + + auto tok = token_; + + // parse the current name and check for errors + auto id = parse_identifier(); + if (status_ == lexerStatus::error) { + return; + } + + // store the token with nonspecific current's name and location + neuron_block.nonspecific_current = tok; + } + break; + + // the parser encountered an invalid symbol + default: + error(pprintf("there was an invalid statement '%' in NEURON block", + token_.spelling)); + return; } } @@ -379,7 +368,7 @@ void Parser::parse_state_block() { get_token(); // assert that the block starts with a curly brace - if(token_.type != tok::lbrace) { + if (token_.type != tok::lbrace) { error(pprintf("STATE block must start with a curly brace {, found '%'", token_.spelling)); return; } @@ -388,20 +377,20 @@ void Parser::parse_state_block() { // to count them we have to get the next token before entering the loop to // handle the case of an empty block {} get_token(); - while(token_.type!=tok::rbrace && token_.type != tok::eof) { + while (token_.type != tok::rbrace && token_.type != tok::eof) { int line = location_.line; Id parm; - if(token_.type != tok::identifier) { + if (token_.type != tok::identifier) { error(pprintf("'%' is not a valid name for a state variable", - token_.spelling)); + token_.spelling)); return; } parm.token = token_; get_token(); - if(token_.type == tok::from) { + if (token_.type == tok::from) { // silently skips from/to from_to_description(); if (status_ == lexerStatus::error) { @@ -414,7 +403,7 @@ void Parser::parse_state_block() { parm.units = unit_description(); if (status_ == lexerStatus::error) { error(pprintf("STATUS block unexpected symbol '%s'", - token_.spelling)); + token_.spelling)); return; } } @@ -436,20 +425,20 @@ void Parser::parse_units_block() { get_token(); // assert that the block starts with a curly brace - if(token_.type != tok::lbrace) { + if (token_.type != tok::lbrace) { error(pprintf("UNITS block must start with a curly brace {, found '%'", token_.spelling)); return; } // there are no use cases for curly brace in a UNITS block, so we don't have to count them get_token(); - while(token_.type!=tok::rbrace) { + while (token_.type != tok::rbrace) { // get the alias std::vector<Token> lhs = unit_description(); - if( status_!=lexerStatus::happy ) return; + if (status_ != lexerStatus::happy) return; // consume the '=' sign - if( token_.type!=tok::eq ) { + if (token_.type != tok::eq) { error(pprintf("expected '=', found '%'", token_.spelling)); return; } @@ -458,7 +447,7 @@ void Parser::parse_units_block() { // get the units std::vector<Token> rhs = unit_description(); - if( status_!=lexerStatus::happy ) return; + if (status_ != lexerStatus::happy) return; // store the unit definition units_block.unit_aliases.push_back({lhs, rhs}); @@ -485,7 +474,7 @@ void Parser::parse_parameter_block() { get_token(); // assert that the block starts with a curly brace - if(token_.type != tok::lbrace) { + if (token_.type != tok::lbrace) { error(pprintf("PARAMETER block must start with a curly brace {, found '%'", token_.spelling)); return; } @@ -493,12 +482,12 @@ void Parser::parse_parameter_block() { int success = 1; // there are no use cases for curly brace in a UNITS block, so we don't have to count them get_token(); - while(token_.type!=tok::rbrace && token_.type!=tok::eof) { + while (token_.type != tok::rbrace && token_.type != 
tok::eof) { int line = location_.line; Id parm; // read the parameter name - if(token_.type != tok::identifier) { + if (token_.type != tok::identifier) { success = 0; goto parm_exit; } @@ -507,28 +496,28 @@ void Parser::parse_parameter_block() { get_token(); // look for equality - if(token_.type==tok::eq) { + if (token_.type == tok::eq) { get_token(); // consume '=' parm.value = value_literal(); - if(status_ == lexerStatus::error) { + if (status_ == lexerStatus::error) { success = 0; goto parm_exit; } } // get the units - if(line==location_.line && token_.type == tok::lparen) { + if (line == location_.line && token_.type == tok::lparen) { parm.units = unit_description(); - if(status_ == lexerStatus::error) { + if (status_ == lexerStatus::error) { success = 0; goto parm_exit; } } // get the range - if(line==location_.line && token_.type == tok::lt) { + if (line == location_.line && token_.type == tok::lt) { parm.range = range_description(); - if(status_ == lexerStatus::error) { + if (status_ == lexerStatus::error) { success = 0; goto parm_exit; } @@ -537,7 +526,7 @@ void Parser::parse_parameter_block() { } // error if EOF before closing curly brace - if(token_.type==tok::eof) { + if (token_.type == tok::eof) { error("PARAMETER block must have closing '}'"); goto parm_exit; } @@ -548,7 +537,7 @@ void Parser::parse_parameter_block() { parm_exit: // only write error message if one hasn't already been logged by the lexer - if(!success && status_==lexerStatus::happy) { + if (!success && status_ == lexerStatus::happy) { error(pprintf("PARAMETER block unexpected symbol '%s'", token_.spelling)); } return; @@ -558,18 +547,18 @@ void Parser::parse_constant_block() { get_token(); // assert that the block starts with a curly brace - if(token_.type != tok::lbrace) { + if (token_.type != tok::lbrace) { error(pprintf("CONSTANT block must start with a curly brace {, found '%'", token_.spelling)); return; } get_token(); - while(token_.type!=tok::rbrace && token_.type!=tok::eof) { + while (token_.type != tok::rbrace && token_.type != tok::eof) { int line = location_.line; std::string name, value; // read the constant name - if(token_.type != tok::identifier) { + if (token_.type != tok::identifier) { error(pprintf("CONSTANT block unexpected symbol '%s'", token_.spelling)); return; } @@ -578,18 +567,18 @@ void Parser::parse_constant_block() { get_token(); // look for equality - if(token_.type==tok::eq) { + if (token_.type == tok::eq) { get_token(); // consume '=' value = value_literal(); - if(status_ == lexerStatus::error) { + if (status_ == lexerStatus::error) { return; } } // get the units - if(line==location_.line && token_.type == tok::lparen) { + if (line == location_.line && token_.type == tok::lparen) { unit_description(); - if(status_ == lexerStatus::error) { + if (status_ == lexerStatus::error) { return; } } @@ -598,7 +587,7 @@ void Parser::parse_constant_block() { } // error if EOF before closing curly brace - if(token_.type==tok::eof) { + if (token_.type == tok::eof) { error("CONSTANT block must have closing '}'"); return; } @@ -614,7 +603,7 @@ void Parser::parse_assigned_block() { get_token(); // assert that the block starts with a curly brace - if(token_.type != tok::lbrace) { + if (token_.type != tok::lbrace) { error(pprintf("ASSIGNED block must start with a curly brace {, found '%'", token_.spelling)); return; } @@ -623,41 +612,41 @@ void Parser::parse_assigned_block() { // there are no use cases for curly brace in an ASSIGNED block, so we don't have to count them get_token(); - 
while(token_.type!=tok::rbrace && token_.type!=tok::eof) { + while (token_.type != tok::rbrace && token_.type != tok::eof) { int line = location_.line; std::vector<Token> variables; // we can have more than one variable on a line // the first token must be ... - if(token_.type != tok::identifier) { + if (token_.type != tok::identifier) { success = 0; goto ass_exit; } // read all of the identifiers until we run out of identifiers or reach a new line - while(token_.type == tok::identifier && line == location_.line) { + while (token_.type == tok::identifier && line == location_.line) { variables.push_back(token_); get_token(); } // there are some parameters at the end of the line - if(line==location_.line && token_.type == tok::lparen) { + if (line == location_.line && token_.type == tok::lparen) { auto u = unit_description(); - if(status_ == lexerStatus::error) { + if (status_ == lexerStatus::error) { success = 0; goto ass_exit; } - for(auto const& t : variables) { + for (auto const& t: variables) { block.parameters.push_back(Id(t, "", u)); } } else { - for(auto const& t : variables) { + for (auto const& t: variables) { block.parameters.push_back(Id(t, "", {})); } } } // error if EOF before closing curly brace - if(token_.type==tok::eof) { + if (token_.type == tok::eof) { error("ASSIGNED block must have closing '}'"); goto ass_exit; } @@ -668,7 +657,7 @@ void Parser::parse_assigned_block() { ass_exit: // only write error message if one hasn't already been logged by the lexer - if(!success && status_==lexerStatus::happy) { + if (!success && status_ == lexerStatus::happy) { error(pprintf("ASSIGNED block unexpected symbol '%'", token_.spelling)); } return; @@ -679,7 +668,7 @@ ass_exit: std::string Parser::value_literal() { bool negate = false; - if(token_.type==tok::minus) { + if (token_.type == tok::minus) { negate = true; get_token(); } @@ -688,7 +677,7 @@ std::string Parser::value_literal() { // Remove double negation auto v = constants_map_.at(token_.spelling); if (v.at(0) == '-' && negate) { - v.erase(0,1); + v.erase(0, 1); negate = false; } auto value = negate ? 
"-" + v : v; @@ -696,7 +685,7 @@ std::string Parser::value_literal() { return value; } - if(token_.type != tok::integer && token_.type != tok::real) { + if (token_.type != tok::integer && token_.type != tok::real) { error(pprintf("numeric constant not an integer or real number '%'", token_)); return ""; } @@ -712,14 +701,14 @@ std::string Parser::value_literal() { int Parser::value_signed_integer() { std::string value; - if(token_.type==tok::minus) { + if (token_.type == tok::minus) { value = "-"; get_token(); } - else if(token_.type==tok::plus) { + else if (token_.type == tok::plus) { get_token(); } - if(token_.type != tok::integer) { + if (token_.type != tok::integer) { error(pprintf("numeric constant not an integer '%'", token_)); return 0; } @@ -736,16 +725,16 @@ std::vector<Token> Parser::unit_description() { std::vector<Token> tokens; // check that we start with a left parenthesis - if(token_.type != tok::lparen) { + if (token_.type != tok::lparen) { error(pprintf("unit description must start with a parenthesis '%'", token_)); goto unit_exit; } get_token(); - while(token_.type != tok::rparen) { + while (token_.type != tok::rparen) { // check for illegal tokens or a new line - if( !is_in(token_.type,legal_tokens) || startline < location_.line ) { + if (!is_in(token_.type, legal_tokens) || startline < location_.line) { error(pprintf("incorrect unit description '%'", token_)); goto unit_exit; } @@ -761,36 +750,26 @@ unit_exit: return tokens; } -std::pair<Token, Token> Parser::range_description() { - Token lb, ub; +std::pair<std::string, std::string> Parser::range_description() { + std::string lb, ub; - if(token_.type != tok::lt) { + if (token_.type != tok::lt) { error(pprintf("range description must start with a left angle bracket '%'", token_)); return {}; } get_token(); - if(token_.type != tok::integer) { - error(pprintf("range description must be <int, int>, found '%'", token_)); - return {}; - } - lb = token_; + lb = value_literal(); - get_token(); - if(token_.type != tok::comma) { + if (token_.type != tok::comma) { error(pprintf("range description must separate lower and upper bound with a comma '%'", token_)); return {}; } get_token(); - if(token_.type != tok::integer) { - error(pprintf("range description must be <int, int>, found '%'", token_)); - return {}; - } - ub = token_; + ub = value_literal(); - get_token(); - if(token_.type != tok::gt) { + if (token_.type != tok::gt) { error(pprintf("range description must end with a right angle bracket '%'", token_)); return {}; } @@ -799,47 +778,36 @@ std::pair<Token, Token> Parser::range_description() { return {lb, ub}; } -std::pair<Token, Token> Parser::from_to_description() { - Token lb, ub; +std::pair<std::string, std::string> Parser::from_to_description() { + std::string lb, ub; - if(token_.type != tok::from) { - error(pprintf("range description must be of form FROM <int> TO <int>, found '%'", token_)); + if (token_.type != tok::from) { + error(pprintf("range description must be of form FROM <number> TO <number>, found '%'", token_)); return {}; } get_token(); - if(token_.type != tok::integer) { - error(pprintf("range description must be of form FROM <int> TO <int>, found '%'", token_)); - return {}; - } - lb = token_; + lb = value_literal(); - get_token(); - if(token_.type != tok::to) { - error(pprintf("range description must be of form FROM <int> TO <int>, found '%'", token_)); + if (token_.type != tok::to) { + error(pprintf("range description must be of form FROM <number> TO <number>, found '%'", token_)); return {}; } 
get_token(); - if(token_.type != tok::integer) { - error(pprintf("range description must be of form FROM <int> TO <int>, found '%'", token_)); - return {}; - } - ub = token_; + ub = value_literal(); - get_token(); return {lb, ub}; } - // Returns a prototype expression for a function or procedure call // Takes an optional argument that allows the user to specify the // name of the prototype, which is used for prototypes where the name // is implcitly defined (e.g. INITIAL and BREAKPOINT blocks) -expression_ptr Parser::parse_prototype(std::string name=std::string()) { +expression_ptr Parser::parse_prototype(std::string name = std::string()) { Token identifier = token_; - if(name.size()) { + if (name.size()) { // we assume that the current token_ is still pointing at // the keyword, i.e. INITIAL or BREAKPOINT identifier.type = tok::identifier; @@ -851,17 +819,16 @@ expression_ptr Parser::parse_prototype(std::string name=std::string()) { // check for an argument list enclosed in parenthesis (...) // return a prototype with an empty argument list if not found - if( token_.type != tok::lparen ) { + if (token_.type != tok::lparen) { return expression_ptr{new PrototypeExpression(identifier.location, identifier.spelling, {})}; } get_token(); // consume '(' std::vector<Token> arg_tokens; - while(token_.type != tok::rparen) { + while (token_.type != tok::rparen) { // check identifier - if(token_.type != tok::identifier) { - error( "expected a valid identifier, found '" - + yellow(token_.spelling) + "'"); + if (token_.type != tok::identifier) { + error("expected a valid identifier, found '" + yellow(token_.spelling) + "'"); return nullptr; } @@ -870,26 +837,25 @@ expression_ptr Parser::parse_prototype(std::string name=std::string()) { get_token(); // consume the identifier // args may have a unit attached - if(token_.type == tok::lparen) { + if (token_.type == tok::lparen) { unit_description(); - if(status_ == lexerStatus::error) { + if (status_ == lexerStatus::error) { return {}; } } // look for a comma - if(!(token_.type == tok::comma || token_.type==tok::rparen)) { - error( "expected a comma or closing parenthesis, found '" - + yellow(token_.spelling) + "'"); + if (!(token_.type == tok::comma || token_.type == tok::rparen)) { + error("expected a comma or closing parenthesis, found '" + yellow(token_.spelling) + "'"); return nullptr; } - if(token_.type == tok::comma) { + if (token_.type == tok::comma) { get_token(); // consume ',' } } - if(token_.type != tok::rparen) { + if (token_.type != tok::rparen) { error("procedure argument list must have closing parenthesis ')'"); return nullptr; } @@ -897,12 +863,11 @@ expression_ptr Parser::parse_prototype(std::string name=std::string()) { // pack the arguments into LocalDeclarations std::vector<expression_ptr> arg_expressions; - for(auto const& t : arg_tokens) { + for (auto const& t: arg_tokens) { arg_expressions.emplace_back(make_expression<ArgumentExpression>(t.location, t)); } - return make_expression<PrototypeExpression> - (identifier.location, identifier.spelling, std::move(arg_expressions)); + return make_expression<PrototypeExpression>(identifier.location, identifier.spelling, std::move(arg_expressions)); } void Parser::parse_title() { @@ -910,10 +875,7 @@ void Parser::parse_title() { int this_line = location().line; Token tkn = peek(); - while( tkn.location.line==this_line - && tkn.type!=tok::eof - && status_==lexerStatus::happy) - { + while (tkn.location.line == this_line && tkn.type != tok::eof && status_ == lexerStatus::happy) { get_token(); 
title += token_.spelling; tkn = peek(); @@ -933,67 +895,65 @@ symbol_ptr Parser::parse_procedure() { expression_ptr p; procedureKind kind = procedureKind::normal; - switch( token_.type ) { - case tok::derivative: - kind = procedureKind::derivative; - get_token(); // consume keyword token - if( !expect( tok::identifier ) ) return nullptr; - p = parse_prototype(); - break; - case tok::kinetic: - kind = procedureKind::kinetic; - get_token(); // consume keyword token - if( !expect( tok::identifier ) ) return nullptr; - p = parse_prototype(); - break; - case tok::linear: - kind = procedureKind::linear; - get_token(); // consume keyword token - if( !expect( tok::identifier ) ) return nullptr; - p = parse_prototype(); - break; - case tok::procedure: - kind = procedureKind::normal; - get_token(); // consume keyword token - if( !expect( tok::identifier ) ) return nullptr; - p = parse_prototype(); - break; - case tok::initial: - kind = procedureKind::initial; - p = parse_prototype("initial"); - break; - case tok::breakpoint: - kind = procedureKind::breakpoint; - p = parse_prototype("breakpoint"); - break; - case tok::net_receive: - kind = procedureKind::net_receive; - p = parse_prototype("net_receive"); - break; - default: - // it is a compiler error if trying to parse_procedure() without - // having DERIVATIVE, KINETIC, PROCEDURE, INITIAL or BREAKPOINT keyword - throw compiler_exception( - "attempt to parse_procedure() without {DERIVATIVE,KINETIC,PROCEDURE,INITIAL,BREAKPOINT}", - location_); + switch (token_.type) { + case tok::derivative: + kind = procedureKind::derivative; + get_token(); // consume keyword token + if (!expect(tok::identifier)) return nullptr; + p = parse_prototype(); + break; + case tok::kinetic: + kind = procedureKind::kinetic; + get_token(); // consume keyword token + if (!expect(tok::identifier)) return nullptr; + p = parse_prototype(); + break; + case tok::linear: + kind = procedureKind::linear; + get_token(); // consume keyword token + if (!expect(tok::identifier)) return nullptr; + p = parse_prototype(); + break; + case tok::procedure: + kind = procedureKind::normal; + get_token(); // consume keyword token + if (!expect(tok::identifier)) return nullptr; + p = parse_prototype(); + break; + case tok::initial: + kind = procedureKind::initial; + p = parse_prototype("initial"); + break; + case tok::breakpoint: + kind = procedureKind::breakpoint; + p = parse_prototype("breakpoint"); + break; + case tok::net_receive: + kind = procedureKind::net_receive; + p = parse_prototype("net_receive"); + break; + default: + // it is a compiler error if trying to parse_procedure() without + // having DERIVATIVE, KINETIC, PROCEDURE, INITIAL or BREAKPOINT keyword + throw compiler_exception( + "attempt to parse_procedure() without {DERIVATIVE,KINETIC,PROCEDURE,INITIAL,BREAKPOINT}", + location_); } - if(p==nullptr) return nullptr; + if (p == nullptr) return nullptr; // check for opening left brace { - if(!expect(tok::lbrace)) return nullptr; + if (!expect(tok::lbrace)) return nullptr; // parse the body of the function expression_ptr body = parse_block(false); - if(body==nullptr) return nullptr; + if (body == nullptr) return nullptr; auto proto = p->is_prototype(); - if(kind != procedureKind::net_receive) { - return make_symbol<ProcedureExpression> - (proto->location(), proto->name(), std::move(proto->args()), std::move(body), kind); + if (kind != procedureKind::net_receive) { + return make_symbol<ProcedureExpression>(proto->location(), proto->name(), std::move(proto->args()), std::move(body), 
kind); } else { - return make_symbol<NetReceiveExpression> - (proto->location(), proto->name(), std::move(proto->args()), std::move(body)); + return make_symbol<NetReceiveExpression>(proto->location(), proto->name(), std::move(proto->args()), std::move(body)); } } @@ -1001,22 +961,21 @@ symbol_ptr Parser::parse_function() { get_token(); // consume FUNCTION token // check that a valid identifier name was specified by the user - if( !expect( tok::identifier ) ) return nullptr; + if (!expect(tok::identifier)) return nullptr; // parse the prototype auto p = parse_prototype(); - if(p==nullptr) return nullptr; + if (p == nullptr) return nullptr; // check for opening left brace { - if(!expect(tok::lbrace)) return nullptr; + if (!expect(tok::lbrace)) return nullptr; // parse the body of the function auto body = parse_block(false); - if(body==nullptr) return nullptr; + if (body == nullptr) return nullptr; - PrototypeExpression *proto = p->is_prototype(); - return make_symbol<FunctionExpression> - (proto->location(), proto->name(), std::move(proto->args()), std::move(body)); + PrototypeExpression* proto = p->is_prototype(); + return make_symbol<FunctionExpression>(proto->location(), proto->name(), std::move(proto->args()), std::move(body)); } // this is the first port of call when parsing a new line inside a verb block @@ -1024,30 +983,30 @@ symbol_ptr Parser::parse_function() { // :: LOCAL identifier // :: expression expression_ptr Parser::parse_statement() { - switch(token_.type) { - case tok::if_stmt : - return parse_if(); - break; - case tok::conductance : - return parse_conductance(); - case tok::solve : - return parse_solve(); - case tok::local : - return parse_local(); - case tok::identifier : - return parse_line_expression(); - case tok::conserve : - return parse_conserve_expression(); - case tok::compartment : - return parse_compartment_statement(); - case tok::tilde : - return parse_tilde_expression(); - case tok::initial : - // only used for INITIAL block in NET_RECEIVE - return parse_initial(); - default: - error(pprintf("unexpected token type % '%'", token_string(token_.type), token_.spelling)); - return nullptr; + switch (token_.type) { + case tok::if_stmt: + return parse_if(); + break; + case tok::conductance: + return parse_conductance(); + case tok::solve: + return parse_solve(); + case tok::local: + return parse_local(); + case tok::identifier: + return parse_line_expression(); + case tok::conserve: + return parse_conserve_expression(); + case tok::compartment: + return parse_compartment_statement(); + case tok::tilde: + return parse_tilde_expression(); + case tok::initial: + // only used for INITIAL block in NET_RECEIVE + return parse_initial(); + default: + error(pprintf("unexpected token type % '%'", token_string(token_.type), token_.spelling)); + return nullptr; } return nullptr; } @@ -1082,7 +1041,7 @@ expression_ptr Parser::parse_call() { // check for a function call // assert this is so - if(token_.type != tok::lparen) { + if (token_.type != tok::lparen) { throw compiler_exception( "should not be parsing parse_call without trailing '('", location_); @@ -1093,23 +1052,23 @@ expression_ptr Parser::parse_call() { // parse a function call get_token(); // consume '(' - while(token_.type != tok::rparen) { + while (token_.type != tok::rparen) { auto e = parse_expression(); - if(!e) return e; + if (!e) return e; args.emplace_back(std::move(e)); // reached the end of the argument list - if(token_.type == tok::rparen) break; + if (token_.type == tok::rparen) break; // insist on a 
comma between arguments - if( !expect(tok::comma, "call arguments must be separated by ','") ) + if (!expect(tok::comma, "call arguments must be separated by ','")) return expression_ptr(); get_token(); // consume ',' } // check that we have a closing parenthesis - if(!expect(tok::rparen, "function call missing closing ')'") ) { + if (!expect(tok::rparen, "function call missing closing ')'")) { return expression_ptr(); } get_token(); // consume ')' @@ -1129,7 +1088,7 @@ expression_ptr Parser::parse_line_expression() { int line = location_.line; expression_ptr lhs; Token next = peek(); - if(next.type == tok::lparen) { + if (next.type == tok::lparen) { lhs = parse_call(); // we have to ensure that a procedure call is alone on the line // to avoid : @@ -1138,42 +1097,45 @@ expression_ptr Parser::parse_line_expression() { // We assume that foo is a procedure call, if it is an eroneous // function call this has to be caught in the second pass. // or optimized away with a warning - if(!lhs) return lhs; - if(location_.line == line && token_.type != tok::eof) { + if (!lhs) return lhs; + if (location_.line == line && token_.type != tok::eof) { error(pprintf( "expected a new line after call expression, found '%'", yellow(token_.spelling))); return expression_ptr(); } - return lhs ; - } else if(next.type == tok::prime) { + return lhs; + } + else if (next.type == tok::prime) { lhs = make_expression<DerivativeExpression>(location_, token_.spelling); // consume both name and derivative operator get_token(); get_token(); // a derivative statement must be followed by '=' - if(token_.type!=tok::eq) { - error("a derivative declaration must have an assignment of the "\ + if (token_.type != tok::eq) { + error("a derivative declaration must have an assignment of the " "form\n x' = expression\n where x is a state variable"); return expression_ptr(); } - } else { + } + else { lhs = parse_unaryop(); } - if(!lhs) { // error + if (!lhs) { // error return lhs; } // we parse a binary expression if followed by an operator - if(token_.type == tok::eq) { - Token op = token_; // save the '=' operator with location - get_token(); // consume the '=' operator + if (token_.type == tok::eq) { + Token op = token_; // save the '=' operator with location + get_token(); // consume the '=' operator return parse_binop(std::move(lhs), op); - } else if(line == location_.line && token_.type != tok::eof){ + } + else if (line == location_.line && token_.type != tok::eof) { error(pprintf("expected an assignment '%' or new line, found '%'", - yellow("="), - yellow(token_.spelling))); + yellow("="), + yellow(token_.spelling))); return nullptr; } @@ -1185,21 +1147,21 @@ expression_ptr Parser::parse_stoich_term() { auto here = location_; bool negative = false; - while(token_.type==tok::minus) { + while (token_.type == tok::minus) { negative = !negative; get_token(); // consume '-' } - if(token_.type==tok::integer) { + if (token_.type == tok::integer) { coeff = parse_integer(); } - if(token_.type!=tok::identifier) { + if (token_.type != tok::identifier) { error(pprintf("expected an identifier, found '%'", yellow(token_.spelling))); return nullptr; } - if(negative) { + if (negative) { coeff = make_expression<IntegerExpression>(here, -coeff->is_integer()->integer_value()); } return make_expression<StoichTermExpression>(here, std::move(coeff), parse_identifier()); @@ -1209,14 +1171,14 @@ expression_ptr Parser::parse_stoich_expression() { std::vector<expression_ptr> terms; auto here = location_; - if(token_.type==tok::integer || 
token_.type==tok::identifier || token_.type==tok::minus) { + if (token_.type == tok::integer || token_.type == tok::identifier || token_.type == tok::minus) { auto term = parse_stoich_term(); if (!term) return nullptr; terms.push_back(std::move(term)); - while(token_.type==tok::plus || token_.type==tok::minus) { - if (token_.type==tok::plus) { + while (token_.type == tok::plus || token_.type == tok::minus) { + if (token_.type == tok::plus) { get_token(); // consume plus } @@ -1233,7 +1195,7 @@ expression_ptr Parser::parse_stoich_expression() { expression_ptr Parser::parse_tilde_expression() { auto here = location_; - if(token_.type!=tok::tilde) { + if (token_.type != tok::tilde) { error(pprintf("expected '%', found '%'", yellow("~"), yellow(token_.spelling))); return nullptr; } @@ -1249,13 +1211,13 @@ expression_ptr Parser::parse_tilde_expression() { if (auto sterm = term->is_stoich_term()) { if (sterm->negative()) { error(pprintf("expected only non-negative terms in reaction lhs, found '%'", - yellow(term->to_string()))); + yellow(term->to_string()))); return nullptr; } } } - if(token_.type != tok::arrow) { + if (token_.type != tok::arrow) { error(pprintf("expected '%', found '%'", yellow("<->"), yellow(token_.spelling))); return nullptr; } @@ -1269,7 +1231,7 @@ expression_ptr Parser::parse_tilde_expression() { if (auto sterm = term->is_stoich_term()) { if (sterm->negative()) { error(pprintf("expected only non-negative terms in reaction rhs, found '%'", - yellow(term->to_string()))); + yellow(term->to_string()))); return nullptr; } } @@ -1299,13 +1261,12 @@ expression_ptr Parser::parse_tilde_expression() { } get_token(); // consume rparen - return make_expression<ReactionExpression>(here, std::move(lhs), std::move(rhs), - std::move(fwd), std::move(rev)); + return make_expression<ReactionExpression>(here, std::move(lhs), std::move(rhs), std::move(fwd), std::move(rev)); } else if (search_to_eol(tok::eq)) { auto lhs_bin = parse_expression(tok::eq); - if(token_.type!=tok::eq) { + if (token_.type != tok::eq) { error(pprintf("expected '%', found '%'", yellow("="), yellow(token_.spelling))); return nullptr; } @@ -1323,7 +1284,7 @@ expression_ptr Parser::parse_tilde_expression() { expression_ptr Parser::parse_conserve_expression() { auto here = location_; - if(token_.type!=tok::conserve) { + if (token_.type != tok::conserve) { error(pprintf("expected '%', found '%'", yellow("CONSERVE"), yellow(token_.spelling))); return nullptr; } @@ -1332,7 +1293,7 @@ expression_ptr Parser::parse_conserve_expression() { auto lhs = parse_stoich_expression(); if (!lhs) return nullptr; - if(token_.type != tok::eq) { + if (token_.type != tok::eq) { error(pprintf("expected '%', found '%'", yellow("="), yellow(token_.spelling))); return nullptr; } @@ -1346,11 +1307,11 @@ expression_ptr Parser::parse_conserve_expression() { expression_ptr Parser::parse_expression(int prec, tok stop_token) { auto lhs = parse_unaryop(); - if(lhs==nullptr) return nullptr; + if (lhs == nullptr) return nullptr; // Combine all sub-expressions with precedence greater than prec. for (;;) { - if(token_.type==stop_token) { + if (token_.type == stop_token) { return lhs; } @@ -1360,12 +1321,12 @@ expression_ptr Parser::parse_expression(int prec, tok stop_token) { // Note: all tokens that are not infix binary operators have // precidence of -1, so expressions like function calls will short // circuit this loop here. 
- if(p_op<=prec) return lhs; + if (p_op <= prec) return lhs; get_token(); // consume the infix binary operator lhs = parse_binop(std::move(lhs), op); - if(!lhs) return nullptr; + if (!lhs) return nullptr; } return lhs; @@ -1388,34 +1349,33 @@ expression_ptr Parser::parse_expression(tok t) { expression_ptr Parser::parse_unaryop() { expression_ptr e; Token op = token_; - switch(token_.type) { - case tok::plus : - // plus sign is simply ignored - get_token(); // consume '+' - return parse_unaryop(); - case tok::minus : - get_token(); // consume '-' - e = parse_unaryop(); // handle recursive unary - if(!e) return nullptr; - return unary_expression(token_.location, op.type, std::move(e)); - case tok::exp : - case tok::sin : - case tok::cos : - case tok::log : - case tok::abs : - case tok::safeinv: - case tok::exprelr: - get_token(); // consume operator (exp, sin, cos or log) - if(token_.type!=tok::lparen) { - error( "missing parenthesis after call to " - + yellow(op.spelling) ); - return nullptr; - } - e = parse_unaryop(); // handle recursive unary - if(!e) return nullptr; - return unary_expression(token_.location, op.type, std::move(e)); - default : - return parse_primary(); + switch (token_.type) { + case tok::plus: + // plus sign is simply ignored + get_token(); // consume '+' + return parse_unaryop(); + case tok::minus: + get_token(); // consume '-' + e = parse_unaryop(); // handle recursive unary + if (!e) return nullptr; + return unary_expression(token_.location, op.type, std::move(e)); + case tok::exp: + case tok::sin: + case tok::cos: + case tok::log: + case tok::abs: + case tok::safeinv: + case tok::exprelr: + get_token(); // consume operator (exp, sin, cos or log) + if (token_.type != tok::lparen) { + error("missing parenthesis after call to " + yellow(op.spelling)); + return nullptr; + } + e = parse_unaryop(); // handle recursive unary + if (!e) return nullptr; + return unary_expression(token_.location, op.type, std::move(e)); + default: + return parse_primary(); } return nullptr; } @@ -1428,50 +1388,49 @@ expression_ptr Parser::parse_unaryop() { /// :: parenthesis expression (parsed recursively) /// :: prefix binary operators expression_ptr Parser::parse_primary() { - switch(token_.type) { - case tok::real: - return parse_real(); - case tok::integer: - return parse_integer(); - case tok::identifier: - if( peek().type == tok::lparen ) { - return parse_call(); - } - return parse_identifier(); - case tok::lparen: - return parse_parenthesis_expression(); - case tok::min : - case tok::max : - { - auto op = token_; - // handle infix binary operators, e.g. min(l,r) and max(l,r) - get_token(); // consume operator keyword token - if (token_.type!=tok::lparen) { - error("expected opening parenthesis '('"); - return nullptr; - } - get_token(); // consume ( - auto lhs = parse_expression(); - if (!lhs) return nullptr; + switch (token_.type) { + case tok::real: + return parse_real(); + case tok::integer: + return parse_integer(); + case tok::identifier: + if (peek().type == tok::lparen) { + return parse_call(); + } + return parse_identifier(); + case tok::lparen: + return parse_parenthesis_expression(); + case tok::min: + case tok::max: { + auto op = token_; + // handle infix binary operators, e.g. 
min(l,r) and max(l,r) + get_token(); // consume operator keyword token + if (token_.type != tok::lparen) { + error("expected opening parenthesis '('"); + return nullptr; + } + get_token(); // consume ( + auto lhs = parse_expression(); + if (!lhs) return nullptr; - if (token_.type!=tok::comma) { - error("expected comma ','"); - return nullptr; - } - get_token(); // consume , + if (token_.type != tok::comma) { + error("expected comma ','"); + return nullptr; + } + get_token(); // consume , - auto rhs = parse_expression(); - if (!rhs) return nullptr; - if (token_.type!=tok::rparen) { - error("expected closing parenthesis ')'"); - return nullptr; - } - get_token(); // consume ) - return binary_expression(op.location, op.type, std::move(lhs), std::move(rhs)); - } - default: // fall through to return nullptr at end of function - error( pprintf( "unexpected token '%' in expression", - yellow(token_.spelling) )); + auto rhs = parse_expression(); + if (!rhs) return nullptr; + if (token_.type != tok::rparen) { + error("expected closing parenthesis ')'"); + return nullptr; + } + get_token(); // consume ) + return binary_expression(op.location, op.type, std::move(lhs), std::move(rhs)); + } + default: // fall through to return nullptr at end of function + error(pprintf("unexpected token '%' in expression", + yellow(token_.spelling))); } return nullptr; @@ -1480,7 +1439,7 @@ expression_ptr Parser::parse_primary() { expression_ptr Parser::parse_parenthesis_expression() { // never call unless at start of parenthesis - if(token_.type!=tok::lparen) { + if (token_.type != tok::lparen) { throw compiler_exception( "attempt to parse a parenthesis_expression() without opening parenthesis", location_); @@ -1491,7 +1450,7 @@ expression_ptr Parser::parse_parenthesis_expression() { auto e = parse_expression(); // check for closing parenthesis ')' - if( !e || !expect(tok::rparen) ) return nullptr; + if (!e || !expect(tok::rparen)) return nullptr; get_token(); // consume ')' @@ -1513,26 +1472,26 @@ expression_ptr Parser::parse_integer() { expression_ptr Parser::parse_binop(expression_ptr&& lhs, Token op_left) { auto p_op_left = binop_precedence(op_left.type); auto rhs = parse_expression(p_op_left); - if(!rhs) return nullptr; + if (!rhs) return nullptr; auto op_right = token_; auto p_op_right = binop_precedence(op_right.type); - bool right_assoc = operator_associativity(op_right.type)==associativityKind::right; + bool right_assoc = operator_associativity(op_right.type) == associativityKind::right; - if(p_op_right>p_op_left) { + if (p_op_right > p_op_left) { throw compiler_exception( "parse_binop() : encountered operator of higher precedence", location_); } - if(p_op_right<p_op_left) { + if (p_op_right < p_op_left) { return binary_expression(op_left.location, op_left.type, std::move(lhs), std::move(rhs)); } get_token(); // consume op_right - if(right_assoc) { + if (right_assoc) { rhs = parse_binop(std::move(rhs), op_right); - if(!rhs) return nullptr; + if (!rhs) return nullptr; return binary_expression(op_left.location, op_left.type, std::move(lhs), std::move(rhs)); } @@ -1553,23 +1512,24 @@ expression_ptr Parser::parse_local() { // create local expression stub auto e = make_expression<LocalDeclaration>(loc); - if(!e) return e; + if (!e) return e; // add symbols - while(1) { - if(!expect(tok::identifier)) return nullptr; + while (1) { + if (!expect(tok::identifier)) return nullptr; // try adding variable name to list - if(!e->is_local_declaration()->add_variable(token_)) { + if 
(!e->is_local_declaration()->add_variable(token_)) { error(e->error_message()); return nullptr; } get_token(); // consume identifier // look for comma that indicates continuation of the variable list - if(token_.type == tok::comma) { + if (token_.type == tok::comma) { get_token(); - } else { + } + else { break; } } @@ -1592,13 +1552,13 @@ expression_ptr Parser::parse_solve() { get_token(); // consume the SOLVE keyword - if(token_.type != tok::identifier) goto solve_statement_error; + if (token_.type != tok::identifier) goto solve_statement_error; name = token_.spelling; // save name of procedure - get_token(); // consume the procedure identifier + get_token(); // consume the procedure identifier variant = solverVariant::regular; - if(token_.type != tok::method && token_.type != tok::steadystate) { + if (token_.type != tok::method && token_.type != tok::steadystate) { method = solverMethod::none; } else { @@ -1606,7 +1566,7 @@ expression_ptr Parser::parse_solve() { variant = solverVariant::steadystate; } get_token(); // consume the METHOD keyword - switch(token_.type) { + switch (token_.type) { case tok::cnexp: method = solverMethod::cnexp; break; @@ -1620,21 +1580,22 @@ expression_ptr Parser::parse_solve() { get_token(); // consume the method description } // check that the rest of the line was empty - if(line == location_.line) { - if(token_.type != tok::eof) goto solve_statement_error; + if (line == location_.line) { + if (token_.type != tok::eof) goto solve_statement_error; } return make_expression<SolveExpression>(loc, name, method, variant); solve_statement_error: - error( "SOLVE statements must have the form\n" - " SOLVE x METHOD method\n" - " or\n" - " SOLVE x STEADYSTATE sparse\n" - " or\n" - " SOLVE x\n" - "where 'x' is the name of a DERIVATIVE block and " - "'method' is 'cnexp' or 'sparse'", loc); + error("SOLVE statements must have the form\n" + " SOLVE x METHOD method\n" + " or\n" + " SOLVE x STEADYSTATE sparse\n" + " or\n" + " SOLVE x\n" + "where 'x' is the name of a DERIVATIVE block and " + "'method' is 'cnexp' or 'sparse'", + loc); return nullptr; } @@ -1651,31 +1612,32 @@ expression_ptr Parser::parse_conductance() { get_token(); // consume the CONDUCTANCE keyword - if(token_.type != tok::identifier) goto conductance_statement_error; + if (token_.type != tok::identifier) goto conductance_statement_error; name = token_.spelling; // save name of variable - get_token(); // consume the variable identifier + get_token(); // consume the variable identifier - if(token_.type == tok::useion) { + if (token_.type == tok::useion) { get_token(); // consume the USEION keyword - if(token_.type!=tok::identifier) goto conductance_statement_error; + if (token_.type != tok::identifier) goto conductance_statement_error; channel = token_.spelling; get_token(); // consume the ion channel type } // check that the rest of the line was empty - if(line == location_.line) { - if(token_.type != tok::eof) goto conductance_statement_error; + if (line == location_.line) { + if (token_.type != tok::eof) goto conductance_statement_error; } return make_expression<ConductanceExpression>(loc, name, channel); conductance_statement_error: - error( "CONDUCTANCE statements must have the form\n" - " CONDUCTANCE g USEION channel\n" - " or\n" - " CONDUCTANCE g\n" - "where 'g' is the name of a variable, and 'channel' is the type of ion channel", loc); + error("CONDUCTANCE statements must have the form\n" + " CONDUCTANCE g USEION channel\n" + " or\n" + " CONDUCTANCE g\n" + "where 'g' is the name of a variable, and 
'channel' is the type of ion channel", + loc); return nullptr; } @@ -1683,34 +1645,34 @@ expression_ptr Parser::parse_if() { Token if_token = token_; get_token(); // consume 'if' - if(!expect(tok::lparen)) return nullptr; + if (!expect(tok::lparen)) return nullptr; // parse the conditional auto cond = parse_parenthesis_expression(); - if(!cond) return nullptr; + if (!cond) return nullptr; // parse the block of the true branch auto true_branch = parse_block(true); - if(!true_branch) return nullptr; + if (!true_branch) return nullptr; // parse the false branch if there is an else expression_ptr false_branch; - if(token_.type == tok::else_stmt) { + if (token_.type == tok::else_stmt) { get_token(); // consume else // handle 'else if {}' case recursively - if(token_.type == tok::if_stmt) { + if (token_.type == tok::if_stmt) { expr_list_type if_block; auto exp = parse_if(); if_block.push_back(std::move(exp)); false_branch = make_expression<BlockExpression>(Location(), std::move(if_block), true); } // we have a closing 'else {}' - else if(token_.type == tok::lbrace) { + else if (token_.type == tok::lbrace) { false_branch = parse_block(true); } else { - error("expect either '"+yellow("if")+"' or '"+yellow("{")+" after else"); + error("expect either '" + yellow("if") + "' or '" + yellow("{") + " after else"); return nullptr; } } @@ -1731,16 +1693,16 @@ expression_ptr Parser::parse_block(bool is_nested) { Location block_location = token_.location; expr_list_type body; - while(token_.type != tok::rbrace) { + while (token_.type != tok::rbrace) { auto e = parse_statement(); - if(!e) return e; + if (!e) return e; - if(is_nested) { - if(e->is_local_declaration()) { + if (is_nested) { + if (e->is_local_declaration()) { error("LOCAL variable declarations are not allowed inside a nested scope"); return nullptr; } - if(e->is_reaction()) { + if (e->is_reaction()) { error("reaction expressions are not allowed inside a nested scope"); return nullptr; } @@ -1749,9 +1711,10 @@ expression_ptr Parser::parse_block(bool is_nested) { body.emplace_back(std::move(e)); } - if(token_.type != tok::rbrace) { + if (token_.type != tok::rbrace) { error(pprintf("could not find closing '%' for else statement that started at ", - yellow("}"), block_location)); + yellow("}"), + block_location)); return nullptr; } get_token(); // consume closing '}' @@ -1768,16 +1731,16 @@ expression_ptr Parser::parse_initial() { get_token(); // consume 'INITIAL' - if(!expect(tok::lbrace)) return nullptr; + if (!expect(tok::lbrace)) return nullptr; get_token(); // consume '{' expr_list_type body; - while(token_.type != tok::rbrace) { + while (token_.type != tok::rbrace) { auto e = parse_statement(); - if(!e) return e; + if (!e) return e; // disallow variable declarations in an INITIAL block - if(e->is_local_declaration()) { + if (e->is_local_declaration()) { error("LOCAL variable declarations are not allowed inside a nested scope"); return nullptr; } @@ -1785,9 +1748,10 @@ expression_ptr Parser::parse_initial() { body.emplace_back(std::move(e)); } - if(token_.type != tok::rbrace) { + if (token_.type != tok::rbrace) { error(pprintf("could not find closing '%' for else statement that started at ", - yellow("}"), block_location)); + yellow("}"), + block_location)); return nullptr; } get_token(); // consume closing '}' @@ -1798,7 +1762,7 @@ expression_ptr Parser::parse_initial() { expression_ptr Parser::parse_compartment_statement() { auto here = location_; - if(token_.type!=tok::compartment) { + if (token_.type != tok::compartment) { 
error(pprintf("expected '%', found '%'", yellow("COMPARTMENT"), yellow(token_.spelling))); return nullptr; } @@ -1807,18 +1771,17 @@ expression_ptr Parser::parse_compartment_statement() { auto scale_factor = parse_expression(tok::rbrace); if (!scale_factor) return nullptr; - if(token_.type != tok::lbrace) { + if (token_.type != tok::lbrace) { error(pprintf("expected '%', found '%'", yellow("{"), yellow(token_.spelling))); return nullptr; } get_token(); // consume '{' std::vector<expression_ptr> states; - while (token_.type!=tok::rbrace) { + while (token_.type != tok::rbrace) { // check identifier - if(token_.type != tok::identifier) { - error( "expected a valid identifier, found '" - + yellow(token_.spelling) + "'"); + if (token_.type != tok::identifier) { + error("expected a valid identifier, found '" + yellow(token_.spelling) + "'"); return nullptr; } diff --git a/modcc/parser.hpp b/modcc/parser.hpp index a250ee8c5b6a6b866e65604b176d175cae993dd6..0bdf0ea84b5dfaa779f768575dd2fd5c2572f6a9 100644 --- a/modcc/parser.hpp +++ b/modcc/parser.hpp @@ -8,10 +8,9 @@ #include "lexer.hpp" #include "module.hpp" -class Parser : public Lexer { +class Parser: public Lexer { public: - - explicit Parser(Module& m, bool advance=true); + explicit Parser(Module& m, bool advance = true); Parser(std::string const&); bool parse(); @@ -21,7 +20,7 @@ public: expression_ptr parse_integer(); expression_ptr parse_real(); expression_ptr parse_call(); - expression_ptr parse_expression(int prec, tok t=tok::eq); + expression_ptr parse_expression(int prec, tok t = tok::eq); expression_ptr parse_expression(); expression_ptr parse_expression(tok); expression_ptr parse_primary(); @@ -62,14 +61,14 @@ public: std::unordered_map<std::string, std::string> constants_map_; private: - Module *module_; + Module* module_; std::vector<Token> comma_separated_identifiers(); std::vector<Token> unit_description(); std::string value_literal(); int value_signed_integer(); - std::pair<Token, Token> range_description(); - std::pair<Token, Token> from_to_description(); + std::pair<std::string, std::string> range_description(); + std::pair<std::string, std::string> from_to_description(); /// build the identifier list void add_variables_to_symbols(); @@ -80,8 +79,8 @@ private: // disable default and copy assignment Parser(); - Parser(Parser const &); + Parser(Parser const&); - bool expect(tok, const char *str=""); + bool expect(tok, const char* str = ""); bool expect(tok, std::string const& str); }; diff --git a/modcc/printer/infoprinter.cpp b/modcc/printer/infoprinter.cpp index 44e972e137794361a1eec81e2530415cbed1d184..a5dcf6040426b70395de4e1c9fd4c8c1574754c1 100644 --- a/modcc/printer/infoprinter.cpp +++ b/modcc/printer/infoprinter.cpp @@ -13,7 +13,9 @@ using io::quote; struct id_field_info { - id_field_info(const Id& id, const char* kind): id(id), kind(kind) {} + id_field_info(const Id& id, const char* kind): + id(id), + kind(kind) {} const Id& id; const char* kind; @@ -24,10 +26,10 @@ std::ostream& operator<<(std::ostream& out, const id_field_info& wrap) { out << "{" << quote(id.name()) << ", " << "{spec::" << wrap.kind << ", " << quote(id.unit_string()) << ", " - << (id.has_value()? id.value: "0"); + << (id.has_value() ? 
id.value : "0"); if (id.has_range()) { - out << ", " << id.range.first.spelling << "," << id.range.second.spelling; + out << ", " << id.range.first << "," << id.range.second; } out << "}}"; @@ -35,7 +37,8 @@ std::ostream& operator<<(std::ostream& out, const id_field_info& wrap) { } struct ion_dep_info { - ion_dep_info(const IonDep& ion): ion(ion) {} + ion_dep_info(const IonDep& ion): + ion(ion) {} const IonDep& ion; }; @@ -45,13 +48,13 @@ std::ostream& operator<<(std::ostream& out, const ion_dep_info& wrap) { const IonDep& ion = wrap.ion; return out << "{\"" << ion.name << "\", {" - << boolalpha[ion.writes_concentration_int()] << ", " - << boolalpha[ion.writes_concentration_ext()] << ", " - << boolalpha[ion.uses_rev_potential()] << ", " - << boolalpha[ion.writes_rev_potential()] << ", " - << boolalpha[ion.uses_valence()] << ", " - << boolalpha[ion.verifies_valence()] << ", " - << ion.expected_valence << "}}"; + << boolalpha[ion.writes_concentration_int()] << ", " + << boolalpha[ion.writes_concentration_ext()] << ", " + << boolalpha[ion.uses_rev_potential()] << ", " + << boolalpha[ion.writes_rev_potential()] << ", " + << boolalpha[ion.uses_valence()] << ", " + << boolalpha[ion.verifies_valence()] << ", " + << ion.expected_valence << "}}"; } std::string build_info_header(const Module& m, const printer_options& opt) { @@ -69,29 +72,28 @@ std::string build_info_header(const Module& m, const printer_options& opt) { io::pfxstringstream out; - out << - "#pragma once\n" - "#include <memory>\n" - "\n" - "#include <" << arb_header_prefix() << "mechanism.hpp>\n" - "#include <" << arb_header_prefix() << "mechinfo.hpp>\n" - "\n" - << namespace_declaration_open(ns_components) << - "\n" - "template <typename Backend>\n" - "::arb::concrete_mech_ptr<Backend> make_mechanism_" << name << "();\n" - "\n" - "inline const ::arb::mechanism_info& mechanism_" << name << "_info() {\n" + out << "#pragma once\n" + "#include <memory>\n" + "\n" + "#include <" + << arb_header_prefix() << "mechanism.hpp>\n" + "#include <" + << arb_header_prefix() << "mechinfo.hpp>\n" + "\n" + << namespace_declaration_open(ns_components) << "\n" + "template <typename Backend>\n" + "::arb::concrete_mech_ptr<Backend> make_mechanism_" + << name << "();\n" + "\n" + "inline const ::arb::mechanism_info& mechanism_" + << name << "_info() {\n" << indent; - any_fields && out << - "using spec = ::arb::mechanism_field_spec;\n"; + any_fields&& out << "using spec = ::arb::mechanism_field_spec;\n"; - out << - "static ::arb::mechanism_info info = {\n" - << indent << - "// globals\n" - "{\n" + out << "static ::arb::mechanism_info info = {\n" + << indent << "// globals\n" + "{\n" << indent; io::separator sep(",\n"); @@ -99,8 +101,7 @@ std::string build_info_header(const Module& m, const printer_options& opt) { out << sep << id_field_info(id, "global"); } - out << popindent << - "\n},\n// parameters\n{\n" + out << popindent << "\n},\n// parameters\n{\n" << indent; sep.reset(); @@ -108,8 +109,7 @@ std::string build_info_header(const Module& m, const printer_options& opt) { out << sep << id_field_info(id, "parameter"); } - out << popindent << - "\n},\n// state variables\n{\n" + out << popindent << "\n},\n// state variables\n{\n" << indent; sep.reset(); @@ -117,8 +117,7 @@ std::string build_info_header(const Module& m, const printer_options& opt) { out << sep << id_field_info(id, "state"); } - out << popindent << - "\n},\n// ion dependencies\n{\n" + out << popindent << "\n},\n// ion dependencies\n{\n" << indent; sep.reset(); @@ -128,16 +127,16 @@ std::string 
     std::string fingerprint = "<placeholder>";

     out << popindent << "\n"
-        "},\n"
-        "// fingerprint\n" << quote(fingerprint) << ",\n"
-        "// linear, homogeneous mechanism\n" << m.is_linear() << "\n"
-        << popindent <<
-        "};\n"
-        "\n"
-        "return info;\n"
-        << popindent <<
-        "}\n"
-        "\n"
+               "},\n"
+               "// fingerprint\n"
+        << quote(fingerprint) << ",\n"
+                                 "// linear, homogeneous mechanism\n"
+        << m.is_linear() << "\n"
+        << popindent << "};\n"
+                         "\n"
+                         "return info;\n"
+        << popindent << "}\n"
+                         "\n"
         << namespace_declaration_close(ns_components);

     return out.str();
diff --git a/test/unit-modcc/mod_files/test0.mod b/test/unit-modcc/mod_files/test0.mod
index 4fddb95eedcbdf297665b4f72776ae8898af9bea..154def03a93f139ed605046f034d90a06a20272f 100644
--- a/test/unit-modcc/mod_files/test0.mod
+++ b/test/unit-modcc/mod_files/test0.mod
@@ -4,7 +4,8 @@ NEURON {
     THREADSAFE
     SUFFIX test
-    USEION ca WRITE ik READ ki, cai
+    USEION ca READ cai
+    USEION k WRITE ik READ ki
     RANGE gkbar, ik, ek, ki, cai
     GLOBAL minf, mtau, hinf, htau
 }
@@ -30,9 +31,9 @@ PARAMETER {
     v (mV) ? another style of comment
     vhalfm =-43 (mV)
    km =8
-    vhalfh =-67 (mV)
+    vhalfh =-67 (mV) <0,1000>
     kh =7.3
-    q10 =2.3
+    q10 =2.3 <0,42>
 }

 ASSIGNED {
@@ -48,12 +49,13 @@ BREAKPOINT {
 }

 INITIAL {
-    trates(v)
+    trates(v, celsius)
     m=minf
     h=hinf
+    ik = h
 }

-PROCEDURE trates(v) {
+PROCEDURE trates(v, celsius) {
     LOCAL qt
     qt=q10^((celsius-22)/10)
     minf=1-1/(1+exp((v-vhalfm)/km))
@@ -86,7 +88,7 @@ PROCEDURE foo3() {}
 : should states be a procedure with special declaration syntax (takes no arguments by default)?
 DERIVATIVE states {
-    trates(v)
+    trates(v, celsius)
     m' = (minf-m)/mtau
     h' = (hinf-h)/htau
 }
diff --git a/test/unit-modcc/test_module.cpp b/test/unit-modcc/test_module.cpp
index e71964525f99d5b607df91c4b3780a7af032358f..74040532615d59ffa7a38c354d3289e250f9aa1e 100644
--- a/test/unit-modcc/test_module.cpp
+++ b/test/unit-modcc/test_module.cpp
@@ -1,25 +1,84 @@
 #include "common.hpp"
 #include "io/bulkio.hpp"
 #include "module.hpp"
+#include <unordered_map>

 TEST(Module, open) {
     Module m(io::read_all(DATADIR "/mod_files/test0.mod"), "test0.mod");
-    if(!m.buffer().size()) {
+    if (!m.buffer().size()) {
         std::cout << "skipping Module.open test because unable to open input file" << std::endl;
         return;
     }

     Lexer lexer(m.buffer());

     auto t = lexer.parse();
-    while(t.type != tok::eof) {
+    while (t.type != tok::eof) {
         t = lexer.parse();
         EXPECT_NE(t.type, tok::reserved);
     }
 }

+TEST(Module, ion_deps) {
+    Module m(io::read_all(DATADIR "/mod_files/test0.mod"), "test0.mod");
+    EXPECT_NE(m.buffer().size(), 0);
+
+    Parser p(m, false);
+    EXPECT_TRUE(p.parse());
+
+    EXPECT_TRUE(m.has_ion("k"));
+    auto k_dep = m.find_ion("k");
+    EXPECT_TRUE(k_dep->writes_current());
+    EXPECT_TRUE(k_dep->uses_current());
+    EXPECT_FALSE(k_dep->uses_rev_potential());
+    EXPECT_TRUE(k_dep->uses_concentration_int());
+    EXPECT_FALSE(k_dep->uses_concentration_ext());
+    EXPECT_TRUE(k_dep->writes_current());
+    EXPECT_FALSE(k_dep->writes_concentration_int());
+    EXPECT_FALSE(k_dep->writes_concentration_ext());
+    EXPECT_FALSE(k_dep->writes_rev_potential());
+    EXPECT_FALSE(k_dep->uses_valence());
+    EXPECT_FALSE(k_dep->verifies_valence());
+
+    EXPECT_TRUE(m.has_ion("ca"));
+    auto ca_dep = m.find_ion("ca");
+    EXPECT_FALSE(ca_dep->writes_current());
+    EXPECT_FALSE(ca_dep->uses_current());
+    EXPECT_FALSE(ca_dep->uses_rev_potential());
+    EXPECT_TRUE(ca_dep->uses_concentration_int());
+    EXPECT_FALSE(ca_dep->uses_concentration_ext());
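+    // ca is only read (READ cai) in test0.mod, so none of the write or valence flags should be set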
+    EXPECT_FALSE(ca_dep->writes_current());
+    EXPECT_FALSE(ca_dep->writes_concentration_int());
+    EXPECT_FALSE(ca_dep->writes_concentration_ext());
+    EXPECT_FALSE(ca_dep->writes_rev_potential());
+    EXPECT_FALSE(ca_dep->uses_valence());
+    EXPECT_FALSE(ca_dep->verifies_valence());
+}
+
+TEST(Module, identifiers) {
+    Module m(io::read_all(DATADIR "/mod_files/test0.mod"), "test0.mod");
+    EXPECT_NE(m.buffer().size(), 0);
+
+    Parser p(m, false);
+    EXPECT_TRUE(p.parse());
+
+    std::unordered_map<std::string, std::pair<std::string, bool>> expected{
+        {"cai", {"", false}},
+        {"vhalfh", {"mV", true}},
+        {"q10", {"", true}},
+        {"gkbar", {"mho / cm2", false}}};
+
+    for (const auto& parm: m.parameter_block().parameters) {
+        auto it = expected.find(parm.name());
+        if (it != expected.end()) {
+            const auto& [units, rangep] = it->second;
+            EXPECT_EQ(units, parm.unit_string());
+            EXPECT_EQ(rangep, parm.has_range());
+        }
+    }
+}
+
 TEST(Module, linear_mechanisms) {
-    for(int i = 1; i < 6; i++)
-    {
-        auto file_name = "test"+std::to_string(i)+".mod";
+    for (int i = 1; i < 6; i++) {
+        auto file_name = "test" + std::to_string(i) + ".mod";
         Module m(io::read_all(DATADIR "/mod_files/" + file_name), file_name);

         if (!m.buffer().size()) {
@@ -35,7 +94,7 @@ TEST(Module, linear_mechanisms) {
         m.semantic();

-        if(i < 3) {
+        if (i < 3) {
             EXPECT_TRUE(m.is_linear());
         }
         else {
diff --git a/test/unit-modcc/test_parser.cpp b/test/unit-modcc/test_parser.cpp
index 2bead45ea7b835db0260cbc586b1690c9bf51c01..3af61a32f0df8f634ef5196f974593361a38c4fa 100644
--- a/test/unit-modcc/test_parser.cpp
+++ b/test/unit-modcc/test_parser.cpp
@@ -12,7 +12,7 @@ template <typename EPtr>
 void verbose_print(const EPtr& e, Parser& p, const char* text) {
     verbose_print(e);
-    if (p.status()==lexerStatus::error) {
+    if (p.status() == lexerStatus::error) {
         verbose_print("in ", red(text), "\t", p.error_message());
     }
 }
@@ -21,22 +21,21 @@ template <typename Derived, typename RetUniqPtr>
 ::testing::AssertionResult check_parse(
     std::unique_ptr<Derived>& derived,
     RetUniqPtr (Parser::*pmemfn)(),
-    const char* text)
-{
+    const char* text) {
     Parser p(text);
     auto e = (p.*pmemfn)();
     verbose_print(e, p, text);

-    if (e==nullptr) {
+    if (e == nullptr) {
         return ::testing::AssertionFailure() << "failed to parse '" << text << "'";
     }

-    if (p.status()!=lexerStatus::happy) {
+    if (p.status() != lexerStatus::happy) {
         return ::testing::AssertionFailure() << "parser status is not happy";
     }

-    Derived *ptr = e? dynamic_cast<Derived*>(e.get()): nullptr;
-    if (ptr==nullptr) {
+    Derived* ptr = e ? dynamic_cast<Derived*>(e.get()) : nullptr;
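+    // the parsed expression must down-cast to the Derived node type requested by the caller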
+    if (ptr == nullptr) {
         return ::testing::AssertionFailure() << "failed to cast to derived type";
     }
     else {
@@ -59,11 +58,11 @@ template <typename RetUniqPtr>
     auto e = (p.*pmemfn)();
     verbose_print(e, p, text);

-    if (p.status()!=lexerStatus::error) {
+    if (p.status() != lexerStatus::error) {
         return ::testing::AssertionFailure() << "parser status is not error";
     }

-    if (e!=nullptr) {
+    if (e != nullptr) {
         return ::testing::AssertionFailure() << "parser returned non-null expression";
     }

@@ -72,7 +71,7 @@ template <typename RetUniqPtr>
 TEST(Parser, full_file) {
     Module m(io::read_all(DATADIR "/mod_files/test0.mod"), "test0.mod");
-    if (m.buffer().size()==0) {
+    if (m.buffer().size() == 0) {
         std::cout << "skipping Parser.full_file test because unable to open input file" << std::endl;
         return;
     }
@@ -91,8 +90,7 @@ TEST(Parser, procedure) {
         " y = x + y * 2\n"
         " y = a + b +c + a + b\n"
         " y = a + b *c + a + b\n"
-        "}"
-        ,
+        "}",
         "PROCEDURE trates(v (mV)) {\n"
         " LOCAL qt\n"
         " qt=q10^((celsius-22)/10)\n"
@@ -100,8 +98,7 @@ TEST(Parser, procedure) {
         " hinf=1/(1+exp((v-vhalfh)/kh))\n"
         " mtau = 0.6\n"
         " htau = 1500\n"
-        "}"
-    };
+        "}"};

     for (const auto& str: calls) {
         EXPECT_TRUE(check_parse(&Parser::parse_procedure, str));
@@ -110,17 +107,16 @@ TEST(Parser, load_constant) {
     char str[] = {
-        "CONSTANT {\n"
-        " t0 = -1.2\n"
-        " t1 = 0.5\n"
-        " t2 = -t0\n"
-        " t3 = -t1\n"
-        "}"
-    };
+        "CONSTANT {\n"
+        " t0 = -1.2\n"
+        " t1 = 0.5\n"
+        " t2 = -t0\n"
+        " t3 = -t1\n"
+        "}"};

     Parser p(str);
     p.parse_constant_block();
-    EXPECT_TRUE(p.status()==lexerStatus::happy);
+    EXPECT_TRUE(p.status() == lexerStatus::happy);

     EXPECT_TRUE(p.constants_map_.find("t0") != p.constants_map_.end());
     EXPECT_EQ("-1.2", p.constants_map_.at("t0"));
@@ -137,16 +133,16 @@ TEST(Parser, load_constant) {

 TEST(Parser, parameters_from_constant) {
     const char str[] =
-        "PARAMETER { \n"
-        " tau = -t0 \n"
-        " e = t1 \n"
-        "}";
+        "PARAMETER { \n"
+        " tau = -t0 \n"
+        " e = t1 \n"
+        "}";

     expression_ptr null;
-    Module m(str, str+std::strlen(str), "");
+    Module m(str, str + std::strlen(str), "");
     Parser p(m, false);
-    p.constants_map_.insert({"t0","-0.5"});
-    p.constants_map_.insert({"t1","1.2"});
+    p.constants_map_.insert({"t0", "-0.5"});
+    p.constants_map_.insert({"t1", "1.2"});
     p.parse_parameter_block();

     EXPECT_EQ(lexerStatus::happy, p.status());
@@ -161,13 +157,15 @@ TEST(Parser, parameters_from_constant) {

 TEST(Parser, parameters_range) {
     const char str[] =
-        "PARAMETER { \n"
-        " tau = 0.2 <0,1000> \n"
-        " rho = 0.2 \n"
-        "}";
+        "PARAMETER { \n"
+        " tau = 0.2 <0,1000> \n"
+        " rho = 0.2 \n"
+        " gamma = 0.2 <-1000,1000> \n"
+        " sigma = 3 <-2.71,3.14> \n"
+        "}";

     expression_ptr null;
-    Module m(str, str+std::strlen(str), "");
+    Module m(str, str + std::strlen(str), "");
     Parser p(m, false);
     p.parse_parameter_block();

@@ -175,14 +173,14 @@ TEST(Parser, parameters_range) {
     verbose_print(null, p, str);

     auto param_block = m.parameter_block();
-    EXPECT_EQ("tau",  param_block.parameters[0].name());
-    EXPECT_EQ("0.2",  param_block.parameters[0].value);
-    EXPECT_EQ("0",    param_block.parameters[0].range.first.spelling);
-    EXPECT_EQ("1000", param_block.parameters[0].range.second.spelling);
-    EXPECT_EQ("rho",  param_block.parameters[1].name());
-    EXPECT_EQ("0.2",  param_block.parameters[1].value);
-    EXPECT_EQ("",     param_block.parameters[1].range.first.spelling);
-    EXPECT_EQ("",     param_block.parameters[1].range.second.spelling);
+    EXPECT_EQ("tau", param_block.parameters[0].name());
+    EXPECT_EQ("0.2", param_block.parameters[0].value);
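+    // range bounds are plain strings now; both components stay empty when no <lower,upper> range is given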
+    EXPECT_EQ("0", param_block.parameters[0].range.first);
+    EXPECT_EQ("1000", param_block.parameters[0].range.second);
+    EXPECT_EQ("rho", param_block.parameters[1].name());
+    EXPECT_EQ("0.2", param_block.parameters[1].value);
+    EXPECT_EQ("", param_block.parameters[1].range.first);
+    EXPECT_EQ("", param_block.parameters[1].range.second);
 }

 TEST(Parser, net_receive) {
@@ -255,51 +253,43 @@ TEST(Parser, parse_conductance) {

 TEST(Parser, parse_if) {
     std::unique_ptr<IfExpression> s;

-    EXPECT_TRUE(check_parse(s, &Parser::parse_if,
-        " if(a<b) { \n"
-        " a = 2+b \n"
-        " b = 4^b \n"
-        " } \n"
-    ));
+    EXPECT_TRUE(check_parse(s, &Parser::parse_if, " if(a<b) { \n"
+                                                  " a = 2+b \n"
+                                                  " b = 4^b \n"
+                                                  " } \n"));
     if (s) {
         EXPECT_NE(s->condition()->is_binary(), nullptr);
         EXPECT_NE(s->true_branch()->is_block(), nullptr);
         EXPECT_EQ(s->false_branch(), nullptr);
     }

-    EXPECT_TRUE(check_parse(s, &Parser::parse_if,
-        " if(a<b) { \n"
-        " a = 2+b \n"
-        " } else { \n"
-        " a = 2+b \n"
-        " } "
-    ));
+    EXPECT_TRUE(check_parse(s, &Parser::parse_if, " if(a<b) { \n"
+                                                  " a = 2+b \n"
+                                                  " } else { \n"
+                                                  " a = 2+b \n"
+                                                  " } "));
     if (s) {
         EXPECT_NE(s->condition()->is_binary(), nullptr);
         EXPECT_NE(s->true_branch()->is_block(), nullptr);
         EXPECT_NE(s->false_branch(), nullptr);
     }

-    EXPECT_TRUE(check_parse(s, &Parser::parse_if,
-        " IF(a<b) { \n"
-        " a = 2+b \n"
-        " } ELSE { \n"
-        " a = 2+b \n"
-        " } "
-    ));
+    EXPECT_TRUE(check_parse(s, &Parser::parse_if, " IF(a<b) { \n"
+                                                  " a = 2+b \n"
+                                                  " } ELSE { \n"
+                                                  " a = 2+b \n"
+                                                  " } "));
     if (s) {
         EXPECT_NE(s->condition()->is_binary(), nullptr);
         EXPECT_NE(s->true_branch()->is_block(), nullptr);
         EXPECT_NE(s->false_branch(), nullptr);
     }

-    EXPECT_TRUE(check_parse(s, &Parser::parse_if,
-        " if(fabs(a-b)) { \n"
-        " a = 2+b \n"
-        " } else if(b>a){\n"
-        " a = 2+b \n"
-        " } "
-    ));
+    EXPECT_TRUE(check_parse(s, &Parser::parse_if, " if(fabs(a-b)) { \n"
+                                                  " a = 2+b \n"
+                                                  " } else if(b>a){\n"
+                                                  " a = 2+b \n"
+                                                  " } "));
     if (s) {
         EXPECT_NE(s->condition()->is_unary(), nullptr);
         EXPECT_NE(s->true_branch()->is_block(), nullptr);
@@ -337,8 +327,7 @@ TEST(Parser, parse_unary_expression) {
         "(x + -y) ",
         "-(x - + -y) ",
         "exp(x + y) ",
-        "-exp(x + -y) "
-    };
+        "-exp(x + -y) "};

     for (auto& text: good_expr) {
         EXPECT_TRUE(check_parse(&Parser::parse_unaryop, text));
@@ -364,8 +353,8 @@ TEST(Parser, parse_parenthesis_expression) {
         "(x ",
         "((x+3) ",
         "(x+ +) ",
-        "(x=3) ",  // assignment inside parenthesis isn't allowed
-        "(a + (b*2^(x)) ",  // missing closing parenthesis
+        "(x=3) ", // assignment inside parenthesis isn't allowed
+        "(a + (b*2^(x)) ", // missing closing parenthesis
     };

     for (auto& text: bad_expr) {
@@ -399,14 +388,14 @@ TEST(Parser, parse_line_expression) {
     }

     const char* bad_expr[] = {
-        "x=2+ ",  // incomplete binary expression on rhs
-        "x= ",  // missing rhs of assignment
+        "x=2+ ", // incomplete binary expression on rhs
+        "x= ", // missing rhs of assignment
         "x=)y + 2 * z",
         "x=(y + 2 ",
         "x=(y ++ z ",
-        "x/=3 ",  // compound binary expressions not supported
-        "foo+8 ",  // missing assignment
-        "foo()=8 ",  // lhs of assingment must be an lvalue
+        "x/=3 ", // compound binary expressions not supported
+        "foo+8 ", // missing assignment
+        "foo()=8 ", // lhs of assingment must be an lvalue
     };

     for (auto& text: bad_expr) {
@@ -416,8 +405,7 @@ TEST(Parser, parse_stoich_term) {
     const char* good_pos_expr[] = {
-        "B", "B3", "3B3", "0A", "12A", "4E"
-    };
+        "B", "B3", "3B3", "0A", "12A", "4E"};

     for (auto& text: good_pos_expr) {
         std::unique_ptr<StoichTermExpression> s;
@@ -426,8 +414,7 @@ TEST(Parser, parse_stoich_term) {
     }

     const char* good_neg_expr[] = {
-        "-3B3", "-A", "-12A"
-    };
+        "-3B3", "-A", "-12A"};

     for (auto& text: good_neg_expr) {
         std::unique_ptr<StoichTermExpression> s;
@@ -445,8 +432,7 @@ TEST(Parser, parse_stoich_term) {

 TEST(Parser, parse_stoich_expression) {
     const char* single_expr[] = {
-        "B", "B3", "3xy"
-    };
+        "B", "B3", "3xy"};

     for (auto& text: single_expr) {
         std::unique_ptr<StoichExpression> s;
@@ -455,8 +441,7 @@ TEST(Parser, parse_stoich_expression) {
     }

     const char* double_expr[] = {
-        "B+A", "a1 + 2bn", "4c+d"
-    };
+        "B+A", "a1 + 2bn", "4c+d"};

     for (auto& text: double_expr) {
         std::unique_ptr<StoichExpression> s;
@@ -465,8 +450,7 @@ TEST(Parser, parse_stoich_expression) {
     }

     const char* other_good_expr[] = {
-        "", "a+b+c", "1a-2b+3c+4d"
-    };
+        "", "a+b+c", "1a-2b+3c+4d"};

     for (auto& text: other_good_expr) {
         std::unique_ptr<StoichExpression> s;
@@ -478,16 +462,15 @@ TEST(Parser, parse_stoich_expression) {
         std::unique_ptr<StoichExpression> s;
         EXPECT_TRUE(check_parse(s, &Parser::parse_stoich_expression, check_coeff));
         EXPECT_EQ(4u, s->terms().size());
-        std::vector<int> confirm = {-3,2,-1,1};
-        for (unsigned i = 0; i<4; ++i) {
+        std::vector<int> confirm = {-3, 2, -1, 1};
+        for (unsigned i = 0; i < 4; ++i) {
             auto term = s->terms()[i]->is_stoich_term();
             EXPECT_EQ(confirm[i], term->coeff()->is_integer()->integer_value());
         }
     }

     const char* bad_expr[] = {
-        "A+B+", "A+5+B"
-    };
+        "A+B+", "A+5+B"};

     for (auto& text: bad_expr) {
         EXPECT_TRUE(check_parse_fail(&Parser::parse_stoich_expression, text));
@@ -502,8 +485,7 @@ TEST(Parser, parse_reaction_expression) {
         "~ <-> C + D + 7 E (k1, f(a,b)-2)",
         "~ <-> C + D + 7E+F (k1, f(a,b)-2)",
         "~ <-> (f,g)",
-        "~ A + 3B + C<-> (f,g)"
-    };
+        "~ A + 3B + C<-> (f,g)"};

     for (auto& text: good_expr) {
         std::unique_ptr<ReactionExpression> s;
@@ -572,38 +554,37 @@ TEST(Parser, parse_conserve) {
         "CONSERVE a + 3*b -c = 1",
         "CONSERVE a + 3b -c = ",
         "a+b+c = 2",
-        "CONSERVE a + 3b +c"
-    };
+        "CONSERVE a + 3b +c"};

     for (auto& text: bad_expr) {
         EXPECT_TRUE(check_parse_fail(&Parser::parse_conserve_expression, text));
     }
 }

-double eval(Expression *e) {
+double eval(Expression* e) {
     if (auto n = e->is_number()) {
         return n->value();
     }
     if (auto b = e->is_binary()) {
         auto lhs = eval(b->lhs());
         auto rhs = eval(b->rhs());
-        switch(b->op()) {
-            case tok::plus : return lhs+rhs;
-            case tok::minus : return lhs-rhs;
-            case tok::times : return lhs*rhs;
-            case tok::divide: return lhs/rhs;
-            case tok::pow : return std::pow(lhs,rhs);
-            case tok::min : return std::min(lhs,rhs);
-            case tok::max : return std::max(lhs,rhs);
-            default:;
+        switch (b->op()) {
+        case tok::plus: return lhs + rhs;
+        case tok::minus: return lhs - rhs;
+        case tok::times: return lhs * rhs;
+        case tok::divide: return lhs / rhs;
+        case tok::pow: return std::pow(lhs, rhs);
+        case tok::min: return std::min(lhs, rhs);
+        case tok::max: return std::max(lhs, rhs);
+        default:;
         }
     }
     if (auto u = e->is_unary()) {
         auto val = eval(u->expression());
-        switch(u->op()) {
-            case tok::plus : return val;
-            case tok::minus : return -val;
-            default:;
+        switch (u->op()) {
+        case tok::plus: return val;
+        case tok::minus: return -val;
+        default:;
         }
     }
     return std::numeric_limits<double>::quiet_NaN();
@@ -616,10 +597,10 @@ TEST(Parser, parse_binop) {
     std::pair<const char*, double> tests[] = {
         // simple
-        {"2+3", 2.+3.},
-        {"2-3", 2.-3.},
-        {"2*3", 2.*3.},
-        {"2/3", 2./3.},
+        {"2+3", 2. + 3.},
+        {"2-3", 2. - 3.},
+        {"2*3", 2. * 3.},
+        {"2/3", 2. / 3.},
         {"2^3", pow(2., 3.)},
         {"min(2,3)", 2.},
         {"min(3,2)", 2.},
@@ -627,29 +608,28 @@
         {"max(3,2)", 3.},

         // more complicated
-        {"2+3*2", 2.+(3*2)},
-        {"2*3-5", (2.*3)-5.},
-        {"2+3*(-2)", 2.+(3*-2)},
-        {"2+3*(-+2)", 2.+(3*-+2)},
-        {"2/3*4", (2./3.)*4.},
-        {"min(2+3, 4/2)", 4./2},
-        {"max(2+3, 4/2)", 2.+3.},
+        {"2+3*2", 2. + (3 * 2)},
+        {"2*3-5", (2. * 3) - 5.},
+        {"2+3*(-2)", 2. + (3 * -2)},
+        {"2+3*(-+2)", 2. + (3 * -+2)},
+        {"2/3*4", (2. / 3.) * 4.},
+        {"min(2+3, 4/2)", 4. / 2},
+        {"max(2+3, 4/2)", 2. + 3.},
         {"max(2+3, min(12, 24))", 12.},
         {"max(min(12, 24), 2+3)", 12.},
-        {"2 * 7 - 3 * 11 + 4 * 13", 2.*7.-3.*11.+4.*13.},
+        {"2 * 7 - 3 * 11 + 4 * 13", 2. * 7. - 3. * 11. + 4. * 13.},

         // right associative
-        {"2^3^1.5", pow(2.,pow(3.,1.5))},
-        {"2^3^1.5^2", pow(2.,pow(3.,pow(1.5,2.)))},
-        {"2^2^3", pow(2.,pow(2.,3.))},
-        {"(2^2)^3", pow(pow(2.,2.),3.)},
-        {"3./2^7.", 3./pow(2.,7.)},
-        {"3^2*5.", pow(3.,2.)*5.},
+        {"2^3^1.5", pow(2., pow(3., 1.5))},
+        {"2^3^1.5^2", pow(2., pow(3., pow(1.5, 2.)))},
+        {"2^2^3", pow(2., pow(2., 3.))},
+        {"(2^2)^3", pow(pow(2., 2.), 3.)},
+        {"3./2^7.", 3. / pow(2., 7.)},
+        {"3^2*5.", pow(3., 2.) * 5.},

         // multilevel
         {"1-2*3^4*5^2^3-3^2^3/4/8-5",
-            1.-2*pow(3.,4.)*pow(5.,pow(2.,3.))-pow(3,pow(2.,3.))/4./8.-5}
-    };
+            1. - 2 * pow(3., 4.) * pow(5., pow(2., 3.)) - pow(3, pow(2., 3.)) / 4. / 8. - 5}};

     for (const auto& test_case: tests) {
         std::unique_ptr<Expression> e;
@@ -683,12 +663,11 @@ TEST(Parser, parse_state_block) {
         "STATE {\n"
         " h FROM 0 TO 1\n"
         " m r (uA)\n"
-        "}"
-    };
+        "}"};

     expression_ptr null;
     for (const auto& text: state_blocks) {
-        Module m(text, text+std::strlen(text), "");
+        Module m(text, text + std::strlen(text), "");
         Parser p(m, false);
         p.parse_state_block();
         EXPECT_EQ(lexerStatus::happy, p.status());