diff --git a/.gitignore b/.gitignore
index 5c52c84e9050753b6c1f01897330aa8941af28be..cb17f7027d63aa46a67b78006bca015b63726177 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,6 +21,8 @@
 *.swm
 *.swl
 
+.cache
+
 # tar files
 *.tar
 
diff --git a/arborio/CMakeLists.txt b/arborio/CMakeLists.txt
index 59a1bc30eed72ea59a60cbf3b82a583242016e48..4caba22857fb6fc0f77cb1dc5d45fa8f51d036f7 100644
--- a/arborio/CMakeLists.txt
+++ b/arborio/CMakeLists.txt
@@ -1,4 +1,6 @@
 set(arborio-sources
+    asc_lexer.cpp
+    neurolucida.cpp
     swcio.cpp
 )
 if(ARB_WITH_NEUROML)
diff --git a/arborio/asc_lexer.cpp b/arborio/asc_lexer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a93299efb7af20bd82ded3f4d21d891196332fdd
--- /dev/null
+++ b/arborio/asc_lexer.cpp
@@ -0,0 +1,402 @@
+#include <cctype>
+#include <string>
+
+#include <arborio/neurolucida.hpp>
+
+#include "asc_lexer.hpp"
+
+namespace arborio {
+
+namespace asc {
+
+// Render a token kind as its mnemonic name (used in diagnostics).
+std::ostream& operator<<(std::ostream& o, const tok& t) {
+    const char* name = "unknown";
+    switch (t) {
+        case tok::lparen:  name = "lparen";  break;
+        case tok::rparen:  name = "rparen";  break;
+        case tok::lt:      name = "lt";      break;
+        case tok::gt:      name = "gt";      break;
+        case tok::comma:   name = "comma";   break;
+        case tok::real:    name = "real";    break;
+        case tok::integer: name = "integer"; break;
+        case tok::symbol:  name = "symbol";  break;
+        case tok::string:  name = "string";  break;
+        case tok::pipe:    name = "pipe";    break;
+        case tok::eof:     name = "eof";     break;
+        case tok::error:   name = "error";   break;
+    }
+    return o << name;
+}
+
+// Render a source location as an s-expression, e.g. "(src-location 3 12)".
+std::ostream& operator<<(std::ostream& o, const src_location& l) {
+    o << "(src-location " << l.line << " " << l.column << ")";
+    return o;
+}
+
+// Render a token as an s-expression: kind, spelling and location.
+// The eof token is printed with the spelling "\0"; error tokens print
+// an empty spelling (the message is not part of the canonical form).
+std::ostream& operator<<(std::ostream& o, const token& t) {
+    o << "(token " << t.kind << " \"";
+    switch (t.kind) {
+        case tok::eof:   o << "\\0";       break;
+        case tok::error: /* no spelling */ break;
+        default:         o << t.spelling;  break;
+    }
+    return o << "\" " << t.loc << ")";
+}
+
+// True if c is an alphanumeric character.
+// The cast avoids UB in std::isalnum for negative char values.
+inline bool is_alphanumeric(char c) {
+    return std::isalnum(static_cast<unsigned char>(c));
+}
+// True if c is a sign character ('+' or '-').
+inline bool is_plusminus(char c) {
+    return (c=='-' || c=='+');
+}
+// True if c may appear inside a symbol: alphanumeric characters plus a
+// fixed set of punctuation (following common-lisp symbol conventions).
+// NOTE(review): these helpers are only used in this translation unit;
+// consider static or an anonymous namespace for internal linkage.
+inline bool is_valid_symbol_char(char c) {
+    switch (c) {
+        case '+':
+        case '-':
+        case '*':
+        case '/':
+        case '@':
+        case '$':
+        case '%':
+        case '^':
+        case '&':
+        case '_':
+        case '=':
+        case '<':
+        case '>':
+        case '~':
+        case '.':
+            return true;
+        default:
+            return is_alphanumeric(c);
+    }
+}
+
+// Implementation of the asc lexer.
+//
+// Tokenizes a NUL-terminated character stream, tracking line/column
+// information for error reporting. The input buffer must outlive the lexer.
+class lexer_impl {
+    const char* line_start_;  // start of the current line (for column numbers)
+    const char* stream_;      // current position in the input
+    unsigned line_;           // zero-based line counter
+    token token_;             // most recently lexed token
+
+public:
+
+    lexer_impl(const char* begin):
+        line_start_(begin), stream_(begin), line_(0)
+    {
+        // Prime the first token.
+        parse();
+    }
+
+    // Return the current token in the stream.
+    const token& current() {
+        return token_;
+    }
+
+    // Advance n tokens and return the new current token.
+    const token& next(unsigned n=1) {
+        while (n--) parse();
+        return token_;
+    }
+
+    // Return the token n positions ahead without advancing the stream.
+    token peek(unsigned n) {
+        // Save state.
+        auto ls = line_start_;
+        auto st = stream_;
+        auto l  = line_;
+        auto t  = token_;
+
+        // Advance n tokens.
+        next(n);
+
+        // Restore state.
+        std::swap(t, token_);
+        line_ = l;
+        line_start_ = ls;
+        stream_ = st;
+
+        return t;
+    }
+
+private:
+
+    // Location of the current stream position (1-based line and column).
+    src_location loc() const {
+        return src_location(line_+1, stream_-line_start_+1);
+    }
+
+    bool empty() const {
+        return *stream_ == '\0';
+    }
+
+    // Consume and store the next token in token_.
+    void parse() {
+        using namespace std::string_literals;
+
+        while (!empty()) {
+            switch (*stream_) {
+                // white space
+                case ' '    :
+                case '\t'   :
+                case '\v'   :
+                case '\f'   :
+                    ++stream_;
+                    continue;   // skip to next character
+
+                // new line
+                case '\n'   :
+                    line_++;
+                    ++stream_;
+                    line_start_ = stream_;
+                    continue;
+
+                // carriage return (windows new line)
+                case '\r'   :
+                    ++stream_;
+                    if (*stream_ != '\n') {
+                        token_ = {loc(), tok::error, "expected new line after carriage return (bad line ending)"};
+                        return;
+                    }
+                    continue; // catch the new line on the next pass
+
+                case ';':
+                    eat_comment();
+                    continue;
+                case '(':
+                    token_ = {loc(), tok::lparen, {character()}};
+                    return;
+                case ')':
+                    token_ = {loc(), tok::rparen, {character()}};
+                    return;
+                // Note: case ranges are a GNU extension (supported by gcc and clang).
+                case 'a' ... 'z':
+                case 'A' ... 'Z':
+                    token_ = symbol();
+                    return;
+                case '0' ... '9':
+                    token_ = number();
+                    return;
+                case '"':
+                    token_ = string();
+                    return;
+                case '<':
+                    token_ = {loc(), tok::lt, "<"};
+                    ++stream_;
+                    return;
+                case '>':
+                    token_ = {loc(), tok::gt, ">"};
+                    ++stream_;
+                    return;
+                case ',':
+                    token_ = {loc(), tok::comma, ","};
+                    ++stream_;
+                    return;
+                case '|':
+                    token_ = {loc(), tok::pipe, "|"};
+                    ++stream_;
+                    return;
+                case '-':
+                case '+':
+                case '.':
+                    {
+                        // A sign or dot starts a number only if followed by a
+                        // digit, or by a dot (e.g. "-.5"); otherwise it is an
+                        // unexpected character.
+                        char c = peek_char(1);
+                        if (std::isdigit(static_cast<unsigned char>(c)) || c=='.') {
+                            token_ = number();
+                            return;
+                        }
+                    }
+                    token_ = {loc(), tok::error, std::string("Unexpected character '")+character()+"'"};
+                    return;
+
+                default:
+                    token_ = {loc(), tok::error, std::string("Unexpected character '")+character()+"'"};
+                    return;
+            }
+        }
+
+        // The loop above exits only when the end of input has been reached.
+        token_ = {loc(), tok::eof, "eof"s};
+        return;
+    }
+
+    // Look ahead n characters in the input stream.
+    // If peeking to or past the end of the stream return '\0'.
+    char peek_char(int n) {
+        const char* c = stream_;
+        while (*c && n--) ++c;
+        return *c;
+    }
+
+    // Consumes characters in the stream until end of stream or a new line.
+    // Assumes that the current location is the `;` that starts the comment.
+    void eat_comment() {
+        while (!empty() && *stream_!='\n') {
+            ++stream_;
+        }
+    }
+
+    // Parse an alphanumeric sequence that starts with an alphabetic character,
+    // and may contain alphabetic, numeric or one of {+ -  *  /  @  $  %  ^  &  _  =  <  >  ~ .}
+    //
+    // This definition follows the symbol naming conventions of common lisp, without the
+    // use of pipes || to define symbols with arbitrary strings.
+    //
+    // Valid symbols:
+    //    sub_dendrite
+    //    sub-dendrite
+    //    sub-dendrite:
+    //    foo@3.2/lower
+    //    temp_
+    //    branch3
+    //    A
+    // Invalid symbols:
+    //    _cat          ; can't start with underscore
+    //    -cat          ; can't start with hyphen
+    //    2ndvar        ; can't start with numeric character
+    token symbol() {
+        using namespace std::string_literals;
+        auto start = loc();
+        std::string symbol;
+        char c = *stream_;
+
+        // Assert that current position is at the start of an identifier.
+        // Cast to unsigned char: std::isalpha is UB for negative char values.
+        if (!std::isalpha(static_cast<unsigned char>(c))) {
+            return {start, tok::error, "Internal error: lexer attempting to read identifier when none is available '.'"s};
+        }
+
+        symbol += c;
+        ++stream_;
+        while (is_valid_symbol_char(*stream_)) {
+            symbol += *stream_;
+            ++stream_;
+        }
+
+        return {start, tok::symbol, std::move(symbol)};
+    }
+
+    // Parse a string literal enclosed in double quotes. Escape sequences are
+    // not supported: the spelling is everything up to the next '"'.
+    token string() {
+        using namespace std::string_literals;
+        if (*stream_ != '"') {
+            return {loc(), tok::error, "Internal error: lexer attempting to read identifier when none is available '.'"s};
+        }
+
+        auto start = loc();
+        ++stream_;
+        std::string str;
+        while (!empty() && *stream_!='"') {
+            str.push_back(*stream_);
+            ++stream_;
+        }
+        if (empty()) return {start, tok::error, "string missing closing \""};
+        ++stream_; // gobble the closing "
+
+        return {start, tok::string, str};
+    }
+
+    // Parse an integer or real number, with optional scientific notation,
+    // e.g. 42, -3.14, .5, 1e-3. Returns tok::integer only when the literal
+    // has neither a decimal point nor an exponent.
+    token number() {
+        using namespace std::string_literals;
+
+        auto start = loc();
+        std::string str;
+        char c = *stream_;
+
+        // Start counting the number of points in the number.
+        auto num_point = (c=='.' ? 1 : 0);
+        auto uses_scientific_notation = 0;
+
+        str += c;
+        ++stream_;
+        while (1) {
+            c = *stream_;
+            if (std::isdigit(static_cast<unsigned char>(c))) {
+                str += c;
+                ++stream_;
+            }
+            else if (c=='.') {
+                if (++num_point>1) {
+                    // Can't have more than one '.' in a number
+                    return {start, tok::error, "unexpected '.'"s};
+                }
+                str += c;
+                ++stream_;
+                if (uses_scientific_notation) {
+                    // Can't have a '.' in the exponent
+                    return {start, tok::error, "unexpected '.'"s};
+                }
+            }
+            else if (!uses_scientific_notation && (c=='e' || c=='E')) {
+                // Only treat 'e'/'E' as an exponent marker when followed by a
+                // (possibly signed) digit; otherwise it starts a new token.
+                if ( std::isdigit(static_cast<unsigned char>(peek_char(1))) ||
+                    (is_plusminus(peek_char(1)) && std::isdigit(static_cast<unsigned char>(peek_char(2)))))
+                {
+                    uses_scientific_notation++;
+                    str += c;
+                    stream_++;
+                    // Consume the next char if +/-
+                    if (is_plusminus(*stream_)) {
+                        str += *stream_++;
+                    }
+                }
+                else {
+                    // the 'e' or 'E' is the beginning of a new token
+                    break;
+                }
+            }
+            else {
+                break;
+            }
+        }
+
+        const bool is_real = uses_scientific_notation || num_point>0;
+        return {start, (is_real? tok::real: tok::integer), std::move(str)};
+    }
+
+    // Return the current character and advance the stream past it.
+    char character() {
+        return *stream_++;
+    }
+};
+
+// Public lexer interface: thin forwarding wrappers over the pimpled
+// lexer_impl defined above.
+lexer::lexer(const char* begin):
+    impl_(new lexer_impl(begin))
+{}
+
+// Return the current token without advancing.
+const token& lexer::current() {
+    return impl_->current();
+}
+
+// Advance n tokens and return the new current token.
+const token& lexer::next(unsigned n) {
+    return impl_->next(n);
+}
+
+// Return the token n positions ahead without advancing.
+token lexer::peek(unsigned n) {
+    return impl_->peek(n);
+}
+
+// Defined out of line so ~unique_ptr<lexer_impl> sees the complete type.
+lexer::~lexer() = default;
+
+} // namespace asc
+
+} // namespace arborio
diff --git a/arborio/asc_lexer.hpp b/arborio/asc_lexer.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6fc60d5df28d42bb802455a83980163f0ff611ed
--- /dev/null
+++ b/arborio/asc_lexer.hpp
@@ -0,0 +1,68 @@
+#pragma once
+
+#include <memory>
+#include <ostream>
+#include <string>
+
+namespace arborio {
+
+namespace asc {
+
+// A line/column position in the asc input; the lexer reports 1-based values.
+struct src_location {
+    unsigned line = 0;
+    unsigned column = 0;
+
+    src_location() = default;
+
+    src_location(unsigned l, unsigned c):
+        line(l), column(c)
+    {}
+};
+
+std::ostream& operator<<(std::ostream& o, const src_location& l);
+
+// Token kinds produced by the asc lexer.
+enum class tok {
+    lparen,     // left parenthesis '('
+    rparen,     // right parenthesis ')'
+    lt,         // less than '<'
+    gt,         // greater than '>'
+    comma,      // comma ','
+    real,       // real number
+    integer,    // integer
+    symbol,     // symbol
+    string,     // string, written as "spelling"
+    pipe,       // pipe '|'
+    eof,        // end of file/input
+    error       // special error state marker
+};
+
+std::ostream& operator<<(std::ostream&, const tok&);
+
+// A lexed token: its source location, kind, and raw text.
+// For tok::error the spelling holds the error message.
+struct token {
+    src_location loc;
+    tok kind;
+    std::string spelling;
+};
+
+std::ostream& operator<<(std::ostream&, const token&);
+
+// Forward declare pimpled implementation.
+class lexer_impl;
+
+// Lexer for the asc format. Wraps lexer_impl behind a pimpl so that the
+// implementation stays out of this header.
+// The input buffer passed to the constructor must outlive the lexer.
+class lexer {
+public:
+    lexer(const char* begin);
+
+    // The current token.
+    const token& current();
+    // Advance n tokens; returns the new current token.
+    const token& next(unsigned n=1);
+    // Look ahead n tokens without advancing.
+    token peek(unsigned n=1);
+
+    ~lexer();
+
+private:
+    std::unique_ptr<lexer_impl> impl_;
+};
+
+} // namespace asc
+
+} // namespace arborio
diff --git a/arborio/include/arborio/neurolucida.hpp b/arborio/include/arborio/neurolucida.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4021329d5e5cab8c9bc01dc0a157904b45e9a436
--- /dev/null
+++ b/arborio/include/arborio/neurolucida.hpp
@@ -0,0 +1,45 @@
+#pragma once
+
+#include <cstddef>
+#include <stdexcept>
+#include <string>
+
+#include <arbor/arbexcept.hpp>
+#include <arbor/morph/label_dict.hpp>
+#include <arbor/morph/morphology.hpp>
+
+namespace arborio {
+
+// Common base-class for arborio run-time errors.
+struct asc_exception: public arb::arbor_exception {
+    // Forwards the message to arbor's common exception type.
+    asc_exception(const std::string& what_arg):
+        arb::arbor_exception(what_arg)
+    {}
+};
+
+// Generic error parsing asc data.
+struct asc_parse_error: asc_exception {
+    // error_msg: description of the problem; line/column: 1-based location
+    // in the asc input where it was detected.
+    asc_parse_error(const std::string& error_msg, unsigned line, unsigned column);
+    std::string message;  // raw message, without the location decoration
+    unsigned line;        // line of the error
+    unsigned column;      // column of the error
+};
+
+// An unsupported ASC description feature was encountered.
+struct asc_unsupported: asc_exception {
+    asc_unsupported(const std::string& error_msg);
+    std::string message;  // description of the unsupported feature
+};
+
+// Result of loading an asc file: the morphology plus named regions/locsets.
+struct asc_morphology {
+    // Morphology constructed from asc description.
+    arb::morphology morphology;
+
+    // Regions and locsets defined in the asc description.
+    arb::label_dict labels;
+};
+
+// Load asc morphology from file with name filename.
+// Throws asc_parse_error on malformed input; asc_unsupported is presumably
+// thrown for unhandled description features — confirm in the implementation.
+asc_morphology load_asc(std::string filename);
+
+} // namespace arborio
diff --git a/arborio/neurolucida.cpp b/arborio/neurolucida.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cd231c3ec46fbf271f0a3fc670af6d025c6c9fff
--- /dev/null
+++ b/arborio/neurolucida.cpp
@@ -0,0 +1,698 @@
+#include <cstdint>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <limits>
+#include <numeric>
+#include <optional>
+#include <stdexcept>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <arbor/util/expected.hpp>
+
+#include <arborio/neurolucida.hpp>
+
+#include "arbor/arbexcept.hpp"
+#include "arbor/morph/primitives.hpp"
+#include "asc_lexer.hpp"
+
+namespace arborio {
+
+// Decorate the what() message with the source location, while keeping the
+// raw message and location available as members for programmatic access.
+asc_parse_error::asc_parse_error(const std::string& error_msg, unsigned line, unsigned column):
+    asc_exception("asc parser error (line "+std::to_string(line)+" col "+std::to_string(column)+"): "+error_msg),
+    message(error_msg),
+    line(line),
+    column(column)
+{}
+
+// Prefix the what() message, keeping the raw message as a member.
+asc_unsupported::asc_unsupported(const std::string& error_msg):
+    asc_exception("unsupported in asc description: "+error_msg),
+    message(error_msg)
+{}
+
+// Internal error type threaded through the parser via parse_hopefully.
+// Records the message, the asc source location, and a stack of C++
+// file/line pairs showing the parser call path (appended by the
+// FORWARD_PARSE_ERROR macro below).
+struct parse_error {
+    struct cpp_info {
+        const char* file;  // __FILE__ of the parser frame
+        int line;          // __LINE__ of the parser frame
+    };
+
+    std::string msg;
+    asc::src_location loc;
+    std::vector<cpp_info> stack;
+
+    parse_error(std::string m, asc::src_location l, cpp_info cpp):
+        msg(std::move(m)), loc(l)
+    {
+        stack.push_back(cpp);
+    }
+
+    // Record another parser frame as the error propagates outward.
+    parse_error& append(cpp_info i) {
+        stack.push_back(i);
+        return *this;
+    }
+
+    // Convert to the public exception type, dropping the C++ stack.
+    asc_parse_error make_exception() const {
+        // Uncomment to print a stack trace of the parser.
+        // A very useful tool for investigating invalid inputs or parser bugs.
+
+        //for (auto& frame: stack) std::cout << "  " << frame.file << ":" << frame.line << "\n";
+
+        return {msg, loc.line, loc.column};
+    }
+};
+
+template <typename T>
+using parse_hopefully = arb::util::expected<T, parse_error>;
+using arb::util::unexpected;
+using asc::tok;
+
+#define PARSE_ERROR(msg, loc) parse_error(msg, loc, {__FILE__, __LINE__})
+#define FORWARD_PARSE_ERROR(err) arb::util::unexpected(parse_error(std::move(err).append({__FILE__, __LINE__})))
+
+// The parse_* functions will attempt to parse an expected token from the lexer.
+
+// Attempt to consume a token of the expected kind. On success the kind is
+// returned; otherwise a parse_error describing the token actually found.
+parse_hopefully<tok> expect_token(asc::lexer& l, tok kind) {
+    const auto& found = l.current();
+    if (found.kind == kind) {
+        l.next();
+        return kind;
+    }
+    return unexpected(PARSE_ERROR("unexpected symbol '"+found.spelling+"'", found.loc));
+}
+
+#define EXPECT_TOKEN(L, TOK) {if (auto rval__ = expect_token(L, TOK); !rval__) return FORWARD_PARSE_ERROR(rval__.error());}
+
+// Attempt to parse a double precision value from the input stream.
+// Will consume both integer and real values.
+// Out-of-range literals (e.g. 1e999) are reported through the error channel
+// instead of letting std::out_of_range escape from std::stod.
+parse_hopefully<double> parse_double(asc::lexer& L) {
+    auto t = L.current();
+    if (!(t.kind==tok::integer || t.kind==tok::real)) {
+        return unexpected(PARSE_ERROR("missing real number", L.current().loc));
+    }
+    L.next(); // consume the number
+    try {
+        return std::stod(t.spelling);
+    }
+    catch (const std::out_of_range&) {
+        return unexpected(PARSE_ERROR("number out of range", t.loc));
+    }
+}
+
+#define PARSE_DOUBLE(L, X) {if (auto rval__ = parse_double(L)) X=*rval__; else return FORWARD_PARSE_ERROR(rval__.error());}
+
+// Attempt to parse an integer in the range [0, 255].
+// Values outside that range — including literals too large for long long,
+// which would make std::stoll throw std::out_of_range — are reported
+// through the error channel.
+parse_hopefully<std::uint8_t> parse_uint8(asc::lexer& L) {
+    auto t = L.current();
+    if (t.kind!=tok::integer) {
+        return unexpected(PARSE_ERROR("missing uint8 number", L.current().loc));
+    }
+
+    // convert to large integer and test
+    long long value = 0;
+    try {
+        value = std::stoll(t.spelling);
+    }
+    catch (const std::out_of_range&) {
+        return unexpected(PARSE_ERROR("value out of range [0, 255]", L.current().loc));
+    }
+    if (value<0 || value>255) {
+        return unexpected(PARSE_ERROR("value out of range [0, 255]", L.current().loc));
+    }
+    L.next(); // consume token
+    return static_cast<std::uint8_t>(value);
+}
+
+#define PARSE_UINT8(L, X) {if (auto rval__ = parse_uint8(L)) X=*rval__; else return FORWARD_PARSE_ERROR(rval__.error());}
+
+// Find the matching closing parenthesis, and consume it.
+// Assumes that opening paren has been consumed.
+// depth: number of additional already-open parens beyond the one being closed.
+// Unlike the parse_* functions this throws asc_parse_error directly on
+// lexical errors or premature end of input.
+void parse_to_closing_paren(asc::lexer& L, unsigned depth=0) {
+    while (true) {
+        const auto& t = L.current();
+        switch (t.kind) {
+            case tok::lparen:
+                L.next();
+                ++depth;
+                break;
+            case tok::rparen:
+                L.next();
+                if (depth==0) return;
+                --depth;
+                break;
+            case tok::error:
+                // The error token's spelling carries the lexer's message.
+                throw asc_parse_error(t.spelling, t.loc.line, t.loc.column);
+            case tok::eof:
+                throw asc_parse_error("unexpected end of file", t.loc.line, t.loc.column);
+            default:
+                L.next();
+        }
+    }
+}
+
+// If the current token is the symbol `match`, consume it and return true;
+// otherwise leave the stream untouched and return false.
+bool parse_if_symbol_matches(const char* match, asc::lexer& L) {
+    const auto& t = L.current();
+    if (t.kind!=tok::symbol) return false;
+    if (t.spelling!=match) return false;
+    L.next();
+    return true;
+}
+
+// True if token t is a symbol whose spelling equals `match`.
+bool symbol_matches(const char* match, const asc::token& t) {
+    return t.kind==tok::symbol && t.spelling==match;
+}
+
+// Parse a color expression, which have been observed in the wild in two forms:
+//  (Color Red)                 ; labeled
+//  (Color RGB (152, 251, 152)) ; RGB literal
+// An RGB color triple, one byte per channel.
+struct asc_color {
+    uint8_t r;
+    uint8_t g;
+    uint8_t b;
+};
+
+// Print as an s-expression, e.g. "(asc-color 255 0 0)".
+std::ostream& operator<<(std::ostream& o, const asc_color& c) {
+    return o << "(asc-color " << (int)c.r << " " << (int)c.g << " " << (int)c.b << ")";
+}
+
+// Table of named colors observed in asc files, mapped to RGB values.
+// NOTE(review): only read via find() in this file; could be declared const
+// (and given internal linkage) — confirm no other code mutates it.
+std::unordered_map<std::string, asc_color> color_map = {
+    {"Black",     {  0,   0,   0}},
+    {"White",     {255, 255, 255}},
+    {"Red",       {255,   0,   0}},
+    {"Lime",      {  0, 255,   0}},
+    {"Blue",      {  0,   0, 255}},
+    {"Yellow",    {255, 255,   0}},
+    {"Cyan",      {  0, 255, 255}},
+    {"Aqua",      {  0, 255, 255}},
+    {"Magenta",   {255,   0, 255}},
+    {"Fuchsia",   {255,   0, 255}},
+    {"Silver",    {192, 192, 192}},
+    {"Gray",      {128, 128, 128}},
+    {"Maroon",    {128,   0,   0}},
+    {"Olive",     {128, 128,   0}},
+    {"Green",     {  0, 128,   0}},
+    {"Purple",    {128,   0, 128}},
+    {"Teal",      {  0, 128, 128}},
+    {"Navy",      {  0,   0, 128}},
+    {"Orange",    {255, 165,   0}},
+};
+
+// Parse a Color expression in either labeled form, e.g. (Color Red), or
+// RGB-literal form, e.g. (Color RGB (152, 251, 152)).
+parse_hopefully<asc_color> parse_color(asc::lexer& L) {
+    EXPECT_TOKEN(L, tok::lparen);
+    if (!symbol_matches("Color", L.current())) {
+        return unexpected(PARSE_ERROR("expected Color symbol missing", L.current().loc));
+    }
+    // consume Color symbol; t is a copy of the token that follows it.
+    auto t = L.next();
+
+    if (parse_if_symbol_matches("RGB", L)) {
+        // Read RGB triple in the form (r, g, b)
+
+        EXPECT_TOKEN(L, tok::lparen);
+
+        asc_color color;
+        PARSE_UINT8(L, color.r);
+        EXPECT_TOKEN(L, tok::comma);
+        PARSE_UINT8(L, color.g);
+        EXPECT_TOKEN(L, tok::comma);
+        PARSE_UINT8(L, color.b);
+
+
+        EXPECT_TOKEN(L, tok::rparen);
+        EXPECT_TOKEN(L, tok::rparen);
+
+        return color;
+    }
+    else if (t.kind==tok::symbol) {
+        // Look up the symbol in the table
+        if (auto it = color_map.find(t.spelling); it!=color_map.end()) {
+            L.next();
+            EXPECT_TOKEN(L, tok::rparen);
+            return it->second;
+        }
+        else {
+            return unexpected(PARSE_ERROR("unknown color value '"+t.spelling+"'", t.loc));
+        }
+    }
+
+    // Reached when the token after Color is neither RGB nor a symbol.
+    return unexpected(PARSE_ERROR("unexpected symbol in Color description \'"+t.spelling+"\'", t.loc));
+}
+
+#define PARSE_COLOR(L, X) {if (auto rval__ = parse_color(L)) X=*rval__; else return FORWARD_PARSE_ERROR(rval__.error());}
+
+// Parse zSmear statement, which has the form:
+//  (zSmear alpha beta)
+// Where alpha and beta are double precision values.
+//
+//      Used to alter the displayed thickness of dendrites to
+//      resemble the optical aberration in z. This can be caused by both the
+//      point spread function and by refractive index mismatch between the specimen
+//      and the lens immersion medium. The diameter of a branch in z is adjusted
+//      using the following equation, Dz= Dxy*S, where Dxy is the recorded
+//      centerline diameter on the xy plane and S is the smear factor. The smear
+//      factor is calculated using this equation, S=α*Dxyβ. The minimum diameter
+//      is 1.0 µm, even if S values are less than 1.0.
+//
+// Which doesn't make much sense to track with our segment-based representation.
+
+// Parsed zSmear parameters (see comment above); currently ignored by the parser.
+struct zsmear {
+    double alpha;  // α in the smear factor S = α*Dxy^β
+    double beta;   // β in the smear factor S = α*Dxy^β
+};
+
+// Print as an s-expression, e.g. "(zsmear 1.5 0.5)".
+std::ostream& operator<<(std::ostream& o, const zsmear& z) {
+    return o << "(zsmear " << z.alpha << " " << z.beta << ")";
+}
+
+// Parse a zSmear expression: (zSmear alpha beta).
+// The values are parsed and returned; callers currently discard them.
+parse_hopefully<zsmear> parse_zsmear(asc::lexer& L) {
+    // check and consume opening paren
+    EXPECT_TOKEN(L, tok::lparen);
+
+    if (!symbol_matches("zSmear", L.current())) {
+        return unexpected(PARSE_ERROR("expected zSmear symbol missing", L.current().loc));
+    }
+    // consume zSmear symbol (the returned token was previously stored in an
+    // unused local, which triggered a compiler warning)
+    L.next();
+
+    zsmear s;
+    PARSE_DOUBLE(L, s.alpha);
+    PARSE_DOUBLE(L, s.beta);
+
+    // check and consume closing paren
+    EXPECT_TOKEN(L, tok::rparen);
+
+    return s;
+}
+
+#define PARSE_ZSMEAR(L, X) if (auto rval__ = parse_zsmear(L)) X=*rval__; else return FORWARD_PARSE_ERROR(rval__.error());
+
+// Parse a sample point with radius: (x y z radius).
+parse_hopefully<arb::mpoint> parse_point(asc::lexer& L) {
+    // Opening parenthesis.
+    EXPECT_TOKEN(L, tok::lparen);
+
+    arb::mpoint point;
+    PARSE_DOUBLE(L, point.x);
+    PARSE_DOUBLE(L, point.y);
+    PARSE_DOUBLE(L, point.z);
+    PARSE_DOUBLE(L, point.radius);
+
+    // Closing parenthesis.
+    EXPECT_TOKEN(L, tok::rparen);
+
+    return point;
+}
+
+#define PARSE_POINT(L, X) if (auto rval__ = parse_point(L)) X=*rval__; else return FORWARD_PARSE_ERROR(rval__.error());
+
+// Skip a spine, which is enclosed in angle brackets: <...>.
+// The spine contents are consumed and discarded; an empty sample is returned.
+parse_hopefully<arb::mpoint> parse_spine(asc::lexer& L) {
+    EXPECT_TOKEN(L, tok::lt);
+    // t is a live reference to the lexer's current token; consume everything
+    // up to the closing '>', an error token, or end of input.
+    auto& t = L.current();
+    while (t.kind!=tok::gt && t.kind!=tok::error && t.kind!=tok::eof) {
+        L.next();
+    }
+    // On error/eof, the EXPECT_TOKEN below reports the failure.
+    EXPECT_TOKEN(L, tok::gt);
+
+    return arb::mpoint{};
+}
+
+#define PARSE_SPINE(L, X) if (auto rval__ = parse_spine(L)) X=std::move(*rval__); else return FORWARD_PARSE_ERROR(rval__.error());
+
+// A branch: a sequence of samples followed by zero or more child branches.
+struct branch {
+    std::vector<arb::mpoint> samples;
+    std::vector<branch> children;
+};
+
+// Total number of samples in branch b and, recursively, in all of its
+// descendant branches.
+std::size_t num_samples(const branch& b) {
+    std::size_t count = b.samples.size();
+    for (const branch& child: b.children) {
+        count += num_samples(child);
+    }
+    return count;
+}
+
+// A sub-tree parsed from the asc description: its name, SWC-style tag,
+// color, and the root branch of samples.
+struct sub_tree {
+    // Sentinel value: no tag has been assigned yet.
+    constexpr static int no_tag = std::numeric_limits<int>::min();
+    std::string name;
+    int tag = no_tag;
+    branch root;
+    asc_color color;
+};
+
+// Forward declaration.
+parse_hopefully<std::vector<branch>> parse_children(asc::lexer& L);
+
+// Parse one branch: its samples, optional spines, and (recursively) any
+// child branches introduced by an explicit fork.
+parse_hopefully<branch> parse_branch(asc::lexer& L) {
+    branch B;
+
+    // Live reference: tracks the lexer's current token as the stream advances.
+    auto& t = L.current();
+
+    // Assumes that the opening parenthesis has already been consumed, because
+    // parsing of the first branch in a sub-tree starts on the first sample.
+
+    bool finished = t.kind == tok::rparen;
+
+    // A sibling marker '|' or closing ')' ends the sample list of this branch.
+    auto branch_end = [] (const asc::token& t) {
+        return t.kind == tok::pipe || t.kind == tok::rparen;
+    };
+
+    // One of these symbols must always be present at what Arbor calls a terminal.
+    auto branch_end_symbol = [] (const asc::token& t) {
+        return symbol_matches("Incomplete", t)
+            || symbol_matches("Low", t)
+            || symbol_matches("Normal", t)
+            || symbol_matches("Generated", t);
+    };
+
+    // Parse the samples in this branch up to either a terminal or an explicit fork.
+    while (!finished) {
+        auto p = L.peek();
+        // Assume that a sample has been found if the first value after a parenthesis
+        // is a number. An error will be returned if that is not the case.
+        if (t.kind==tok::lparen && (p.kind==tok::integer || p.kind==tok::real)) {
+            arb::mpoint sample;
+            PARSE_POINT(L, sample);
+            B.samples.push_back(sample);
+        }
+        // Spines are marked by a "less than", i.e. "<", symbol.
+        // The parsed spine is discarded.
+        else if (t.kind==tok::lt) {
+            arb::mpoint spine;
+            PARSE_SPINE(L, spine);
+        }
+        // Test for a symbol that indicates a terminal.
+        else if (branch_end_symbol(t)) {
+            L.next(); // Consume symbol
+            // t now refers to the token after the symbol, which must close
+            // the branch.
+            if (!branch_end(t)) {
+                return unexpected(PARSE_ERROR("Incomplete, Normal, Low or Generated not at a branch terminal", t.loc));
+            }
+            finished = true;
+        }
+        else if (branch_end(t)) {
+            finished = true;
+        }
+        // The end of the branch followed by an explicit fork point.
+        else if (t.kind==tok::lparen) {
+            finished = true;
+        }
+        else {
+            return unexpected(PARSE_ERROR("Unexpected input '"+t.spelling+"'", t.loc));
+        }
+    }
+
+    // Recursively parse any child branches.
+    if (t.kind==tok::lparen) {
+        if (auto kids = parse_children(L)) {
+            B.children = std::move(*kids);
+        }
+        else {
+            return FORWARD_PARSE_ERROR(kids.error());
+        }
+    }
+
+    return B;
+}
+
+// Parse the child branches of an explicit fork: ( branch | branch | ... ).
+parse_hopefully<std::vector<branch>> parse_children(asc::lexer& L) {
+    std::vector<branch> children;
+
+    // Live reference: tracks the lexer's current token as the stream advances.
+    auto& t = L.current();
+
+    EXPECT_TOKEN(L, tok::lparen);
+
+    // An immediately-closing paren means an empty child list.
+    bool finished = t.kind==tok::rparen;
+    while (!finished) {
+        if (auto b1 = parse_branch(L)) {
+            children.push_back(std::move(*b1));
+        }
+        else {
+            return FORWARD_PARSE_ERROR(b1.error());
+        }
+
+        // Test for siblings, which are either marked sanely using the obvious
+        // logical "(", or with a too-clever-by-half "|".
+        finished = !(t.kind==tok::pipe || t.kind==tok::lparen);
+        if (!finished) L.next();
+    }
+
+    EXPECT_TOKEN(L, tok::rparen);
+
+    return children;
+}
+
+// Parse a sub-tree expression: optional meta-data (label string, Color,
+// zSmear) followed by the required kind marker (CellBody, Axon, Dendrite
+// or Apical), then the samples that form the root branch.
+parse_hopefully<sub_tree> parse_sub_tree(asc::lexer& L) {
+    EXPECT_TOKEN(L, tok::lparen);
+
+    sub_tree tree;
+
+    // Parse the arbitrary, unordered and optional meta-data that may be prepended to the sub-tree.
+    //  string label, e.g. "Cell Body"
+    //  color, e.g. (Color Red)
+    //  z-smear, i.e. (zSmear alpha beta)
+    // And the required demarcation of CellBody, Axon, Dendrite or Apical.
+    while (true) {
+        auto& t = L.current();
+        // ASC files have an option to attach a string to the start of a sub-tree.
+        // If/when we find out what this string is applied to, we might create a dictionary entry for it.
+        // NOTE(review): an empty string label falls through to the final else
+        // and is rejected — confirm whether it should be skipped instead.
+        if (t.kind == tok::string && !t.spelling.empty()) {
+            L.next();
+        }
+        else if (t.kind == tok::lparen) {
+            // Peek past the '(' to decide what the expression is.
+            // Note: this t shadows the live reference declared above.
+            auto t = L.peek();
+            if (symbol_matches("Color", t)) {
+                PARSE_COLOR(L, tree.color);
+            }
+            // Every sub-tree is marked with one of: {CellBody, Axon, Dendrite, Apical}
+            // Hence it is possible to assign SWC tags for soma, axon, dend and apic.
+            else if (symbol_matches("CellBody", t)) {
+                tree.name = t.spelling;
+                tree.tag = 1;
+                L.next(2); // consume '(' and symbol
+                EXPECT_TOKEN(L, tok::rparen);
+            }
+            else if (symbol_matches("Axon", t)) {
+                tree.name = t.spelling;
+                tree.tag = 2;
+                L.next(2); // consume '(' and symbol
+                EXPECT_TOKEN(L, tok::rparen);
+            }
+            else if (symbol_matches("Dendrite", t)) {
+                tree.name = t.spelling;
+                tree.tag = 3;
+                L.next(2); // consume '(' and symbol
+                EXPECT_TOKEN(L, tok::rparen);
+            }
+            else if (symbol_matches("Apical", t)) {
+                tree.name = t.spelling;
+                tree.tag = 4;
+                L.next(2); // consume '(' and symbol
+                EXPECT_TOKEN(L, tok::rparen);
+            }
+            // Ignore zSmear.
+            else if (symbol_matches("zSmear", t)) {
+                PARSE_ZSMEAR(L, [[maybe_unused]] auto _);
+            }
+            else if (t.kind==tok::integer || t.kind==tok::real) {
+                // Assume that this is the first sample.
+                // Break to start parsing the samples in the sub-tree.
+                break;
+            }
+            else {
+                return unexpected(PARSE_ERROR("Unexpected input '"+t.spelling+"'", t.loc));
+            }
+        }
+        else if (t.kind == tok::rparen) {
+            // The end of the sub-tree expression was reached while parsing the header.
+            // Implies that there were no samples in the sub-tree, which we will treat
+            // as an error.
+            return unexpected(PARSE_ERROR("Empty sub-tree", t.loc));
+        }
+        else {
+            // An unexpected token was encountered.
+            return unexpected(PARSE_ERROR("Unexpected input '"+t.spelling+"'", t.loc));
+        }
+    }
+
+    if (tree.tag==tree.no_tag) {
+        return unexpected(PARSE_ERROR("Missing sub-tree label (CellBody, Axon, Dendrite or Apical)", L.current().loc));
+    }
+
+    // Now that the meta data has been read, process the samples.
+    // parse_branch will recursively construct the sub-tree.
+    if (auto branches = parse_branch(L)) {
+        tree.root = std::move(*branches);
+    }
+    else {
+        return FORWARD_PARSE_ERROR(branches.error());
+    }
+
+    EXPECT_TOKEN(L, tok::rparen);
+
+    return tree;
+}
+
+
+// Perform the parsing of the input as a string.
+asc_morphology parse_asc_string(const char* input) {
+    asc::lexer lexer(input);
+
+    std::vector<sub_tree> sub_trees;
+
+    // Iterate over high-level pseudo-s-expressions in the file.
+    // This pass simply parses the contents of the file, to be interpreted
+    // in a later pass.
+    while (lexer.current().kind != asc::tok::eof) {
+        auto t = lexer.current();
+
+        // Test for errors
+        if (t.kind == asc::tok::error) {
+            throw asc_parse_error(t.spelling, t.loc.line, t.loc.column);
+        }
+
+        // Expect that all top-level expressions start with open parenthesis '('
+        if (t.kind != asc::tok::lparen) {
+            throw asc_parse_error("expect opening '('", t.loc.line, t.loc.column);
+        }
+
+        // top level expressions are one of
+        //      ImageCoords
+        //      Sections
+        //      Description
+        t = lexer.peek();
+        if (symbol_matches("Description", t)) {
+            lexer.next();
+            parse_to_closing_paren(lexer);
+        }
+        else if (symbol_matches("ImageCoords", t)) {
+            lexer.next();
+            parse_to_closing_paren(lexer);
+        }
+        else if (symbol_matches("Sections", t)) {
+            lexer.next();
+            parse_to_closing_paren(lexer);
+        }
+        else {
+            if (auto tree = parse_sub_tree(lexer)) {
+                sub_trees.push_back(std::move(*tree));
+            }
+            else {
+                throw tree.error().make_exception();
+            }
+        }
+    }
+
+
+    // Return an empty description if no sub-trees were parsed.
+    if (!sub_trees.size()) {
+        return {};
+    }
+
+    // Process the sub-trees to construct the morphology and labels.
+    std::vector<std::size_t> soma_contours;
+    for (unsigned i=0; i<sub_trees.size(); ++i) {
+        if (sub_trees[i].tag == 1) soma_contours.push_back(i);
+    }
+
+    const auto soma_count = soma_contours.size();
+
+    // Assert that there is no more than one CellBody description.
+    // This case of multiple contours to define the soma has to be special cased.
+    if (soma_count!=1u) {
+        throw asc_unsupported("only 1 CellBody contour can be handled");
+    }
+
+    arb::segment_tree stree;
+
+    // Form a soma composed of two cylinders, extended along the positive and negative
+    // z axis from the center of the soma.
+    //
+    //          --------  soma_b
+    //          |      |
+    //          |      |
+    //          --------  soma_c
+    //          |      |
+    //          |      |
+    //          --------  soma_e
+    //
+
+    arb::mpoint soma_c, soma_b, soma_e;
+    if (soma_count==1u) {
+        const auto& st = sub_trees[soma_contours.front()];
+        const auto& samples = st.root.samples;
+        if (samples.size()==1u) {
+            // The soma is described as a sphere with a single sample.
+            soma_c = samples.front();
+        }
+        else {
+            // The soma is described by a contour.
+            const auto ns = samples.size();
+            soma_c.x = std::accumulate(samples.begin(), samples.end(), 0., [](double a, auto& s) {return a+s.x;}) / ns;
+            soma_c.y = std::accumulate(samples.begin(), samples.end(), 0., [](double a, auto& s) {return a+s.y;}) / ns;
+            soma_c.z = std::accumulate(samples.begin(), samples.end(), 0., [](double a, auto& s) {return a+s.z;}) / ns;
+            soma_c.radius = std::accumulate(samples.begin(), samples.end(), 0.,
+                    [&soma_c](double a, auto& c) {return a+arb::distance(c, soma_c);}) / ns;
+        }
+        soma_b = {soma_c.x, soma_c.y, soma_c.z-soma_c.radius, soma_c.radius};
+        soma_e = {soma_c.x, soma_c.y, soma_c.z+soma_c.radius, soma_c.radius};
+        stree.append(arb::mnpos, soma_c, soma_b, 1);
+        stree.append(arb::mnpos, soma_c, soma_e, 1);
+    }
+
+    // Append the dend, axon and apical dendrites.
+    for (const auto& st: sub_trees) {
+        const int tag = st.tag;
+
+        // Skip soma contours.
+        if (tag==1) continue;
+
+        // For now attach everything to the center of the soma at soma_c.
+        // Later we could try to attach to whichever end of the soma is closest.
+        // Also need to push parent id
+        struct binf {
+            const branch* child;
+            arb::msize_t parent_id;
+            arb::mpoint sample;
+        };
+
+        std::vector<binf> tails = {{&st.root, arb::mnpos, soma_c}};
+
+        while (!tails.empty()) {
+            auto head = tails.back();
+            tails.pop_back();
+
+            auto parent = head.parent_id;
+            auto prox_sample = head.sample;
+            auto& b = *head.child;
+
+            for (auto& dist_sample: b.samples) {
+                parent = stree.append(parent, prox_sample, dist_sample, tag);
+                prox_sample = dist_sample;
+            }
+
+            // Push child branches to stack in reverse order.
+            // This ensures that branches are popped from the stack in the same
+            // order they were described in the file, so that segments in the
+            // segment tree were added in the same order they are described
+            // in the file, to give deterministic branch numbering.
+            for (auto it=b.children.rbegin(); it!=b.children.rend(); ++it) {
+                tails.push_back({&(*it), parent, prox_sample});
+            }
+        }
+    }
+
+    // Construct the morphology.
+    arb::morphology morphology(stree);
+
+    // Construct the label dictionary.
+    arb::label_dict labels;
+    labels.set("soma", arb::reg::tagged(1));
+    labels.set("axon", arb::reg::tagged(2));
+    labels.set("dend", arb::reg::tagged(3));
+    labels.set("apic", arb::reg::tagged(4));
+
+    return {std::move(morphology), std::move(labels)};
+}
+
+asc_morphology load_asc(std::string filename) {
+    std::ifstream fid(filename);
+
+    if (!fid.good()) {
+        throw arb::file_not_found_error(filename);
+    }
+
+    // load contents of the file into a string.
+    std::string fstr;
+    fid.seekg(0, std::ios::end);
+    fstr.reserve(fid.tellg());
+    fid.seekg(0, std::ios::beg);
+
+    fstr.assign((std::istreambuf_iterator<char>(fid)),
+                 std::istreambuf_iterator<char>());
+
+    return parse_asc_string(fstr.c_str());
+}
+
+} // namespace arborio
+
diff --git a/doc/cpp/morphology.rst b/doc/cpp/morphology.rst
index 30fe87e9202c227942665f453cb9245f11219b3d..20b0a46fa96dcc960bb33121d7e5e4252a005788 100644
--- a/doc/cpp/morphology.rst
+++ b/doc/cpp/morphology.rst
@@ -415,6 +415,29 @@ basic checks performed on them. The :cpp:type:`swc_data` object can then be used
 
    Returns a :cpp:type:`morphology` constructed according to NEURON's SWC specifications.
 
+.. _cppasc:
+
+Neurolucida ASCII
+^^^^^^^^^^^^^^^^^^
+
+Arbor supports reading morphologies described using the
+:ref:`Neurolucida <formatasc>` file format.
+
+The :cpp:func:`load_asc()` function is used to parse the ASC file and generate a :cpp:type:`asc_morphology` object,
+which is a simple struct with two members representing the morphology and a label dictionary with labeled
+regions and locations.
+
+.. cpp:class:: asc_morphology
+
+   .. cpp:member:: arb::morphology morphology
+
+   .. cpp:member:: arb::label_dict labels
+
+.. cpp:function:: asc_morphology load_asc(const std::string& filename)
+
+   Parse a Neurolucida ASCII file.
+   Throws an exception if there is an error parsing the file.
+
 .. _cppneuroml:
 
 NeuroML
@@ -564,4 +587,3 @@ which is intended to identify the problematic construct within the document.
 .. cpp:class:: cyclic_dependency: neuroml_exception
 
    A segment or segment group ultimately refers to itself via ``parent``
-   or ``include`` elements respectively.
\ No newline at end of file
diff --git a/doc/fileformat/asc.rst b/doc/fileformat/asc.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cc1c5ccdd35a3f255e360ae942a041f1b2dc68e8
--- /dev/null
+++ b/doc/fileformat/asc.rst
@@ -0,0 +1,61 @@
+.. _formatasc:
+
+Neurolucida ASCII format
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Arbor has support for reading cable cell morphologies described using the
+`Neurolucida ASCII file format <https://www.mbfbioscience.com/help/pdf/NL9.pdf>`_
+or ``.asc`` file format.
+
+Because ASCII files contain both a morphology description and meta data, the
+loader returns both a morphology, and a label dictionary that describes regions
+and locsets from the meta data.
+
+.. warning::
+    The ASCII file format has no formal specification, and describes not only the cell
+    morphology and features like spines, but rich meta data about markers and colors.
+    Not all of the information is relevant for simulation.
+
+    Arbor's ASCII importer discards descriptions that it determines are
+    not relevant for simulation (e.g. color meta-data attached to a dendrite).
+    However Arbor will throw an error when it encounters content that it can't interpret,
+    unlike some other readers that will ignore anything they don't recognise.
+
+    Because Arbor does not yet recognise all common asc file patterns, it is possible that your
+    model might return an error message when you try to load it in Arbor.
+    We add support for features as they are brought to our attention, because we must rely on users
+    in place of a formal specification.
+
+    Please open an `issue <https://github.com/arbor-sim/arbor/issues>`_ if:
+
+      * you have an ``.asc`` file that Arbor can't parse;
+      * or there is meta data, such as spine locations, that you would like to see in the output;
+
+    and we will add support for your ASCII files.
+
+Soma / CellBody
+""""""""""""""""
+
+The soma, or CellBody, is described in one of three different methods (that we are aware of) in
+an ASCII file.
+
+  1. As a CellBody statement containing a single location and radius, which models **a sphere**.
+  2. As a CellBody statement containing an unbranched sequence of locations that define **a single contour**.
+  3. As multiple CellBody statements, each defining a contour, that describe the soma as **a stack of contours**.
+
+Arbor supports description methods 1 and 2, and support for method 3 can be added on request
+(open an issue).
+
+In each case, the soma is modeled as a cylinder with diameter equal to its length, centred
+at the centre of the soma, and oriented along the z axis.
+
+For a **spherical** soma, the centre and diameter are that of the sphere. For
+a **contour**, the centre is the centroid of the locations that define the contour,
+and the radius is the average distance of the centre to the locations on the contour.
+
+API
+"""
+
+* :ref:`Python <pyasc>`
+* :ref:`C++ <cppasc>`
+
diff --git a/doc/index.rst b/doc/index.rst
index e61d4f3a4f07b202aed3c173cb8ae657cae18f8f..944841201e1aace4b947799a80de9b72af827235 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -107,6 +107,7 @@ Arbor is an `eBrains project <https://ebrains.eu/service/arbor/>`_.
 
    fileformat/swc
    fileformat/neuroml
+   fileformat/asc
    fileformat/nmodl
 
 .. toctree::
diff --git a/doc/python/morphology.rst b/doc/python/morphology.rst
index 8f58623d775f76f0d0969b62dd196179826197b3..a68b4ee0c812ef3cdf1eb516b78bb88208ef1455 100644
--- a/doc/python/morphology.rst
+++ b/doc/python/morphology.rst
@@ -646,3 +646,37 @@ constitute part of the CV boundary point set.
 
       :param str morph_id: ID of the cell.
       :rtype: optional(neuroml_morph_data)
+
+.. _pyasc:
+
+.. py:class:: asc_morphology
+
+   The morphology and label dictionary meta-data loaded from a Neurolucida ASCII ``.asc`` file.
+
+   .. py:attribute:: morphology
+
+       The cable cell morphology.
+
+   .. py:attribute:: labels
+
+       The labeled regions and locations extracted from the meta data. The four canonical regions are labeled
+       ``'soma'``, ``'axon'``, ``'dend'`` and ``'apic'``.
+
+.. py:function:: load_asc(filename)
+
+   Loads the :class:`asc_morphology` from a :ref:`Neurolucida ASCII file <formatasc>`.
+
+   .. code-block:: Python
+
+       import arbor
+
+       # Load morphology and labels from file.
+       asc = arbor.load_asc('granule.asc')
+
+       # Construct a cable cell.
+       decor = arbor.decor()
+       cell = arbor.cable_cell(asc.morphology, asc.labels, decor)
+
+
+   :param str filename: the name of the input file.
+   :rtype: asc_morphology
diff --git a/python/morphology.cpp b/python/morphology.cpp
index aa0425b4ba18d46170c3c6118cee63867968b481..d15457855ddcad2ddd237941704999d9cacbd414 100644
--- a/python/morphology.cpp
+++ b/python/morphology.cpp
@@ -13,6 +13,7 @@
 #include <arbor/version.hpp>
 
 #include <arborio/swcio.hpp>
+#include <arborio/neurolucida.hpp>
 
 #ifdef ARB_NEUROML_ENABLED
 #include <arborio/arbornml.hpp>
@@ -370,6 +371,31 @@ void register_morphology(py::module& m) {
                     return util::pprintf("<arbor.morphology:\n{}>", m);
                 });
 
+    // Neurolucida ASCII, or .asc, file format support.
+
+    py::class_<arborio::asc_morphology> asc_morphology(m, "asc_morphology",
+            "The morphology and label dictionary meta-data loaded from a Neurolucida ASCII (.asc) file.");
+    asc_morphology
+        .def_readonly("morphology",
+                &arborio::asc_morphology::morphology,
+                "The cable cell morphology")
+        .def_property_readonly("labels",
+            [](const arborio::asc_morphology& m) {return label_dict_proxy(m.labels);},
+            "The four canonical regions are labeled 'soma', 'axon', 'dend' and 'apic'.");
+
+    m.def("load_asc",
+        [](std::string fname) {
+            try {
+                return arborio::load_asc(fname);
+            }
+            catch (std::exception& e) {
+                // Try to produce helpful error messages for Neurolucida ASC parsing errors.
+                throw pyarb_error(util::pprintf("error loading neurolucida asc file: {}", e.what()));
+            }
+        },
+        "filename"_a, "Load a morphology and meta data from a Neurolucida ASCII .asc file.");
+
+
 #ifdef ARB_NEUROML_ENABLED
     // arborio::morphology_data
     py::class_<arborio::morphology_data> nml_morph_data(m, "neuroml_morph_data");
@@ -412,7 +438,7 @@ void register_morphology(py::module& m) {
                   return arborio::neuroml(string_data);
               }
               catch (arborio::neuroml_exception& e) {
-                  // Try to produce helpful error messages for SWC parsing errors.
+                  // Try to produce helpful error messages for NeuroML parsing errors.
                   throw pyarb_error(
                       util::pprintf("NeuroML error processing file {}: ", fname, e.what()));
               }
diff --git a/test/unit/CMakeLists.txt b/test/unit/CMakeLists.txt
index 43418b99dbd01b6370f47ea275ad4152d8097187..a6d0f519e69e18825a8bec18e830a9e2416b979c 100644
--- a/test/unit/CMakeLists.txt
+++ b/test/unit/CMakeLists.txt
@@ -85,6 +85,7 @@ endforeach()
 
 set(unit_sources
     ../common_cells.cpp
+    test_asc.cpp
     test_any_cast.cpp
     test_any_ptr.cpp
     test_any_visitor.cpp
diff --git a/test/unit/test_asc.cpp b/test/unit/test_asc.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..05807fa714b0066bffff9ba073d751728b368f7c
--- /dev/null
+++ b/test/unit/test_asc.cpp
@@ -0,0 +1,243 @@
+#include <iostream>
+#include <fstream>
+
+#include <arbor/cable_cell.hpp>
+#include <arbor/morph/primitives.hpp>
+#include <arbor/morph/segment_tree.hpp>
+
+#include <arborio/neurolucida.hpp>
+
+#include "../gtest.h"
+#include "arbor/arbexcept.hpp"
+
+TEST(asc, file_not_found) {
+    EXPECT_THROW(arborio::load_asc("this-file-does-not-exist.asc"), arb::file_not_found_error);
+}
+
+// Declare the implementation of the parser that takes a string input
+namespace arborio {
+asc_morphology parse_asc_string(const char* input);
+}
+
+// Test different forms of empty files.
+TEST(asc, empty_file) {
+    // A file with no contents at all.
+    {
+        const char* empty_file = "";
+        auto m = arborio::parse_asc_string(empty_file);
+        EXPECT_TRUE(m.morphology.empty());
+    }
+
+    // Inputs with header meta-data, but no sub-trees.
+    {
+        const char * input =
+" (Description \
+)  ;  End of description \
+(ImageCoords Filename \
+ \"C:\\local\\input.lsm\" \
+ Merge 65535 65535 65535 0 \
+ Coords 0.057299 0.057299 0 0 0 0.057299 0.057299 0 0 -0.360149 0.057299 \
+ 0.057299 0 0 -0.720299 0.057299 0.057299 0 0 -1.080448 0.057299 0.057299 0 0 \
+ -1.440598 0.057299 0.057299 0 0 -1.800747 0.057299 0.057299 0 0 -2.160896 \
+) ; End of ImageCoord";
+        auto m = arborio::parse_asc_string(input);
+        EXPECT_TRUE(m.morphology.empty());
+    }
+}
+
+// Morphologies with only the CellBody
+TEST(asc, only_cell_body) {
+    { // CellBody with no samples: should be an error.
+        const char* input = "((CellBody))";
+        EXPECT_THROW(arborio::parse_asc_string(input), arborio::asc_parse_error);
+    }
+
+    { // CellBody with a single sample defining the center and radius of a sphere.
+        const char* input = "((CellBody) (0 0 1 2))";
+        auto m = arborio::parse_asc_string(input);
+
+        // Soma is a cylinder composed of two cylinders attached to the root.
+        EXPECT_EQ(m.morphology.num_branches(), 2u);
+
+        auto& segs1 = m.morphology.branch_segments(0);
+        EXPECT_EQ(segs1.size(), 1u);
+        EXPECT_EQ(segs1[0].prox, (arb::mpoint{0., 0., 1., 2.}));
+        EXPECT_EQ(segs1[0].dist, (arb::mpoint{0., 0., -1., 2.}));
+
+        auto& segs2 = m.morphology.branch_segments(1);
+        EXPECT_EQ(segs2.size(), 1u);
+        EXPECT_EQ(segs2[0].prox, (arb::mpoint{0., 0., 1., 2.}));
+        EXPECT_EQ(segs2[0].dist, (arb::mpoint{0., 0., 3., 2.}));
+    }
+
+
+    { // CellBody with a circular contour defined by 4 points
+        const char* input = "((CellBody) (-2 0 1 0) (0 2 1 0) (2 0 1 0) (0 -2 1 0))";
+        auto m = arborio::parse_asc_string(input);
+
+        // Soma is a cylinder composed of two cylinders attached to the root.
+        EXPECT_EQ(m.morphology.num_branches(), 2u);
+
+        auto& segs1 = m.morphology.branch_segments(0);
+        EXPECT_EQ(segs1.size(), 1u);
+        EXPECT_EQ(segs1[0].prox, (arb::mpoint{0., 0., 1., 2.}));
+        EXPECT_EQ(segs1[0].dist, (arb::mpoint{0., 0., -1., 2.}));
+
+        auto& segs2 = m.morphology.branch_segments(1);
+        EXPECT_EQ(segs2.size(), 1u);
+        EXPECT_EQ(segs2[0].prox, (arb::mpoint{0., 0., 1., 2.}));
+        EXPECT_EQ(segs2[0].dist, (arb::mpoint{0., 0., 3., 2.}));
+    }
+
+    { // Cell with two CellBodys: unsupported feature ATM.
+        const char* input =
+"((CellBody)\
+  (-2 0 1 0)\
+  (0 2 1 0))\
+ ((CellBody)\
+  (-2 0 3 0)\
+  (0 2 3 0))";
+        EXPECT_THROW(arborio::parse_asc_string(input), arborio::asc_unsupported);
+    }
+}
+
+// Test parsing of basic meta data that can be added to the start of a sub-tree.
+// This information is discarded and not used for building the morphology,
+// however it is still required that it should be parsed, and throw an error
+// if ill-formed or unexpected meta-data is encountered.
+TEST(asc, sub_tree_meta) {
+    {   // String 
+        const char * input = "(\"Soma\" (CellBody) (237.86 -189.71 -6.49 0.06))";
+        auto m = arborio::parse_asc_string(input);
+        EXPECT_EQ(m.morphology.num_branches(), 2u);
+    }
+
+    {  // Named color
+        const char * input = "((Color Red) (CellBody) (237.86 -189.71 -6.49 0.06))";
+        auto m = arborio::parse_asc_string(input);
+        EXPECT_EQ(m.morphology.num_branches(), 2u);
+    }
+
+    {   // RGB color
+        const char * input = "((Color RGB(128, 128, 96)) (CellBody) (237.86 -189.71 -6.49 0.06))";
+        auto m = arborio::parse_asc_string(input);
+        EXPECT_EQ(m.morphology.num_branches(), 2u);
+    }
+
+    {   // badly formatted RGB color: missing comma
+        const char * input = "((Color RGB(128  128, 96)) (CellBody) (237.86 -189.71 -6.49 0.06))";
+        EXPECT_THROW(arborio::parse_asc_string(input), arborio::asc_parse_error);
+    }
+
+    {   // badly formatted RGB color: out of range triple value
+        const char * input = "((Color RGB(128,  128, 256)) (CellBody) (237.86 -189.71 -6.49 0.06))";
+        EXPECT_THROW(arborio::parse_asc_string(input), arborio::asc_parse_error);
+    }
+}
+
+TEST(asc, branching) {
+    {
+        // Soma composed of 2 branches, and stick and fork dendrite composed of 3 branches.
+        const char* input =
+"((CellBody)\
+ (0 0 0 2)\
+)\
+((Dendrite)\
+ (0 2 0 1)\
+ (0 5 0 1)\
+ (\
+  (-5 5 0 1)\
+  |\
+  (6 5 0 1)\
+ )\
+ )";
+        auto result = arborio::parse_asc_string(input);
+        const auto& m = result.morphology;
+        EXPECT_EQ(m.num_branches(), 5u);
+        EXPECT_EQ(m.branch_children(0).size(), 0u);
+        EXPECT_EQ(m.branch_children(1).size(), 0u);
+        EXPECT_EQ(m.branch_children(2).size(), 2u);
+        EXPECT_EQ(m.branch_children(2)[0], 3u);
+        EXPECT_EQ(m.branch_children(2)[1], 4u);
+        EXPECT_EQ(m.branch_children(3).size(), 0u);
+        EXPECT_EQ(m.branch_children(4).size(), 0u);
+    }
+    {
+        // Soma composed of 2 branches, and a dendrite with a bit more interesting branching.
+        const char* input =
+"((CellBody)\
+ (0 0 0 2)\
+)\
+((Dendrite)\
+ (0 2 0 1)\
+ (0 5 0 1)\
+ (\
+  (-5 5 0 1)\
+  (\
+   (-5 5 0 1)\
+   |\
+   (6 5 0 1)\
+  )\
+  |\
+  (6 5 0 1)\
+ )\
+ )";
+        auto result = arborio::parse_asc_string(input);
+        const auto& m = result.morphology;
+        EXPECT_EQ(m.num_branches(), 7u);
+        EXPECT_EQ(m.branch_children(0).size(), 0u);
+        EXPECT_EQ(m.branch_children(1).size(), 0u);
+        EXPECT_EQ(m.branch_children(2).size(), 2u);
+        EXPECT_EQ(m.branch_children(2)[0], 3u);
+        EXPECT_EQ(m.branch_children(2)[1], 6u);
+        EXPECT_EQ(m.branch_children(3).size(), 2u);
+        EXPECT_EQ(m.branch_children(3)[0], 4u);
+        EXPECT_EQ(m.branch_children(3)[1], 5u);
+        EXPECT_EQ(m.branch_children(4).size(), 0u);
+        EXPECT_EQ(m.branch_children(5).size(), 0u);
+        EXPECT_EQ(m.branch_children(6).size(), 0u);
+    }
+    {
+        // Soma composed of 2 branches, and stick and fork dendrite and axon
+        // composed of 3 branches each.
+        const char* input =
+"((CellBody)\
+ (0 0 0 2)\
+)\
+((Dendrite)\
+ (0 2 0 1)\
+ (0 5 0 1)\
+ (\
+  (-5 5 0 1)\
+  |\
+  (6 5 0 1)\
+ )\
+)\
+((Axon)\
+ (0 -2 0 1)\
+ (0 -5 0 1)\
+ (\
+  (-5 -5 0 1)\
+  |\
+  (6 -5 0 1)\
+ )\
+)";
+        auto result = arborio::parse_asc_string(input);
+        const auto& m = result.morphology;
+        EXPECT_EQ(m.num_branches(), 8u);
+        EXPECT_EQ(m.branch_children(0).size(), 0u);
+        EXPECT_EQ(m.branch_children(1).size(), 0u);
+        EXPECT_EQ(m.branch_children(2).size(), 2u);
+        EXPECT_EQ(m.branch_children(2)[0], 3u);
+        EXPECT_EQ(m.branch_children(2)[1], 4u);
+        EXPECT_EQ(m.branch_children(3).size(), 0u);
+        EXPECT_EQ(m.branch_children(4).size(), 0u);
+        EXPECT_EQ(m.branch_children(5).size(), 2u);
+        EXPECT_EQ(m.branch_children(5)[0], 6u);
+        EXPECT_EQ(m.branch_children(5)[1], 7u);
+        EXPECT_EQ(m.branch_children(6).size(), 0u);
+        EXPECT_EQ(m.branch_children(7).size(), 0u);
+    }
+
+}
+