about summary refs log tree commit diff homepage
diff options
context:
space:
mode:
authorRalph Amissah <ralph@amissah.com>2016-06-16 01:49:06 -0400
committerRalph Amissah <ralph@amissah.com>2019-04-04 14:48:18 -0400
commit8ab7e935913c102fb039110e20b71f698a68c6ee (patch)
tree3472debd16ce656a57150399ce666e248565f011
parentstep4.1 as step4 but extract header meta & make on first reading in document (diff)
step5 sdlang used for config files and doc headers
-rw-r--r--makefile4
-rw-r--r--maker.org61
-rw-r--r--org/ao_abstract_doc_source.org39
-rw-r--r--org/ao_header_extract.org438
-rw-r--r--org/ao_output_debugs.org8
-rw-r--r--org/ao_read_source_files.org178
-rw-r--r--org/output.org23
-rw-r--r--org/sdp.org190
-rw-r--r--src/sdlang/ast.d1834
-rw-r--r--src/sdlang/dub.json38
-rw-r--r--src/sdlang/exception.d42
-rw-r--r--src/sdlang/lexer.d2068
-rw-r--r--src/sdlang/libinputvisitor/dub.json10
-rw-r--r--src/sdlang/libinputvisitor/libInputVisitor.d91
-rw-r--r--src/sdlang/package.d132
-rw-r--r--src/sdlang/parser.d551
-rw-r--r--src/sdlang/symbol.d61
-rw-r--r--src/sdlang/token.d505
-rw-r--r--src/sdlang/util.d84
-rwxr-xr-xsrc/sdp.d155
-rw-r--r--src/sdp/ao_abstract_doc_source.d125
-rw-r--r--src/sdp/ao_header_extract.d334
-rw-r--r--src/sdp/ao_output_debugs.d152
-rw-r--r--src/sdp/ao_read_config_files.d71
-rw-r--r--src/sdp/ao_read_source_files.d59
-rw-r--r--views/version.txt2
26 files changed, 6092 insertions, 1163 deletions
diff --git a/makefile b/makefile
index 283c44e..c983c18 100644
--- a/makefile
+++ b/makefile
@@ -74,6 +74,8 @@ dub_dmd_debug_clean: tangle
$(DUB) $(DUB_FLAGS)dmd --config=sdp-debug-clean
dub_dmd_tmp: tangle
$(DUB) $(DUB_FLAGS)dmd --config=sdp-tmp
+dub_dmd_debug_src:
+ $(DUB) $(DUB_FLAGS)dmd --config=sdp-debug
# ldc2
dub_ldc_release: expunge skel tangle
$(DUB) $(DUB_FLAGS)ldc2 --config=sdp-release
@@ -85,6 +87,8 @@ dub_ldc_debug_clean: tangle
$(DUB) $(DUB_FLAGS)ldc2 --config=sdp-debug-clean
dub_ldc_tmp: tangle
$(DUB) $(DUB_FLAGS)ldc2 --config=sdp-tmp
+dub_ldc_debug_src:
+ $(DUB) $(DUB_FLAGS)ldc2 --config=sdp-debug
# test releases
dub_release_test_dmd: tangle dub_dmd_release_test
dub_release_test_ldc: tangle dub_ldc_release_test
diff --git a/maker.org b/maker.org
index 5c7a0d3..61c0826 100644
--- a/maker.org
+++ b/maker.org
@@ -133,6 +133,7 @@ Set debug flags using DMD standard flag -debug= e.g.:
poem
quote
raw
+ sdlang
source
srclines
structattrib
@@ -242,6 +243,8 @@ dub_dmd_debug_clean: tangle
$(DUB) $(DUB_FLAGS)dmd --config=sdp-debug-clean
dub_dmd_tmp: tangle
$(DUB) $(DUB_FLAGS)dmd --config=sdp-tmp
+dub_dmd_debug_src:
+ $(DUB) $(DUB_FLAGS)dmd --config=sdp-debug
# ldc2
dub_ldc_release: expunge skel tangle
$(DUB) $(DUB_FLAGS)ldc2 --config=sdp-release
@@ -253,6 +256,8 @@ dub_ldc_debug_clean: tangle
$(DUB) $(DUB_FLAGS)ldc2 --config=sdp-debug-clean
dub_ldc_tmp: tangle
$(DUB) $(DUB_FLAGS)ldc2 --config=sdp-tmp
+dub_ldc_debug_src:
+ $(DUB) $(DUB_FLAGS)ldc2 --config=sdp-debug
# test releases
dub_release_test_dmd: tangle dub_dmd_release_test
dub_release_test_ldc: tangle dub_ldc_release_test
@@ -521,6 +526,62 @@ dflags platform="dmd" "-O -release"
}
#+END_SRC
+**** +sdlang+ :sdlang:
+#+BEGIN_SRC json :tangle ./src/sdlang/dub.json
+{
+ "name": "sdlang-d",
+ "description": "An SDL (Simple Declarative Language) library for D.",
+ "homepage": "http://github.com/Abscissa/SDLang-D",
+ "authors": ["Nick Sabalausky"],
+ "license": "zlib/libpng",
+ "copyright": "©2012-2015 Nick Sabalausky",
+ "sourcePaths": ["."],
+ "importPaths": ["."],
+ "buildRequirements": ["allowWarnings"],
+ "dependencies": {
+ "libinputvisitor": "~>1.2.0"
+ },
+ "subPackages": [
+ "./libinputvisitor"
+ ],
+ "configurations": [
+ {
+ "name": "test",
+ "targetType": "executable",
+ "versions": ["SDLang_TestApp"],
+ "targetPath": "../../bin/",
+ "targetName": "sdlang"
+ },
+ {
+ "name": "library",
+ "targetType": "library"
+ },
+ {
+ "name": "unittest",
+ "targetType": "executable",
+ "targetPath": "../../bin/",
+ "targetName": "sdlang-unittest",
+
+ "versions": ["sdlangUnittest", "sdlangTrace"]
+ }
+ ]
+}
+#+END_SRC
+
+**** +libinputvisitor+ :libinputvisitor:
+#+BEGIN_SRC json :tangle ./src/sdlang/libinputvisitor/dub.json
+{
+ "name": "libinputvisitor",
+ "description": "Write D input range generators in a straightforward coroutine style",
+ "authors": ["Nick Sabalausky"],
+ "homepage": "https://github.com/abscissa/libInputVisitor",
+ "license": "WTFPL",
+ "sourcePaths": ["."],
+ "importPaths": ["."],
+ "excludedSourceFiles": ["libInputVisitorExample.d"]
+}
+#+END_SRC
+
** .gitignore :gitignore:
#+BEGIN_SRC sh :tangle .gitignore
diff --git a/org/ao_abstract_doc_source.org b/org/ao_abstract_doc_source.org
index 557cda0..9e9e9e5 100644
--- a/org/ao_abstract_doc_source.org
+++ b/org/ao_abstract_doc_source.org
@@ -463,24 +463,24 @@ if (matchFirst(line, rgx.block_open)) {
&& ((type["para"] == State.off)
&& (type["heading"] == State.off))) {
/+ heading or para but neither flag nor line exists +/
- if ((to!string(dochead_make_json["make"]["headings"]).length > 2)
- && (type["make_headings"] == State.off)) {
- /+ heading found +/
- auto dochead_make_headings =
- to!string(dochead_make_json["make"]["headings"]);
- heading_found(line, dochead_make_headings, heading_match_str, heading_match_rgx, type);
- }
+ // if ((to!string(dochead_make["make"]["headings"]).length > 2)
+ // && (type["make_headings"] == State.off)) {
+ // /+ heading found +/
+ // auto dochead_make_headings =
+ // to!string(dochead_make["make"]["headings"]);
+ // heading_found(line, dochead_make_headings, heading_match_str, heading_match_rgx, type);
+ // }
if ((type["make_headings"] == State.on)
&& ((line_occur["para"] == State.off)
&& (line_occur["heading"] == State.off))
&& ((type["para"] == State.off)
&& (type["heading"] == State.off))) {
/+ heading make set +/
- heading_make_set(line, line_occur, heading_match_rgx, type);
+ // heading_make_set(line, line_occur, heading_match_rgx, type);
}
if (matchFirst(line, rgx.heading)) {
/+ heading match +/
- heading_matched(line, line_occur, an_object, lv, collapsed_lev, type, dochead_meta_json);
+ heading_matched(line, line_occur, an_object, lv, collapsed_lev, type);
} else if (line_occur["para"] == State.off) {
/+ para match +/
para_match(line, an_object, indent, bullet, type, line_occur);
@@ -2000,7 +2000,7 @@ auto book_index(
** heading or paragraph :heading:paragraph:
*** heading found :heading:
-#+name: abs_functions
+##+name: abs_functions
#+BEGIN_SRC d
auto heading_found(
char[] line,
@@ -2164,8 +2164,8 @@ auto heading_matched(
ref string[string] an_object,
ref int[string] lv,
ref int[string] collapsed_lev,
- ref int[string] type,
- ref JSONValue[string] dochead_meta_json
+ ref int[string] type
+ // ref JSONValue[string] dochead_meta_json
) {
if (auto m = match(line, rgx.heading)) {
/+ heading match +/
@@ -2179,10 +2179,10 @@ auto heading_matched(
assertions_doc_structure(an_object, lv); // includes most of the logic for collapsed levels
switch (an_object["lev"]) {
case "A":
- an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_title, to!string(dochead_meta_json["title"]["main"]));
- an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_author, to!string(dochead_meta_json["creator"]["author"]));
- // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_title, to!string(parseJSON(dochead_meta_json["title"]["main"])));
- // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_author, to!string(parseJSON(dochead_meta_json["creator"]["author"])));
+ // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_title, to!string(dochead_metadata["title"]["main"]));
+ // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_author, to!string(dochead_metadata["creator"]["author"]));
+ // // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_title, to!string(parseJSON(dochead_metadata["title"]["main"])));
+ // // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_author, to!string(parseJSON(dochead_metadata["creator"]["author"])));
collapsed_lev["h0"] = 1;
an_object["lev_collapsed_number"] =
to!string(collapsed_lev["h0"]);
@@ -4039,11 +4039,8 @@ template SiSUdocAbstraction() {
<<abs_init_struct>>
/+ ↓ abstract marked up document +/
- auto abstract_doc_source(
- char[][] markup_sourcefile_content,
- JSONValue[string] dochead_make_json,
- JSONValue[string] dochead_meta_json
- ) {
+ auto abstract_doc_source(char[][] markup_sourcefile_content) {
+
/+ ↓ abstraction init +/
<<abs_init_rest>>
/+ abstraction init ↑ +/
diff --git a/org/ao_header_extract.org b/org/ao_header_extract.org
deleted file mode 100644
index d075c7c..0000000
--- a/org/ao_header_extract.org
+++ /dev/null
@@ -1,438 +0,0 @@
-#+TITLE: sdp header extract
-#+AUTHOR: Ralph Amissah
-#+EMAIL: ralph.amissah@gmail.com
-#+STARTUP: indent
-#+LANGUAGE: en
-#+OPTIONS: H:3 num:nil toc:t \n:nil @:t ::t |:t ^:nil _:nil -:t f:t *:t <:t
-#+OPTIONS: TeX:t LaTeX:t skip:nil d:nil todo:t pri:nil tags:not-in-toc
-#+OPTIONS: author:nil email:nil creator:nil timestamp:nil
-#+PROPERTY: header-args :padline no :exports code :noweb yes
-#+EXPORT_SELECT_TAGS: export
-#+EXPORT_EXCLUDE_TAGS: noexport
-#+FILETAGS: :sdp:niu:ao:
-#+TAGS: assert(a) class(c) debug(d) mixin(m) sdp(s) tangle(T) template(t) WEB(W) noexport(n)
-
-[[./sdp.org][sdp]] [[./][org/]]
-* header
-
-// mixin SiSUheader;
-// auto set_header = HeaderDocMetadataMakeJson(); // reintroduce
-
-** header document metadata in json :json:
-
-#+name: ao_markup_header_extract
-#+BEGIN_SRC d
-auto header_metadata_and_make_jsonstr(
- string header,
- JSONValue[string] dochead_meta,
- JSONValue[string] dochead_make
-)
-in { }
-body {
- scope(exit) {
- destroy(header);
- destroy(dochead_meta);
- destroy(dochead_make);
- }
- if (auto t = match(header, rgx.head_main)) {
- char[][] obj_spl = split(
- cast(char[]) header,
- rgx.line_delimiter_ws_strip
- );
- auto hm = to!string(t.captures[1]);
- if (match(hm, rgx.main_headers)) {
- foreach (line; obj_spl) {
- if (auto m = match(line, rgx.head_main)) {
- if (!empty(m.captures[2])) {
- if (hm == "creator") {
- dochead_meta[hm]["author"].str =
- to!string(m.captures[2]);
- } else if (hm == "title") {
- dochead_meta[hm]["main"].str =
- to!string(m.captures[2]);
- } else if (hm == "publisher") {
- dochead_meta[hm]["name"].str =
- to!string(m.captures[2]);
- }
- }
- } else if (auto s = match(line, rgx.head_sub)) {
- if (!empty(s.captures[2])) {
- auto hs = to!string(s.captures[1]);
- if ((hm == "make" )
- && (dochead_make[hm].type() == JSON_TYPE.OBJECT)) {
- switch (hm) {
- case "make":
- if (match(hs, rgx.subhead_make)) {
- if (dochead_make[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_make[hm][hs].str = to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- default:
- break;
- }
- } else if (dochead_meta[hm].type() == JSON_TYPE.OBJECT) {
- switch (hm) {
- case "creator":
- if (match(hs, rgx.subhead_creator)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "title":
- if (match(hs, rgx.subhead_title)) {
- if ((hs == "subtitle")
- && (dochead_meta[hm]["sub"].type() == JSON_TYPE.STRING)) {
- dochead_meta[hm]["sub"].str =
- to!string(s.captures[2]);
- } else if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "rights":
- if (match(hs, rgx.subhead_rights)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "date":
- if (match(hs, rgx.subhead_date)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "original":
- if (match(hs, rgx.subhead_original)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "classify":
- if (match(hs, rgx.subhead_classify)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "identifier":
- if (match(hs, rgx.subhead_identifier)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "notes":
- if (match(hs, rgx.subhead_notes)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "publisher":
- if (match(hs, rgx.subhead_publisher)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "links":
- destroy(hm);
- destroy(hs);
- // if (match(hs, rgx.subhead_links)) {
- // if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- // dochead_meta[hm][hs].str = to!string(s.captures[2]);
- // }
- // } else {
- // writeln("not a valid header type:", hm, ":", hs);
- // destroy(hm);
- // destroy(hs);
- // }
- break;
- default:
- break;
- }
- }
- }
- }
- }
- } else {
- writeln("not a valid header type:", hm);
- }
- }
- auto t = tuple(dochead_meta, dochead_make);
- static assert(!isTypeTuple!(t));
- return t;
-}
-#+END_SRC
-
-** header extract
-#+name: ao_markup_header_extract
-#+BEGIN_SRC d
-private auto header_extract(
- char[] line,
- ref int[string] line_occur,
- ref string[string] an_object,
- ref int[string] type
-) {
- if (matchFirst(line, rgx.header_make)) {
- /+ matched header_make +/
- debug(header1) { // header
- // tell_l("yellow", line);
- }
- type["header"] = State.on;
- type["header_make"] = State.on;
- type["header_meta"] = State.off;
- ++line_occur["header_make"];
- an_object["obj"] ~= line ~= "\n";
- } else if (matchFirst(line, rgx.header_meta)) {
- /+ matched header_metadata +/
- debug(header1) { // header
- // tell_l("yellow", line);
- }
- type["header"] = State.on;
- type["header_make"] = State.off;
- type["header_meta"] = State.on;
- ++line_occur["header_meta"];
- an_object["obj"] ~= line ~= "\n";
- } else if (type["header_make"] == State.on
- && (line_occur["header_make"] > State.off)) {
- /+ header_make flag set +/
- if (matchFirst(line, rgx.header_sub)) {
- /+ sub-header +/
- debug(header1) {
- // tell_l("yellow", line);
- }
- // type["header"] = State.on;
- ++line_occur["header_make"];
- an_object["obj"] ~= line ~= "\n";
- }
- } else if (type["header_meta"] == State.on
- && (line_occur["header_meta"] > State.off)) {
- /+ header_metadata flag set +/
- if (matchFirst(line, rgx.header_sub)) {
- /+ sub-header +/
- debug(header1) {
- // tell_l("yellow", line);
- }
- ++line_occur["header_meta"];
- an_object["obj"] ~= line ~= "\n";
- }
- }
- // return 0;
- return an_object;
-}
-#+END_SRC
-
-** header array :header:
-#+name: ao_markup_header_extract
-#+BEGIN_SRC d
-auto header_set_common(
- ref int[string] line_occur,
- ref string[string] an_object,
- ref int[string] type
-) {
- // line_occur["header"] = State.off;
- line_occur["header_make"] = State.off;
- line_occur["header_meta"] = State.off;
- type["header"] = State.off;
- // type["header_make"] = State.off;
- // type["header_meta"] = State.off;
- an_object.remove("obj");
- an_object.remove("is");
- an_object.remove("attrib");
-}
-private auto headerContentJSON(in char[] src_header) {
- auto type = flags_type_init;
- type = [
- "header" : State.off,
- "header_make" : State.off,
- "header_meta" : State.off,
- ];
- string[string] an_object;
- int[string] line_occur;
- auto dochead_make = parseJSON(header_make_jsonstr).object;
- auto dochead_meta = parseJSON(header_meta_jsonstr).object;
- auto set_header = HeaderDocMetadataMakeJson();
- char[][] source_header_arr =
- split(cast(char[]) src_header, rgx.line_delimiter);
- foreach(header_line; source_header_arr) {
- if (auto m = matchFirst(header_line, rgx.comment)) {
- /+ matched comment +/
- debug(comment) {
- // tell_l("blue", header_line);
- }
- header_set_common(line_occur, an_object, type);
- // type["header_make"] = State.off;
- // type["header_meta"] = State.off;
- } else if ((matchFirst(header_line, rgx.header))
- || (type["header_make"] == State.on
- && (line_occur["header_make"] > State.off))
- || (type["header_meta"] == State.on
- && (line_occur["header_meta"] > State.off))) {
- if (header_line.length == 0) {
- /+ header_make instructions (current line empty) +/
- auto dochead_metadata_and_make =
- set_header.header_metadata_and_make_jsonstr(strip(an_object["obj"]), dochead_meta, dochead_make);
- static assert(!isTypeTuple!(dochead_metadata_and_make));
- dochead_meta = dochead_metadata_and_make[0];
- dochead_make = dochead_metadata_and_make[1];
- header_set_common(line_occur, an_object, type);
- type["header_make"] = State.off;
- type["header_meta"] = State.off;
- writeln(dochead_metadata_and_make);
- } else {
- an_object = header_extract(header_line, line_occur, an_object, type);
- }
- } else {
- // writeln(__LINE__);
- }
- }
- auto t = tuple(
- dochead_make,
- dochead_meta,
- );
- return t;
-}
-#+END_SRC
-
-** +header document metadata+ :document:metadata:
-*** +within abstraction loop+
-
-**** +line exist: header make+ :header:make:
-# #+name: abs_in_loop_body_not_block_obj
-# #+BEGIN_SRC d
-# } else if (line_occur["header_make"] > State.off) {
-# /+ header_make +/
-# // should be caught by sub-header
-# debug(header) {
-# tell_l("red", line);
-# }
-# an_object["obj"] ~= line ~= "\n";
-# ++line_occur["header_make"];
-# #+END_SRC
-
-**** +line exist: header metadata+ :header:metadata:
-# #+name: abs_in_loop_body_not_block_obj
-# #+BEGIN_SRC d
-# } else if (line_occur["header_meta"] > State.off) {
-# /+ header_metadata +/
-# // should be caught by sub-header
-# debug(header) { // para
-# tell_l("red", line);
-# }
-# an_object["obj"] ~= line ~= "\n";
-# ++line_occur["header_meta"];
-# #+END_SRC
-
-**** +header_make instructions+ :header:make:instructions:
-
-# #+name: abs_in_loop_body_not_block_obj_line_empty
-# #+BEGIN_SRC d
-# if ((type["header_make"] == State.on)
-# && (line_occur["header_make"] > State.off)) {
-# /+ header_make instructions (current line empty) +/
-# auto dochead_metadata_and_make =
-# set_header.header_metadata_and_make_jsonstr(strip(an_object["obj"]), dochead_meta, dochead_make);
-# static assert(!isTypeTuple!(dochead_metadata_and_make));
-# dochead_meta = dochead_metadata_and_make[0];
-# dochead_make = dochead_metadata_and_make[1];
-# header_set_common(line_occur, an_object, type);
-# processing.remove("verse");
-# #+END_SRC
-
-**** +header_metadata+ :header:metadata:
-
-# #+name: abs_in_loop_body_not_block_obj_line_empty
-# #+BEGIN_SRC d
-# } else if ((type["header_meta"] == State.on)
-# && (line_occur["header_meta"] > State.off)) {
-# /+ header_meta (current line empty) +/
-# auto dochead_metadata_and_make =
-# set_header.header_metadata_and_make_jsonstr(strip(an_object["obj"]), dochead_meta, dochead_make);
-# static assert(!isTypeTuple!(dochead_metadata_and_make));
-# dochead_meta = dochead_metadata_and_make[0];
-# dochead_make = dochead_metadata_and_make[1];
-# header_set_common(line_occur, an_object, type);
-# type["header_make"] = State.off;
-# type["header_meta"] = State.off;
-# processing.remove("verse");
-# #+END_SRC
-
-* tangles (code structure) :tangle:
-** ao_markup_header_extract.d: :ao_markup_header_extract.d:
-#+BEGIN_SRC d :tangle ../src/sdp/ao_header_extract.d
-/+
- extract header return json
-+/
-template SiSUheaderExtract() {
- private import
- std.exception,
- std.regex,
- std.utf,
- std.conv : to;
- private import
- ao_rgx; // ao_defaults.d
- struct HeaderDocMetadataMakeJson {
- mixin SiSUrgxInitFlags;
- mixin RgxInit;
- auto rgx = Rgx();
- enum State { off, on }
- string hm, hs;
- <<ao_markup_header_extract>>
- }
-}
-#+END_SRC
diff --git a/org/ao_output_debugs.org b/org/ao_output_debugs.org
index 6f6a6c8..99a3301 100644
--- a/org/ao_output_debugs.org
+++ b/org/ao_output_debugs.org
@@ -87,7 +87,7 @@ debug(objects) {
}
#+END_SRC
** (headermakejson) :json:header:
-#+name: ao_output_debugs
+##+name: ao_output_debugs
#+BEGIN_SRC d
debug(headermakejson) {
writefln(
@@ -117,7 +117,7 @@ debug(headermakejson) {
}
#+END_SRC
** (headermetadatajson) :json:header:
-#+name: ao_output_debugs
+##+name: ao_output_debugs
#+BEGIN_SRC d
debug(headermetadatajson) {
writefln(
@@ -460,8 +460,8 @@ template SiSUoutputDebugs() {
auto ref const S contents,
string[][string][string] bookindex_unordered_hashes,
JSONValue[] biblio,
- JSONValue[string] dochead_make,
- JSONValue[string] dochead_meta,
+ // JSONValue[string] dochead_make,
+ // JSONValue[string] dochead_meta,
string fn_src,
bool[string] opt_action_bool
) {
diff --git a/org/ao_read_source_files.org b/org/ao_read_source_files.org
index 2d41105..05e42ec 100644
--- a/org/ao_read_source_files.org
+++ b/org/ao_read_source_files.org
@@ -13,6 +13,64 @@
#+TAGS: assert(a) class(c) debug(d) mixin(m) sdp(s) tangle(T) template(t) WEB(W) noexport(n)
[[./sdp.org][sdp]] [[./][org/]]
+* get config file :config:
+
+** [#A] read config file, source string :string:
+*** config file :file:config:
+#+name: ao_config_file
+#+BEGIN_SRC d
+final private string readInConfigFile() {
+ // enforce(
+ // exists(fn_src)!=0,
+ // "file not found"
+ // );
+ string[] possible_config_path_locations = [
+ environment["PWD"] ~ "/.sisu",
+ environment["PWD"] ~ "/_sisu",
+ environment["HOME"] ~ "/.sisu",
+ "/etc/sisu"
+ ];
+ string conf_sdl = "conf.sdl";
+ string config_file_str;
+ foreach(pth; possible_config_path_locations) {
+ auto conf_file = format(
+ "%s/%s",
+ pth,
+ conf_sdl,
+ );
+ // writeln(conf_file);
+ try {
+ if (exists(conf_file)) {
+ writeln(conf_file);
+ config_file_str = readText(conf_file);
+ break;
+ }
+ }
+ catch (ErrnoException ex) {
+ //// Handle errors
+ // switch(ex.errno) {
+ // case EPERM:
+ // case EACCES:
+ // // Permission denied
+ // break;
+ // case ENOENT:
+ // // File does not exist
+ // break;
+ // default:
+ // // Handle other errors
+ // break;
+ // }
+ }
+ // catch (UTFException ex) {
+ // // Handle validation errors
+ // }
+ catch (FileException ex) {
+ // Handle errors
+ }
+ }
+ return config_file_str;
+}
+#+END_SRC
* get markup source, read file :source:markup:
@@ -91,6 +149,68 @@ final private char[][] header0Content1(in string src_text) {
}
#+END_SRC
+** header sdlang
+
+#+name: ao_header_extract_sdl
+#+BEGIN_SRC d
+final private auto headerMakeSDLang(in string src_header) {
+ scope(failure) {
+ stderr.writefln(
+ "%s\n%s\n%s:%s failed here:\n src_header: %s",
+ __MODULE__, __FUNCTION__,
+ __FILE__, __LINE__,
+ src_header,
+ );
+ }
+ Tag sdl_root_header;
+ try {
+ sdl_root_header = parseSource(src_header);
+ }
+ catch(SDLangParseException e) {
+ stderr.writeln("SDLang problem with this document header:");
+ stderr.writeln(src_header);
+ // Error messages of the form:
+ // myFile.sdl(5:28): Error: Invalid integer suffix.
+ stderr.writeln(e.msg);
+ }
+ debug(sdlang) {
+ // // Value is a std.variant.Algebraic
+ // Value output_dir_structure_by = sdl_root_header.tags["output_dir_structure_by"][0].values[0];
+ // assert(output_dir_structure_by.type == typeid(string));
+ // writeln(output_dir_structure_by);
+
+ // Tag person = sdl_root_header.namespaces["myNamespace"].tags["person"][0];
+ // writeln("Name: ", person.attributes["name"][0].value);
+ //
+ // int age = person.tags["age"][0].values[0].get!int();
+ // writeln("Age: ", age);
+
+ writeln("header SDL:");
+ writeln(sdl_root_header.toSDLDocument());
+ }
+ return sdl_root_header;
+}
+#+END_SRC
+
+** header sdlang :header:
+#+name: ao_header_extract_sdl
+#+BEGIN_SRC d
+private auto headerSDLang(in char[] src_header) {
+ char[][] source_header_arr =
+ split(cast(char[]) src_header, rgx.line_delimiter);
+ char[] header_clean;
+ foreach(header_line; source_header_arr) {
+ if (!match(header_line, rgx.comments)) {
+ header_clean ~= header_line ~ "\n";
+ // writeln(header_line);
+ }
+ }
+ // writeln(header_clean); // consider
+ auto header_sdlang=headerMakeSDLang(to!string(header_clean));
+ return header_sdlang;
+}
+#+END_SRC
+
** source line array :array:
#+name: ao_markup_source_raw
#+BEGIN_SRC d
@@ -332,7 +452,7 @@ template SiSUmarkupRaw() {
auto raw = MarkupRawUnit();
auto t =
raw.markupSourceHeaderContentRawLineTupleArray(fn_src, rgx.src_pth);
- auto header_content_raw = t[0];
+ auto header_raw = t[0];
auto sourcefile_content = t[1];
if (match(fn_src, rgx.src_fn_master)) {
auto ins = Inserts();
@@ -341,16 +461,19 @@ template SiSUmarkupRaw() {
// auto ins = SiSUdocInserts.Inserts();
}
t = tuple(
- header_content_raw,
+ header_raw,
sourcefile_content
);
return t;
}
}
private
+ struct HeaderExtractSDL {
+ <<ao_header_extract_sdl>>
+ }
struct MarkupRawUnit {
private import std.file;
- enum State { off, on }
+ // enum State { off, on }
<<ao_markup_source_raw>>
}
struct Inserts {
@@ -385,3 +508,52 @@ template SiSUmarkupRaw() {
INSERTS?
[[./ao_scan_inserts.org][ao_scan_inserts]]
WORK AREA
+
+** config files: :ao_config_files.d:
+
+#+BEGIN_SRC d :tangle ../src/sdp/ao_read_config_files.d
+/+
+ ao_config_files.d
+ - read config files
++/
+template SiSUconfiguration() {
+ private import
+ std.exception,
+ // std.regex,
+ std.stdio,
+ std.utf,
+ std.conv : to;
+ // private import
+ // ao_rgx; // ao_defaults.d
+ // mixin RgxInit;
+ // auto rgx = Rgx();
+ private
+ struct Config {
+ private import std.file;
+ <<ao_config_file>>
+ }
+}
+#+END_SRC
+
+* figure out
+** break up file here to sisu markup content and header
+
+break up file here to sisu markup content and header
+
+*** header
+take master and single sst file, read in as header until the required header 0~
+keep separate (from content) for extraction of header metadata & make detail
+also now may be sdlang or old sisu markup!
+
+*** content
+from 0~ read in as content
+
+** what
+# #+NAME: sdp_each_file_do
+# #+BEGIN_SRC d
+/+ ↓ read file +/
+// auto conf = MarkupRaw();
+auto conf = Config();
+auto configfile_content =
+ conf.sourceConfig(fn_src);
+# #+END_SRC
diff --git a/org/output.org b/org/output.org
index 3575558..e8187d6 100644
--- a/org/output.org
+++ b/org/output.org
@@ -1537,29 +1537,6 @@ template SiSUoutputHub() {
}
#+END_SRC
-** dump
-
-template SiSUoutput() {
- struct SDPoutput {
- auto html(S)(
- auto ref const S contents,
- JSONValue[string] dochead_make,
- JSONValue[string] dochead_meta,
- string[][string][string] bookindex_unordered_hashes,
- JSONValue[] biblio,
- string fn_src,
- bool[string] opt_action_bool
- ) {
- mixin RgxInit;
- mixin ScreenTxtColors;
- auto rgx = Rgx();
- <<ao_output_html>>
- <<ao_output_html_summary>>
- }
- }
- }
-}
-
** head
<!DOCTYPE html>
<html>
diff --git a/org/sdp.org b/org/sdp.org
index 80b7a45..4d221bb 100644
--- a/org/sdp.org
+++ b/org/sdp.org
@@ -28,7 +28,7 @@ struct Version {
int minor;
int patch;
}
-enum ver = Version(0, 4, 1);
+enum ver = Version(0, 5, 0);
#+END_SRC
** pre loop init
@@ -60,7 +60,7 @@ import
compile_time_info, // sdp/compile_time_info.d
ao_abstract_doc_source, // sdp/ao_abstract_doc_source.d
ao_defaults, // sdp/ao_defaults.d
- ao_header_extract, // sdp/ao_header_extract.d
+ ao_read_config_files, // sdp/ao_read_config_files.d
ao_read_source_files, // sdp/ao_read_source_files.d
ao_output_debugs, // sdp/ao_output_debugs.d
ao_rgx, // sdp/ao_rgx.d
@@ -69,6 +69,16 @@ import
// std.conv;
#+END_SRC
+**** sdlang :import:sdlang:
+#+NAME: sdlang_imports_use
+#+BEGIN_SRC d
+/+ sdlang http://sdlang.org +/
+import sdlang; // sdlang.d
+ // sdlang.parser, // sdlang/parser.d
+ // sdlang.exceptions; // sdp/ao_ansi_colors.d
+ // // std.conv;
+#+END_SRC
+
**** std :import:std:
#+NAME: sdp_imports
#+BEGIN_SRC d
@@ -95,8 +105,9 @@ private import
std.conv : to;
#+END_SRC
-*** sdp output :output:
-#+NAME: sdp_args
+**** sdp output check selection :output:
+
+#+NAME: sdp_output_selection
#+BEGIN_SRC d
struct SDPoutput {
auto hub(S)(
@@ -163,10 +174,9 @@ mixin(import("version.txt"));
#+NAME: sdp_args
#+BEGIN_SRC d
mixin SiSUheaderSkel;
-mixin SiSUheaderExtract;
mixin SiSUbiblio;
mixin SiSUrgxInitFlags;
-// mixin SiSUconfiguration;
+mixin SiSUconfiguration;
mixin SiSUmarkupRaw;
mixin SiSUdocAbstraction;
mixin SiSUoutputDebugs;
@@ -179,7 +189,7 @@ mixin ScreenTxtColors;
#+NAME: sdp_args
#+BEGIN_SRC d
auto raw = MarkupRaw();
-auto head = HeaderDocMetadataMakeJson();
+auto headsdl = HeaderExtractSDL();
auto abs = Abstraction();
auto dbg = SDPoutputDebugs();
auto output = SDPoutput();
@@ -294,6 +304,62 @@ foreach(arg; args) {
}
#+END_SRC
+*** config files (load) :config:files:
+#+BEGIN_SRC text
+./.sisu ./_sisu ~/.sisu /etc/.sisu
+#+END_SRC
+
+#+BEGIN_SRC d
+// string[string] envVars = environment.toAA();
+// writeln(envVars);
+/+
+writefln(
+ "pwd: %s; home: %s",
+ environment["PWD"],
+ environment["HOME"]
+);
++/
+#+END_SRC
+
+**** config load
+#+NAME: sdp_config_files
+#+BEGIN_SRC d
+auto conf = Config();
+auto configuration = conf.readInConfigFile();
+#+END_SRC
+
+**** config read
+
+#+NAME: sdp_config_files
+#+BEGIN_SRC d
+/+ sdlang config +/
+Tag sdl_root_conf;
+try {
+ sdl_root_conf = parseSource(configuration);
+}
+catch(SDLangParseException e) {
+ stderr.writeln("SDLang problem with config.sdl content");
+ // Error messages of the form:
+ // myFile.sdl(5:28): Error: Invalid integer suffix.
+ stderr.writeln(e.msg);
+}
+debug(sdlang) {
+ // Value is a std.variant.Algebraic
+ Value output_dir_structure_by = sdl_root_conf.tags["output_dir_structure_by"][0].values[0];
+ assert(output_dir_structure_by.type == typeid(string));
+ writeln(output_dir_structure_by);
+
+ // Tag person = sdl_root_conf.namespaces["myNamespace"].tags["person"][0];
+ // writeln("Name: ", person.attributes["name"][0].value);
+ //
+ // int age = person.tags["age"][0].values[0].get!int();
+ // writeln("Age: ", age);
+
+ writeln("config SDL:");
+ writeln(sdl_root_conf.toSDLDocument());
+}
+#+END_SRC
+
** each file (loop) [+2] :loop:files:
*** filename provided [+1] :file:process:
**** loop scope :scope:
@@ -342,18 +408,15 @@ debug(header_and_content) {
**** [#A] read doc header: metadata & make :doc:header:metadata:make:
#+NAME: sdp_each_file_do
#+BEGIN_SRC d
-/+ ↓ headers metadata & make +/
-auto header_content = head.headerContentJSON(header);
-static assert(!isTypeTuple!(header_content));
-auto dochead_make_json = header_content[0];
-auto dochead_meta_json = header_content[1];
+/+ ↓ headers metadata & make sdlang +/
+auto header_sdlang = headsdl.headerSDLang(header);
#+END_SRC
**** [#A] processing: document abstraction, tuple :processing:
#+NAME: sdp_each_file_do
#+BEGIN_SRC d
/+ ↓ porcess document, return abstraction as tuple +/
-auto t = abs.abstract_doc_source(sourcefile_content, dochead_make_json, dochead_meta_json);
+auto t = abs.abstract_doc_source(sourcefile_content);
static assert(!isTypeTuple!(t));
auto doc_ao_contents = t[0]; // contents ~ endnotes ~ bookindex;
// static assert(!isIterable!(doc_ao_contents));
@@ -372,8 +435,8 @@ debug(checkdoc) { // checkbook & dumpdoc
doc_ao_contents,
doc_ao_bookindex_unordered_hashes,
doc_ao_biblio,
- dochead_make_json,
- dochead_meta_json,
+ // doc_ao_make_json,
+ // doc_ao_metadata_json,
fn_src,
opt_action_bool
);
@@ -436,13 +499,16 @@ break;
sdp.d
+/
<<sdp_imports_use>>
+<<sdlang_imports_use>>
<<sdp_imports>>
+<<sdp_output_selection>>
<<sdp_version_mixin>>
mixin CompileTimeInfo;
mixin RgxInit;
void main(string[] args) {
<<sdp_compilation>>
<<sdp_args>>
+ <<sdp_config_files>>
foreach(fn_src; fns_src) {
// foreach(fn_src; fns_src) {
if (!empty(fn_src)) {
@@ -505,3 +571,97 @@ figure out best program dir structure for dub and compilers, issue with rdmd
|---------------------+------------------------------------------+------------------------+--------|
| metadata | | (from regular header) | output |
|---------------------+------------------------------------------+------------------------+--------|
+
+** config :config:
+using sdlang in sdp
+*** sdp config and header? file format? sdl ? yml ? json ? :sdl:sdlang:
+
+[[https://sdlang.org/][SDL: Simple Declarative Language]] [[http://sdl4r.rubyforge.org/syntaxhighlighter_brush.html][highlighter]]
+https://github.com/Abscissa/SDLang-D
+https://github.com/Abscissa/SDLang-D/blob/master/HOWTO.md
+
+**** build/ compile
+
+The recommended way to use SDLang-D is via DUB. Just add a dependency to
+sdlang-d in your project's dub.json or dub.sdl file as shown here. Then simply
+build your project with DUB as usual. dub dependency
+http://code.dlang.org/packages/sdlang-d
+
+
+Alternatively, you can git clone both SDLang-D and the latest version of
+libInputVisitor,
+
+#+BEGIN_SRC sh :tangle no
+git clone https://github.com/Abscissa/SDLang-D
+git clone https://github.com/abscissa/libInputVisitor
+#+END_SRC
+
+ and when running the compiler include:
+
+#+BEGIN_SRC sh :tangle no
+ -I{path to SDLang-D}/src -I{path to libInputVisitor}/src
+#+END_SRC
+
+**** Importing
+
+To use SDL, first import the module sdlang:
+
+#+BEGIN_SRC d :tangle no
+import sdlang;
+#+END_SRC
+
+If you're not using DUB, then you must also include the path the SDLang-D sources when you compile:
+
+#+BEGIN_SRC sh :tangle no
+rdmd --build-only -I{path to sdlang}/src -I{path to libInputVisitor}/src {other flags} yourProgram.d
+#+END_SRC
+
+**** misc
+http://forum.dlang.org/thread/hphtqkkmrfnlcipnxzai@forum.dlang.org
+http://forum.dlang.org/thread/gnfctbuhiemidetngrzi@forum.dlang.org?page=23#post-rlxlfveyyzgewhkxhhta:40forum.dlang.org
+
+*** other links
+http://semitwist.com/sdlang-d-docs/v0.9.3/sdlang.html http://semitwist.com/sdlang-d-docs/
+
+** read markup files
+**** regular .sst
+relatively straight forward
+**** master .ssm
+master files are able to read in insert files .ssi and regular files .sst
+***** reading in .ssi files is straightforward
+***** reading in .sst files is more problematic
+.sst files have their own root (structure)
+either
+- the root needs to be disabled - not used
+or
+- the root tree needs to be demoted, which is only possible if markup for
+  heading D is not reached; then A - C could be demoted to B - D
+- the other issue is that it is common not to write out heading level A
+  text but to rely on the metadata for title and author; the problem is that
+  at present the header of .sst files that are imported is just lopped off and
+  thrown away. At least the title and author information for each imported .sst
+  file would have to be read and made available for use in its heading A that
+  is demoted to B
+
+** processing files, currently using utf8
+** src dir structure & files
+#+BEGIN_SRC txt :tangle no
+tree /home/ralph/sisu_www/current/src/democratizing_innovation.eric_von_hippel.sst
+
+/home/ralph/sisu_www/current/src/
+democratizing_innovation.eric_von_hippel.sst
+└── sisupod
+ ├── doc
+ │   ├── en
+ │   │   └── democratizing_innovation.eric_von_hippel.sst
+ │   └── _sisu
+ │   └── sisu_document_make // [interesting as part of larger conf.sdl]
+ └── image
+ ├── di_evh_f10-1.png
+ ├── di_evh_f11-1.png
+ ├── di_evh_f11-2.png
+ ├── di_evh_f1-1.png
+ ├── di_evh_f5-1.png
+ └── di_evh.png
+
+#+END_SRC
diff --git a/src/sdlang/ast.d b/src/sdlang/ast.d
new file mode 100644
index 0000000..7ad1c30
--- /dev/null
+++ b/src/sdlang/ast.d
@@ -0,0 +1,1834 @@
+// SDLang-D
+// Written in the D programming language.
+
+module sdlang.ast;
+
+import std.algorithm;
+import std.array;
+import std.conv;
+import std.range;
+import std.string;
+
+version(sdlangUnittest)
+version(unittest)
+{
+ import std.stdio;
+ import std.exception;
+}
+
+import sdlang.exception;
+import sdlang.token;
+import sdlang.util;
+
+class Attribute
+{
+ Value value;
+ Location location;
+
+ private Tag _parent;
+ /// Get parent tag. To set a parent, attach this Attribute to its intended
+ /// parent tag by calling 'Tag.add(...)', or by passing it to
+ /// the parent tag's constructor.
+ @property Tag parent()
+ {
+ return _parent;
+ }
+
+ private string _namespace;
+ @property string namespace()
+ {
+ return _namespace;
+ }
+ /// Not particularly efficient, but it works.
+ @property void namespace(string value)
+ {
+ if(_parent && _namespace != value)
+ {
+ // Remove
+ auto saveParent = _parent;
+ if(_parent)
+ this.remove();
+
+ // Change namespace
+ _namespace = value;
+
+ // Re-add
+ if(saveParent)
+ saveParent.add(this);
+ }
+ else
+ _namespace = value;
+ }
+
+ private string _name;
+ /// Not including namespace. Use 'fullName' if you want the namespace included.
+ @property string name()
+ {
+ return _name;
+ }
+ /// Not the most efficient, but it works.
+ @property void name(string value)
+ {
+ if(_parent && _name != value)
+ {
+ _parent.updateId++;
+
+ void removeFromGroupedLookup(string ns)
+ {
+ // Remove from _parent._attributes[ns]
+ auto sameNameAttrs = _parent._attributes[ns][_name];
+ auto targetIndex = sameNameAttrs.countUntil(this);
+ _parent._attributes[ns][_name].removeIndex(targetIndex);
+ }
+
+ // Remove from _parent._tags
+ removeFromGroupedLookup(_namespace);
+ removeFromGroupedLookup("*");
+
+ // Change name
+ _name = value;
+
+ // Add to new locations in _parent._attributes
+ _parent._attributes[_namespace][_name] ~= this;
+ _parent._attributes["*"][_name] ~= this;
+ }
+ else
+ _name = value;
+ }
+
+ @property string fullName()
+ {
+ return _namespace==""? _name : text(_namespace, ":", _name);
+ }
+
+ this(string namespace, string name, Value value, Location location = Location(0, 0, 0))
+ {
+ this._namespace = namespace;
+ this._name = name;
+ this.location = location;
+ this.value = value;
+ }
+
+ this(string name, Value value, Location location = Location(0, 0, 0))
+ {
+ this._namespace = "";
+ this._name = name;
+ this.location = location;
+ this.value = value;
+ }
+
+ /// Removes 'this' from its parent, if any. Returns 'this' for chaining.
+ /// Inefficient ATM, but it works.
+ Attribute remove()
+ {
+ if(!_parent)
+ return this;
+
+ void removeFromGroupedLookup(string ns)
+ {
+ // Remove from _parent._attributes[ns]
+ auto sameNameAttrs = _parent._attributes[ns][_name];
+ auto targetIndex = sameNameAttrs.countUntil(this);
+ _parent._attributes[ns][_name].removeIndex(targetIndex);
+ }
+
+ // Remove from _parent._attributes
+ removeFromGroupedLookup(_namespace);
+ removeFromGroupedLookup("*");
+
+ // Remove from _parent.allAttributes
+ auto allAttrsIndex = _parent.allAttributes.countUntil(this);
+ _parent.allAttributes.removeIndex(allAttrsIndex);
+
+ // Remove from _parent.attributeIndicies
+ auto sameNamespaceAttrs = _parent.attributeIndicies[_namespace];
+ auto attrIndiciesIndex = sameNamespaceAttrs.countUntil(allAttrsIndex);
+ _parent.attributeIndicies[_namespace].removeIndex(attrIndiciesIndex);
+
+ // Fixup other indicies
+ foreach(ns, ref nsAttrIndicies; _parent.attributeIndicies)
+ foreach(k, ref v; nsAttrIndicies)
+ if(v > allAttrsIndex)
+ v--;
+
+ _parent.removeNamespaceIfEmpty(_namespace);
+ _parent.updateId++;
+ _parent = null;
+ return this;
+ }
+
+ override bool opEquals(Object o)
+ {
+ auto a = cast(Attribute)o;
+ if(!a)
+ return false;
+
+ return
+ _namespace == a._namespace &&
+ _name == a._name &&
+ value == a.value;
+ }
+
+ string toSDLString()()
+ {
+ Appender!string sink;
+ this.toSDLString(sink);
+ return sink.data;
+ }
+
+ void toSDLString(Sink)(ref Sink sink) if(isOutputRange!(Sink,char))
+ {
+ if(_namespace != "")
+ {
+ sink.put(_namespace);
+ sink.put(':');
+ }
+
+ sink.put(_name);
+ sink.put('=');
+ value.toSDLString(sink);
+ }
+}
+
+class Tag
+{
+ Location location;
+ Value[] values;
+
+ private Tag _parent;
+ /// Get parent tag. To set a parent, attach this Tag to its intended
+ /// parent tag by calling 'Tag.add(...)', or by passing it to
+ /// the parent tag's constructor.
+ @property Tag parent()
+ {
+ return _parent;
+ }
+
+ private string _namespace;
+ @property string namespace()
+ {
+ return _namespace;
+ }
+ /// Not particularly efficient, but it works.
+ @property void namespace(string value)
+ {
+ if(_parent && _namespace != value)
+ {
+ // Remove
+ auto saveParent = _parent;
+ if(_parent)
+ this.remove();
+
+ // Change namespace
+ _namespace = value;
+
+ // Re-add
+ if(saveParent)
+ saveParent.add(this);
+ }
+ else
+ _namespace = value;
+ }
+
+ private string _name;
+ /// Not including namespace. Use 'fullName' if you want the namespace included.
+ @property string name()
+ {
+ return _name;
+ }
+ /// Not the most efficient, but it works.
+ @property void name(string value)
+ {
+ if(_parent && _name != value)
+ {
+ _parent.updateId++;
+
+ void removeFromGroupedLookup(string ns)
+ {
+ // Remove from _parent._tags[ns]
+ auto sameNameTags = _parent._tags[ns][_name];
+ auto targetIndex = sameNameTags.countUntil(this);
+ _parent._tags[ns][_name].removeIndex(targetIndex);
+ }
+
+ // Remove from _parent._tags
+ removeFromGroupedLookup(_namespace);
+ removeFromGroupedLookup("*");
+
+ // Change name
+ _name = value;
+
+ // Add to new locations in _parent._tags
+ _parent._tags[_namespace][_name] ~= this;
+ _parent._tags["*"][_name] ~= this;
+ }
+ else
+ _name = value;
+ }
+
+ /// This tag's name, including namespace if one exists.
+ @property string fullName()
+ {
+ return _namespace==""? _name : text(_namespace, ":", _name);
+ }
+
+ // Tracks dirtiness. This is incremented every time a change is made which
+ // could invalidate existing ranges. This way, the ranges can detect when
+ // they've been invalidated.
+ private size_t updateId=0;
+
+ this(Tag parent = null)
+ {
+ if(parent)
+ parent.add(this);
+ }
+
+ this(
+ string namespace, string name,
+ Value[] values=null, Attribute[] attributes=null, Tag[] children=null
+ )
+ {
+ this(null, namespace, name, values, attributes, children);
+ }
+
+ this(
+ Tag parent, string namespace, string name,
+ Value[] values=null, Attribute[] attributes=null, Tag[] children=null
+ )
+ {
+ this._namespace = namespace;
+ this._name = name;
+
+ if(parent)
+ parent.add(this);
+
+ this.values = values;
+ this.add(attributes);
+ this.add(children);
+ }
+
+ private Attribute[] allAttributes; // In same order as specified in SDL file.
+ private Tag[] allTags; // In same order as specified in SDL file.
+ private string[] allNamespaces; // In same order as specified in SDL file.
+
+ private size_t[][string] attributeIndicies; // allAttributes[ attributes[namespace][i] ]
+ private size_t[][string] tagIndicies; // allTags[ tags[namespace][i] ]
+
+ private Attribute[][string][string] _attributes; // attributes[namespace or "*"][name][i]
+ private Tag[][string][string] _tags; // tags[namespace or "*"][name][i]
+
+ /// Adds a Value, Attribute, Tag (or array of such) as a member/child of this Tag.
+ /// Returns 'this' for chaining.
+ /// Throws 'SDLangValidationException' if trying to add an Attribute or Tag
+ /// that already has a parent.
+ Tag add(Value val)
+ {
+ values ~= val;
+ updateId++;
+ return this;
+ }
+
+ ///ditto
+ Tag add(Value[] vals)
+ {
+ foreach(val; vals)
+ add(val);
+
+ return this;
+ }
+
+ ///ditto
+ Tag add(Attribute attr)
+ {
+ if(attr._parent)
+ {
+ throw new SDLangValidationException(
+ "Attribute is already attached to a parent tag. "~
+ "Use Attribute.remove() before adding it to another tag."
+ );
+ }
+
+ if(!allNamespaces.canFind(attr._namespace))
+ allNamespaces ~= attr._namespace;
+
+ attr._parent = this;
+
+ allAttributes ~= attr;
+ attributeIndicies[attr._namespace] ~= allAttributes.length-1;
+ _attributes[attr._namespace][attr._name] ~= attr;
+ _attributes["*"] [attr._name] ~= attr;
+
+ updateId++;
+ return this;
+ }
+
+ ///ditto
+ Tag add(Attribute[] attrs)
+ {
+ foreach(attr; attrs)
+ add(attr);
+
+ return this;
+ }
+
+ ///ditto
+ Tag add(Tag tag)
+ {
+ if(tag._parent)
+ {
+ throw new SDLangValidationException(
+ "Tag is already attached to a parent tag. "~
+ "Use Tag.remove() before adding it to another tag."
+ );
+ }
+
+ if(!allNamespaces.canFind(tag._namespace))
+ allNamespaces ~= tag._namespace;
+
+ tag._parent = this;
+
+ allTags ~= tag;
+ tagIndicies[tag._namespace] ~= allTags.length-1;
+ _tags[tag._namespace][tag._name] ~= tag;
+ _tags["*"] [tag._name] ~= tag;
+
+ updateId++;
+ return this;
+ }
+
+ ///ditto
+ Tag add(Tag[] tags)
+ {
+ foreach(tag; tags)
+ add(tag);
+
+ return this;
+ }
+
+ /// Removes 'this' from its parent, if any. Returns 'this' for chaining.
+ /// Inefficient ATM, but it works.
+ Tag remove()
+ {
+ if(!_parent)
+ return this;
+
+ void removeFromGroupedLookup(string ns)
+ {
+ // Remove from _parent._tags[ns]
+ auto sameNameTags = _parent._tags[ns][_name];
+ auto targetIndex = sameNameTags.countUntil(this);
+ _parent._tags[ns][_name].removeIndex(targetIndex);
+ }
+
+ // Remove from _parent._tags
+ removeFromGroupedLookup(_namespace);
+ removeFromGroupedLookup("*");
+
+ // Remove from _parent.allTags
+ auto allTagsIndex = _parent.allTags.countUntil(this);
+ _parent.allTags.removeIndex(allTagsIndex);
+
+ // Remove from _parent.tagIndicies
+ auto sameNamespaceTags = _parent.tagIndicies[_namespace];
+ auto tagIndiciesIndex = sameNamespaceTags.countUntil(allTagsIndex);
+ _parent.tagIndicies[_namespace].removeIndex(tagIndiciesIndex);
+
+ // Fixup other indicies
+ foreach(ns, ref nsTagIndicies; _parent.tagIndicies)
+ foreach(k, ref v; nsTagIndicies)
+ if(v > allTagsIndex)
+ v--;
+
+ _parent.removeNamespaceIfEmpty(_namespace);
+ _parent.updateId++;
+ _parent = null;
+ return this;
+ }
+
+ private void removeNamespaceIfEmpty(string namespace)
+ {
+ // If namespace has no attributes, remove it from attributeIndicies/_attributes
+ if(namespace in attributeIndicies && attributeIndicies[namespace].length == 0)
+ {
+ attributeIndicies.remove(namespace);
+ _attributes.remove(namespace);
+ }
+
+ // If namespace has no tags, remove it from tagIndicies/_tags
+ if(namespace in tagIndicies && tagIndicies[namespace].length == 0)
+ {
+ tagIndicies.remove(namespace);
+ _tags.remove(namespace);
+ }
+
+ // If namespace is now empty, remove it from allNamespaces
+ if(
+ namespace !in tagIndicies &&
+ namespace !in attributeIndicies
+ )
+ {
+ auto allNamespacesIndex = allNamespaces.length - allNamespaces.find(namespace).length;
+ allNamespaces = allNamespaces[0..allNamespacesIndex] ~ allNamespaces[allNamespacesIndex+1..$];
+ }
+ }
+
+ struct NamedMemberRange(T, string membersGrouped)
+ {
+ private Tag tag;
+ private string namespace; // "*" indicates "all namespaces" (ok since it's not a valid namespace name)
+ private string name;
+ private size_t updateId; // Tag's updateId when this range was created.
+
+ this(Tag tag, string namespace, string name, size_t updateId)
+ {
+ this.tag = tag;
+ this.namespace = namespace;
+ this.name = name;
+ this.updateId = updateId;
+ frontIndex = 0;
+
+ if(
+ namespace in mixin("tag."~membersGrouped) &&
+ name in mixin("tag."~membersGrouped~"[namespace]")
+ )
+ endIndex = mixin("tag."~membersGrouped~"[namespace][name].length");
+ else
+ endIndex = 0;
+ }
+
+ invariant()
+ {
+ assert(
+ this.updateId == tag.updateId,
+ "This range has been invalidated by a change to the tag."
+ );
+ }
+
+ @property bool empty()
+ {
+ return frontIndex == endIndex;
+ }
+
+ private size_t frontIndex;
+ @property T front()
+ {
+ return this[0];
+ }
+ void popFront()
+ {
+ if(empty)
+ throw new SDLangRangeException("Range is empty");
+
+ frontIndex++;
+ }
+
+ private size_t endIndex; // One past the last element
+ @property T back()
+ {
+ return this[$-1];
+ }
+ void popBack()
+ {
+ if(empty)
+ throw new SDLangRangeException("Range is empty");
+
+ endIndex--;
+ }
+
+ alias length opDollar;
+ @property size_t length()
+ {
+ return endIndex - frontIndex;
+ }
+
+ @property typeof(this) save()
+ {
+ auto r = typeof(this)(this.tag, this.namespace, this.name, this.updateId);
+ r.frontIndex = this.frontIndex;
+ r.endIndex = this.endIndex;
+ return r;
+ }
+
+ typeof(this) opSlice()
+ {
+ return save();
+ }
+
+ typeof(this) opSlice(size_t start, size_t end)
+ {
+ auto r = save();
+ r.frontIndex = this.frontIndex + start;
+ r.endIndex = this.frontIndex + end;
+
+ if(
+ r.frontIndex > this.endIndex ||
+ r.endIndex > this.endIndex ||
+ r.frontIndex > r.endIndex
+ )
+ throw new SDLangRangeException("Slice out of range");
+
+ return r;
+ }
+
+ T opIndex(size_t index)
+ {
+ if(empty)
+ throw new SDLangRangeException("Range is empty");
+
+ return mixin("tag."~membersGrouped~"[namespace][name][frontIndex+index]");
+ }
+ }
+
+ struct MemberRange(T, string allMembers, string memberIndicies, string membersGrouped)
+ {
+ private Tag tag;
+ private string namespace; // "*" indicates "all namespaces" (ok since it's not a valid namespace name)
+ private bool isMaybe;
+ private size_t updateId; // Tag's updateId when this range was created.
+ private size_t initialEndIndex;
+
+ this(Tag tag, string namespace, bool isMaybe)
+ {
+ this.tag = tag;
+ this.namespace = namespace;
+ this.updateId = tag.updateId;
+ this.isMaybe = isMaybe;
+ frontIndex = 0;
+
+ if(namespace == "*")
+ initialEndIndex = mixin("tag."~allMembers~".length");
+ else if(namespace in mixin("tag."~memberIndicies))
+ initialEndIndex = mixin("tag."~memberIndicies~"[namespace].length");
+ else
+ initialEndIndex = 0;
+
+ endIndex = initialEndIndex;
+ }
+
+ invariant()
+ {
+ assert(
+ this.updateId == tag.updateId,
+ "This range has been invalidated by a change to the tag."
+ );
+ }
+
+ @property bool empty()
+ {
+ return frontIndex == endIndex;
+ }
+
+ private size_t frontIndex;
+ @property T front()
+ {
+ return this[0];
+ }
+ void popFront()
+ {
+ if(empty)
+ throw new SDLangRangeException("Range is empty");
+
+ frontIndex++;
+ }
+
+ private size_t endIndex; // One past the last element
+ @property T back()
+ {
+ return this[$-1];
+ }
+ void popBack()
+ {
+ if(empty)
+ throw new SDLangRangeException("Range is empty");
+
+ endIndex--;
+ }
+
+ alias length opDollar;
+ @property size_t length()
+ {
+ return endIndex - frontIndex;
+ }
+
+ @property typeof(this) save()
+ {
+ auto r = typeof(this)(this.tag, this.namespace, this.isMaybe);
+ r.frontIndex = this.frontIndex;
+ r.endIndex = this.endIndex;
+ r.initialEndIndex = this.initialEndIndex;
+ r.updateId = this.updateId;
+ return r;
+ }
+
+ typeof(this) opSlice()
+ {
+ return save();
+ }
+
+ typeof(this) opSlice(size_t start, size_t end)
+ {
+ auto r = save();
+ r.frontIndex = this.frontIndex + start;
+ r.endIndex = this.frontIndex + end;
+
+ if(
+ r.frontIndex > this.endIndex ||
+ r.endIndex > this.endIndex ||
+ r.frontIndex > r.endIndex
+ )
+ throw new SDLangRangeException("Slice out of range");
+
+ return r;
+ }
+
+ T opIndex(size_t index)
+ {
+ if(empty)
+ throw new SDLangRangeException("Range is empty");
+
+ if(namespace == "*")
+ return mixin("tag."~allMembers~"[ frontIndex+index ]");
+ else
+ return mixin("tag."~allMembers~"[ tag."~memberIndicies~"[namespace][frontIndex+index] ]");
+ }
+
+ alias NamedMemberRange!(T,membersGrouped) ThisNamedMemberRange;
+ ThisNamedMemberRange opIndex(string name)
+ {
+ if(frontIndex != 0 || endIndex != initialEndIndex)
+ {
+ throw new SDLangRangeException(
+ "Cannot lookup tags/attributes by name on a subset of a range, "~
+ "only across the entire tag. "~
+ "Please make sure you haven't called popFront or popBack on this "~
+ "range and that you aren't using a slice of the range."
+ );
+ }
+
+ if(!isMaybe && empty)
+ throw new SDLangRangeException("Range is empty");
+
+ if(!isMaybe && name !in this)
+ throw new SDLangRangeException(`No such `~T.stringof~` named: "`~name~`"`);
+
+ return ThisNamedMemberRange(tag, namespace, name, updateId);
+ }
+
+ bool opBinaryRight(string op)(string name) if(op=="in")
+ {
+ if(frontIndex != 0 || endIndex != initialEndIndex)
+ {
+ throw new SDLangRangeException(
+ "Cannot lookup tags/attributes by name on a subset of a range, "~
+ "only across the entire tag. "~
+ "Please make sure you haven't called popFront or popBack on this "~
+ "range and that you aren't using a slice of the range."
+ );
+ }
+
+ return
+ namespace in mixin("tag."~membersGrouped) &&
+ name in mixin("tag."~membersGrouped~"[namespace]") &&
+ mixin("tag."~membersGrouped~"[namespace][name].length") > 0;
+ }
+ }
+
+ struct NamespaceRange
+ {
+ private Tag tag;
+ private bool isMaybe;
+ private size_t updateId; // Tag's updateId when this range was created.
+
+ this(Tag tag, bool isMaybe)
+ {
+ this.tag = tag;
+ this.isMaybe = isMaybe;
+ this.updateId = tag.updateId;
+ frontIndex = 0;
+ endIndex = tag.allNamespaces.length;
+ }
+
+ invariant()
+ {
+ assert(
+ this.updateId == tag.updateId,
+ "This range has been invalidated by a change to the tag."
+ );
+ }
+
+ @property bool empty()
+ {
+ return frontIndex == endIndex;
+ }
+
+ private size_t frontIndex;
+ @property NamespaceAccess front()
+ {
+ return this[0];
+ }
+ void popFront()
+ {
+ if(empty)
+ throw new SDLangRangeException("Range is empty");
+
+ frontIndex++;
+ }
+
+ private size_t endIndex; // One past the last element
+ @property NamespaceAccess back()
+ {
+ return this[$-1];
+ }
+ void popBack()
+ {
+ if(empty)
+ throw new SDLangRangeException("Range is empty");
+
+ endIndex--;
+ }
+
+ alias length opDollar;
+ @property size_t length()
+ {
+ return endIndex - frontIndex;
+ }
+
+ @property NamespaceRange save()
+ {
+ auto r = NamespaceRange(this.tag, this.isMaybe);
+ r.frontIndex = this.frontIndex;
+ r.endIndex = this.endIndex;
+ r.updateId = this.updateId;
+ return r;
+ }
+
+ typeof(this) opSlice()
+ {
+ return save();
+ }
+
+ typeof(this) opSlice(size_t start, size_t end)
+ {
+ auto r = save();
+ r.frontIndex = this.frontIndex + start;
+ r.endIndex = this.frontIndex + end;
+
+ if(
+ r.frontIndex > this.endIndex ||
+ r.endIndex > this.endIndex ||
+ r.frontIndex > r.endIndex
+ )
+ throw new SDLangRangeException("Slice out of range");
+
+ return r;
+ }
+
+ NamespaceAccess opIndex(size_t index)
+ {
+ if(empty)
+ throw new SDLangRangeException("Range is empty");
+
+ auto namespace = tag.allNamespaces[frontIndex+index];
+ return NamespaceAccess(
+ namespace,
+ AttributeRange(tag, namespace, isMaybe),
+ TagRange(tag, namespace, isMaybe)
+ );
+ }
+
+ NamespaceAccess opIndex(string namespace)
+ {
+ if(!isMaybe && empty)
+ throw new SDLangRangeException("Range is empty");
+
+ if(!isMaybe && namespace !in this)
+ throw new SDLangRangeException(`No such namespace: "`~namespace~`"`);
+
+ return NamespaceAccess(
+ namespace,
+ AttributeRange(tag, namespace, isMaybe),
+ TagRange(tag, namespace, isMaybe)
+ );
+ }
+
+ /// Inefficient when range is a slice or has used popFront/popBack, but it works.
+ bool opBinaryRight(string op)(string namespace) if(op=="in")
+ {
+ if(frontIndex == 0 && endIndex == tag.allNamespaces.length)
+ {
+ return
+ namespace in tag.attributeIndicies ||
+ namespace in tag.tagIndicies;
+ }
+ else
+ // Slower fallback method
+ return tag.allNamespaces[frontIndex..endIndex].canFind(namespace);
+ }
+ }
+
+ struct NamespaceAccess
+ {
+ string name;
+ AttributeRange attributes;
+ TagRange tags;
+ }
+
+ alias MemberRange!(Attribute, "allAttributes", "attributeIndicies", "_attributes") AttributeRange;
+ alias MemberRange!(Tag, "allTags", "tagIndicies", "_tags" ) TagRange;
+ static assert(isRandomAccessRange!AttributeRange);
+ static assert(isRandomAccessRange!TagRange);
+ static assert(isRandomAccessRange!NamespaceRange);
+
+ /// Access all attributes that don't have a namespace
+ @property AttributeRange attributes()
+ {
+ return AttributeRange(this, "", false);
+ }
+
+ /// Access all direct-child tags that don't have a namespace
+ @property TagRange tags()
+ {
+ return TagRange(this, "", false);
+ }
+
+ /// Access all namespaces in this tag, and the attributes/tags within them.
+ @property NamespaceRange namespaces()
+ {
+ return NamespaceRange(this, false);
+ }
+
+ /// Access all attributes and tags regardless of namespace.
+ @property NamespaceAccess all()
+ {
+ // "*" isn't a valid namespace name, so we can use it to indicate "all namespaces"
+ return NamespaceAccess(
+ "*",
+ AttributeRange(this, "*", false),
+ TagRange(this, "*", false)
+ );
+ }
+
+ struct MaybeAccess
+ {
+ Tag tag;
+
+ /// Access all attributes that don't have a namespace
+ @property AttributeRange attributes()
+ {
+ return AttributeRange(tag, "", true);
+ }
+
+ /// Access all direct-child tags that don't have a namespace
+ @property TagRange tags()
+ {
+ return TagRange(tag, "", true);
+ }
+
+ /// Access all namespaces in this tag, and the attributes/tags within them.
+ @property NamespaceRange namespaces()
+ {
+ return NamespaceRange(tag, true);
+ }
+
+ /// Access all attributes and tags regardless of namespace.
+ @property NamespaceAccess all()
+ {
+ // "*" isn't a valid namespace name, so we can use it to indicate "all namespaces"
+ return NamespaceAccess(
+ "*",
+ AttributeRange(tag, "*", true),
+ TagRange(tag, "*", true)
+ );
+ }
+ }
+
+ /// Access 'attributes', 'tags', 'namespaces' and 'all' like normal,
+ /// except that looking up a non-existant name/namespace with
+ /// opIndex(string) results in an empty array instead of a thrown SDLangRangeException.
+ @property MaybeAccess maybe()
+ {
+ return MaybeAccess(this);
+ }
+
+ override bool opEquals(Object o)
+ {
+ auto t = cast(Tag)o;
+ if(!t)
+ return false;
+
+ if(_namespace != t._namespace || _name != t._name)
+ return false;
+
+ if(
+ values .length != t.values .length ||
+ allAttributes .length != t.allAttributes.length ||
+ allNamespaces .length != t.allNamespaces.length ||
+ allTags .length != t.allTags .length
+ )
+ return false;
+
+ if(values != t.values)
+ return false;
+
+ if(allNamespaces != t.allNamespaces)
+ return false;
+
+ if(allAttributes != t.allAttributes)
+ return false;
+
+ // Ok because cycles are not allowed
+ //TODO: Actually check for or prevent cycles.
+ return allTags == t.allTags;
+ }
+
+ /// Treats 'this' as the root tag. Note that root tags cannot have
+ /// values or attributes, and cannot be part of a namespace.
+ /// If this isn't a valid root tag, 'SDLangValidationException' will be thrown.
+ string toSDLDocument()(string indent="\t", int indentLevel=0)
+ {
+ Appender!string sink;
+ toSDLDocument(sink, indent, indentLevel);
+ return sink.data;
+ }
+
+ ///ditto
+ void toSDLDocument(Sink)(ref Sink sink, string indent="\t", int indentLevel=0)
+ if(isOutputRange!(Sink,char))
+ {
+ if(values.length > 0)
+ throw new SDLangValidationException("Root tags cannot have any values, only child tags.");
+
+ if(allAttributes.length > 0)
+ throw new SDLangValidationException("Root tags cannot have any attributes, only child tags.");
+
+ if(_namespace != "")
+ throw new SDLangValidationException("Root tags cannot have a namespace.");
+
+ foreach(tag; allTags)
+ tag.toSDLString(sink, indent, indentLevel);
+ }
+
+ /// Output this entire tag in SDL format. Does *not* treat 'this' as
+ /// a root tag. If you intend this to be the root of a standard SDL
+ /// document, use 'toSDLDocument' instead.
+ string toSDLString()(string indent="\t", int indentLevel=0)
+ {
+ Appender!string sink;
+ toSDLString(sink, indent, indentLevel);
+ return sink.data;
+ }
+
+ ///ditto
+ void toSDLString(Sink)(ref Sink sink, string indent="\t", int indentLevel=0)
+ if(isOutputRange!(Sink,char))
+ {
+ if(_name == "" && values.length == 0)
+ throw new SDLangValidationException("Anonymous tags must have at least one value.");
+
+ if(_name == "" && _namespace != "")
+ throw new SDLangValidationException("Anonymous tags cannot have a namespace.");
+
+ // Indent
+ foreach(i; 0..indentLevel)
+ sink.put(indent);
+
+ // Name
+ if(_namespace != "")
+ {
+ sink.put(_namespace);
+ sink.put(':');
+ }
+ sink.put(_name);
+
+ // Values
+ foreach(i, v; values)
+ {
+ // Omit the first space for anonymous tags
+ if(_name != "" || i > 0)
+ sink.put(' ');
+
+ v.toSDLString(sink);
+ }
+
+ // Attributes
+ foreach(attr; allAttributes)
+ {
+ sink.put(' ');
+ attr.toSDLString(sink);
+ }
+
+ // Child tags
+ bool foundChild=false;
+ foreach(tag; allTags)
+ {
+ if(!foundChild)
+ {
+ sink.put(" {\n");
+ foundChild = true;
+ }
+
+ tag.toSDLString(sink, indent, indentLevel+1);
+ }
+ if(foundChild)
+ {
+ foreach(i; 0..indentLevel)
+ sink.put(indent);
+
+ sink.put("}\n");
+ }
+ else
+ sink.put("\n");
+ }
+
+ /// Not the most efficient, but it works.
+ string toDebugString()
+ {
+ import std.algorithm : sort;
+
+ Appender!string buf;
+
+ buf.put("\n");
+ buf.put("Tag ");
+ if(_namespace != "")
+ {
+ buf.put("[");
+ buf.put(_namespace);
+ buf.put("]");
+ }
+ buf.put("'%s':\n".format(_name));
+
+ // Values
+ foreach(val; values)
+ buf.put(" (%s): %s\n".format(.toString(val.type), val));
+
+ // Attributes
+ foreach(attrNamespace; _attributes.keys.sort())
+ if(attrNamespace != "*")
+ foreach(attrName; _attributes[attrNamespace].keys.sort())
+ foreach(attr; _attributes[attrNamespace][attrName])
+ {
+ string namespaceStr;
+ if(attr._namespace != "")
+ namespaceStr = "["~attr._namespace~"]";
+
+ buf.put(
+ " %s%s(%s): %s\n".format(
+ namespaceStr, attr._name, .toString(attr.value.type), attr.value
+ )
+ );
+ }
+
+ // Children
+ foreach(tagNamespace; _tags.keys.sort())
+ if(tagNamespace != "*")
+ foreach(tagName; _tags[tagNamespace].keys.sort())
+ foreach(tag; _tags[tagNamespace][tagName])
+ buf.put( tag.toDebugString().replace("\n", "\n ") );
+
+ return buf.data;
+ }
+}
+
+version(sdlangUnittest)
+{
+ private void testRandomAccessRange(R, E)(R range, E[] expected, bool function(E, E) equals=null)
+ {
+ static assert(isRandomAccessRange!R);
+ static assert(is(ElementType!R == E));
+ static assert(hasLength!R);
+ static assert(!isInfinite!R);
+
+ assert(range.length == expected.length);
+ if(range.length == 0)
+ {
+ assert(range.empty);
+ return;
+ }
+
+ static bool defaultEquals(E e1, E e2)
+ {
+ return e1 == e2;
+ }
+ if(equals is null)
+ equals = &defaultEquals;
+
+ assert(equals(range.front, expected[0]));
+ assert(equals(range.front, expected[0])); // Ensure consistent result from '.front'
+ assert(equals(range.front, expected[0])); // Ensure consistent result from '.front'
+
+ assert(equals(range.back, expected[$-1]));
+ assert(equals(range.back, expected[$-1])); // Ensure consistent result from '.back'
+ assert(equals(range.back, expected[$-1])); // Ensure consistent result from '.back'
+
+ // Forward iteration
+ auto original = range.save;
+ auto r2 = range.save;
+ foreach(i; 0..expected.length)
+ {
+ //trace("Forward iteration: ", i);
+
+ // Test length/empty
+ assert(range.length == expected.length - i);
+ assert(range.length == r2.length);
+ assert(!range.empty);
+ assert(!r2.empty);
+
+ // Test front
+ assert(equals(range.front, expected[i]));
+ assert(equals(range.front, r2.front));
+
+ // Test back
+ assert(equals(range.back, expected[$-1]));
+ assert(equals(range.back, r2.back));
+
+ // Test opIndex(0)
+ assert(equals(range[0], expected[i]));
+ assert(equals(range[0], r2[0]));
+
+ // Test opIndex($-1)
+ assert(equals(range[$-1], expected[$-1]));
+ assert(equals(range[$-1], r2[$-1]));
+
+ // Test popFront
+ range.popFront();
+ assert(range.length == r2.length - 1);
+ r2.popFront();
+ assert(range.length == r2.length);
+ }
+ assert(range.empty);
+ assert(r2.empty);
+ assert(original.length == expected.length);
+
+ // Backwards iteration
+ range = original.save;
+ r2 = original.save;
+ foreach(i; iota(0, expected.length).retro())
+ {
+ //trace("Backwards iteration: ", i);
+
+ // Test length/empty
+ assert(range.length == i+1);
+ assert(range.length == r2.length);
+ assert(!range.empty);
+ assert(!r2.empty);
+
+ // Test front
+ assert(equals(range.front, expected[0]));
+ assert(equals(range.front, r2.front));
+
+ // Test back
+ assert(equals(range.back, expected[i]));
+ assert(equals(range.back, r2.back));
+
+ // Test opIndex(0)
+ assert(equals(range[0], expected[0]));
+ assert(equals(range[0], r2[0]));
+
+ // Test opIndex($-1)
+ assert(equals(range[$-1], expected[i]));
+ assert(equals(range[$-1], r2[$-1]));
+
+ // Test popBack
+ range.popBack();
+ assert(range.length == r2.length - 1);
+ r2.popBack();
+ assert(range.length == r2.length);
+ }
+ assert(range.empty);
+ assert(r2.empty);
+ assert(original.length == expected.length);
+
+ // Random access
+ range = original.save;
+ r2 = original.save;
+ foreach(i; 0..expected.length)
+ {
+ //trace("Random access: ", i);
+
+ // Test length/empty
+ assert(range.length == expected.length);
+ assert(range.length == r2.length);
+ assert(!range.empty);
+ assert(!r2.empty);
+
+ // Test front
+ assert(equals(range.front, expected[0]));
+ assert(equals(range.front, r2.front));
+
+ // Test back
+ assert(equals(range.back, expected[$-1]));
+ assert(equals(range.back, r2.back));
+
+ // Test opIndex(i)
+ assert(equals(range[i], expected[i]));
+ assert(equals(range[i], r2[i]));
+ }
+ assert(!range.empty);
+ assert(!r2.empty);
+ assert(original.length == expected.length);
+ }
+}
+
+version(sdlangUnittest)
+unittest
+{
+ import sdlang.parser;
+ writeln("Unittesting sdlang ast...");
+ stdout.flush();
+
+ Tag root;
+ root = parseSource("");
+ testRandomAccessRange(root.attributes, cast( Attribute[])[]);
+ testRandomAccessRange(root.tags, cast( Tag[])[]);
+ testRandomAccessRange(root.namespaces, cast(Tag.NamespaceAccess[])[]);
+
+ root = parseSource(`
+ blue 3 "Lee" isThree=true
+ blue 5 "Chan" 12345 isThree=false
+ stuff:orange 1 2 3 2 1
+ stuff:square points=4 dimensions=2 points="Still four"
+ stuff:triangle data:points=3 data:dimensions=2
+ nothing
+ namespaces small:A=1 med:A=2 big:A=3 small:B=10 big:B=30
+
+ people visitor:a=1 b=2 {
+ chiyo "Small" "Flies?" nemesis="Car" score=100
+ yukari
+ visitor:sana
+ tomo
+ visitor:hayama
+ }
+ `);
+
+ auto blue3 = new Tag(
+ null, "", "blue",
+ [ Value(3), Value("Lee") ],
+ [ new Attribute("isThree", Value(true)) ],
+ null
+ );
+ auto blue5 = new Tag(
+ null, "", "blue",
+ [ Value(5), Value("Chan"), Value(12345) ],
+ [ new Attribute("isThree", Value(false)) ],
+ null
+ );
+ auto orange = new Tag(
+ null, "stuff", "orange",
+ [ Value(1), Value(2), Value(3), Value(2), Value(1) ],
+ null,
+ null
+ );
+ auto square = new Tag(
+ null, "stuff", "square",
+ null,
+ [
+ new Attribute("points", Value(4)),
+ new Attribute("dimensions", Value(2)),
+ new Attribute("points", Value("Still four")),
+ ],
+ null
+ );
+ auto triangle = new Tag(
+ null, "stuff", "triangle",
+ null,
+ [
+ new Attribute("data", "points", Value(3)),
+ new Attribute("data", "dimensions", Value(2)),
+ ],
+ null
+ );
+ auto nothing = new Tag(
+ null, "", "nothing",
+ null, null, null
+ );
+ auto namespaces = new Tag(
+ null, "", "namespaces",
+ null,
+ [
+ new Attribute("small", "A", Value(1)),
+ new Attribute("med", "A", Value(2)),
+ new Attribute("big", "A", Value(3)),
+ new Attribute("small", "B", Value(10)),
+ new Attribute("big", "B", Value(30)),
+ ],
+ null
+ );
+ auto chiyo = new Tag(
+ null, "", "chiyo",
+ [ Value("Small"), Value("Flies?") ],
+ [
+ new Attribute("nemesis", Value("Car")),
+ new Attribute("score", Value(100)),
+ ],
+ null
+ );
+ auto chiyo_ = new Tag(
+ null, "", "chiyo_",
+ [ Value("Small"), Value("Flies?") ],
+ [
+ new Attribute("nemesis", Value("Car")),
+ new Attribute("score", Value(100)),
+ ],
+ null
+ );
+ auto yukari = new Tag(
+ null, "", "yukari",
+ null, null, null
+ );
+ auto sana = new Tag(
+ null, "visitor", "sana",
+ null, null, null
+ );
+ auto sana_ = new Tag(
+ null, "visitor", "sana_",
+ null, null, null
+ );
+ auto sanaVisitor_ = new Tag(
+ null, "visitor_", "sana_",
+ null, null, null
+ );
+ auto tomo = new Tag(
+ null, "", "tomo",
+ null, null, null
+ );
+ auto hayama = new Tag(
+ null, "visitor", "hayama",
+ null, null, null
+ );
+ auto people = new Tag(
+ null, "", "people",
+ null,
+ [
+ new Attribute("visitor", "a", Value(1)),
+ new Attribute("b", Value(2)),
+ ],
+ [chiyo, yukari, sana, tomo, hayama]
+ );
+
+ assert(blue3 .opEquals( blue3 ));
+ assert(blue5 .opEquals( blue5 ));
+ assert(orange .opEquals( orange ));
+ assert(square .opEquals( square ));
+ assert(triangle .opEquals( triangle ));
+ assert(nothing .opEquals( nothing ));
+ assert(namespaces .opEquals( namespaces ));
+ assert(people .opEquals( people ));
+ assert(chiyo .opEquals( chiyo ));
+ assert(yukari .opEquals( yukari ));
+ assert(sana .opEquals( sana ));
+ assert(tomo .opEquals( tomo ));
+ assert(hayama .opEquals( hayama ));
+
+ assert(!blue3.opEquals(orange));
+ assert(!blue3.opEquals(people));
+ assert(!blue3.opEquals(sana));
+ assert(!blue3.opEquals(blue5));
+ assert(!blue5.opEquals(blue3));
+
+ alias Tag.NamespaceAccess NSA;
+ static bool namespaceEquals(NSA n1, NSA n2)
+ {
+ return n1.name == n2.name;
+ }
+
+ testRandomAccessRange(root.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(root.tags, [blue3, blue5, nothing, namespaces, people]);
+ testRandomAccessRange(root.namespaces, [NSA(""), NSA("stuff")], &namespaceEquals);
+ testRandomAccessRange(root.namespaces[0].tags, [blue3, blue5, nothing, namespaces, people]);
+ testRandomAccessRange(root.namespaces[1].tags, [orange, square, triangle]);
+ assert("" in root.namespaces);
+ assert("stuff" in root.namespaces);
+ assert("foobar" !in root.namespaces);
+ testRandomAccessRange(root.namespaces[ ""].tags, [blue3, blue5, nothing, namespaces, people]);
+ testRandomAccessRange(root.namespaces["stuff"].tags, [orange, square, triangle]);
+ testRandomAccessRange(root.all.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(root.all.tags, [blue3, blue5, orange, square, triangle, nothing, namespaces, people]);
+ testRandomAccessRange(root.all.tags[], [blue3, blue5, orange, square, triangle, nothing, namespaces, people]);
+ testRandomAccessRange(root.all.tags[3..6], [square, triangle, nothing]);
+ assert("blue" in root.tags);
+ assert("nothing" in root.tags);
+ assert("people" in root.tags);
+ assert("orange" !in root.tags);
+ assert("square" !in root.tags);
+ assert("foobar" !in root.tags);
+ assert("blue" in root.all.tags);
+ assert("nothing" in root.all.tags);
+ assert("people" in root.all.tags);
+ assert("orange" in root.all.tags);
+ assert("square" in root.all.tags);
+ assert("foobar" !in root.all.tags);
+ assert("orange" in root.namespaces["stuff"].tags);
+ assert("square" in root.namespaces["stuff"].tags);
+ assert("square" in root.namespaces["stuff"].tags);
+ assert("foobar" !in root.attributes);
+ assert("foobar" !in root.all.attributes);
+ assert("foobar" !in root.namespaces["stuff"].attributes);
+ assert("blue" !in root.attributes);
+ assert("blue" !in root.all.attributes);
+ assert("blue" !in root.namespaces["stuff"].attributes);
+ testRandomAccessRange(root.tags["nothing"], [nothing]);
+ testRandomAccessRange(root.tags["blue"], [blue3, blue5]);
+ testRandomAccessRange(root.namespaces["stuff"].tags["orange"], [orange]);
+ testRandomAccessRange(root.all.tags["nothing"], [nothing]);
+ testRandomAccessRange(root.all.tags["blue"], [blue3, blue5]);
+ testRandomAccessRange(root.all.tags["orange"], [orange]);
+
+ assertThrown!SDLangRangeException(root.tags["foobar"]);
+ assertThrown!SDLangRangeException(root.all.tags["foobar"]);
+ assertThrown!SDLangRangeException(root.attributes["foobar"]);
+ assertThrown!SDLangRangeException(root.all.attributes["foobar"]);
+
+ // DMD Issue #12585 causes a segfault in these two tests when using 2.064 or 2.065,
+ // so work around it.
+ //assertThrown!SDLangRangeException(root.namespaces["foobar"].tags["foobar"]);
+ //assertThrown!SDLangRangeException(root.namespaces["foobar"].attributes["foobar"]);
+ bool didCatch = false;
+ try
+ auto x = root.namespaces["foobar"].tags["foobar"];
+ catch(SDLangRangeException e)
+ didCatch = true;
+ assert(didCatch);
+
+ didCatch = false;
+ try
+ auto x = root.namespaces["foobar"].attributes["foobar"];
+ catch(SDLangRangeException e)
+ didCatch = true;
+ assert(didCatch);
+
+ testRandomAccessRange(root.maybe.tags["nothing"], [nothing]);
+ testRandomAccessRange(root.maybe.tags["blue"], [blue3, blue5]);
+ testRandomAccessRange(root.maybe.namespaces["stuff"].tags["orange"], [orange]);
+ testRandomAccessRange(root.maybe.all.tags["nothing"], [nothing]);
+ testRandomAccessRange(root.maybe.all.tags["blue"], [blue3, blue5]);
+ testRandomAccessRange(root.maybe.all.tags["blue"][], [blue3, blue5]);
+ testRandomAccessRange(root.maybe.all.tags["blue"][0..1], [blue3]);
+ testRandomAccessRange(root.maybe.all.tags["blue"][1..2], [blue5]);
+ testRandomAccessRange(root.maybe.all.tags["orange"], [orange]);
+ testRandomAccessRange(root.maybe.tags["foobar"], cast(Tag[])[]);
+ testRandomAccessRange(root.maybe.all.tags["foobar"], cast(Tag[])[]);
+ testRandomAccessRange(root.maybe.namespaces["foobar"].tags["foobar"], cast(Tag[])[]);
+ testRandomAccessRange(root.maybe.attributes["foobar"], cast(Attribute[])[]);
+ testRandomAccessRange(root.maybe.all.attributes["foobar"], cast(Attribute[])[]);
+ testRandomAccessRange(root.maybe.namespaces["foobar"].attributes["foobar"], cast(Attribute[])[]);
+
+ testRandomAccessRange(blue3.attributes, [ new Attribute("isThree", Value(true)) ]);
+ testRandomAccessRange(blue3.tags, cast(Tag[])[]);
+ testRandomAccessRange(blue3.namespaces, [NSA("")], &namespaceEquals);
+ testRandomAccessRange(blue3.all.attributes, [ new Attribute("isThree", Value(true)) ]);
+ testRandomAccessRange(blue3.all.tags, cast(Tag[])[]);
+
+ testRandomAccessRange(blue5.attributes, [ new Attribute("isThree", Value(false)) ]);
+ testRandomAccessRange(blue5.tags, cast(Tag[])[]);
+ testRandomAccessRange(blue5.namespaces, [NSA("")], &namespaceEquals);
+ testRandomAccessRange(blue5.all.attributes, [ new Attribute("isThree", Value(false)) ]);
+ testRandomAccessRange(blue5.all.tags, cast(Tag[])[]);
+
+ testRandomAccessRange(orange.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(orange.tags, cast(Tag[])[]);
+ testRandomAccessRange(orange.namespaces, cast(NSA[])[], &namespaceEquals);
+ testRandomAccessRange(orange.all.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(orange.all.tags, cast(Tag[])[]);
+
+ testRandomAccessRange(square.attributes, [
+ new Attribute("points", Value(4)),
+ new Attribute("dimensions", Value(2)),
+ new Attribute("points", Value("Still four")),
+ ]);
+ testRandomAccessRange(square.tags, cast(Tag[])[]);
+ testRandomAccessRange(square.namespaces, [NSA("")], &namespaceEquals);
+ testRandomAccessRange(square.all.attributes, [
+ new Attribute("points", Value(4)),
+ new Attribute("dimensions", Value(2)),
+ new Attribute("points", Value("Still four")),
+ ]);
+ testRandomAccessRange(square.all.tags, cast(Tag[])[]);
+
+ testRandomAccessRange(triangle.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(triangle.tags, cast(Tag[])[]);
+ testRandomAccessRange(triangle.namespaces, [NSA("data")], &namespaceEquals);
+ testRandomAccessRange(triangle.namespaces[0].attributes, [
+ new Attribute("data", "points", Value(3)),
+ new Attribute("data", "dimensions", Value(2)),
+ ]);
+ assert("data" in triangle.namespaces);
+ assert("foobar" !in triangle.namespaces);
+ testRandomAccessRange(triangle.namespaces["data"].attributes, [
+ new Attribute("data", "points", Value(3)),
+ new Attribute("data", "dimensions", Value(2)),
+ ]);
+ testRandomAccessRange(triangle.all.attributes, [
+ new Attribute("data", "points", Value(3)),
+ new Attribute("data", "dimensions", Value(2)),
+ ]);
+ testRandomAccessRange(triangle.all.tags, cast(Tag[])[]);
+
+ testRandomAccessRange(nothing.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(nothing.tags, cast(Tag[])[]);
+ testRandomAccessRange(nothing.namespaces, cast(NSA[])[], &namespaceEquals);
+ testRandomAccessRange(nothing.all.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(nothing.all.tags, cast(Tag[])[]);
+
+ testRandomAccessRange(namespaces.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(namespaces.tags, cast(Tag[])[]);
+ testRandomAccessRange(namespaces.namespaces, [NSA("small"), NSA("med"), NSA("big")], &namespaceEquals);
+ testRandomAccessRange(namespaces.namespaces[], [NSA("small"), NSA("med"), NSA("big")], &namespaceEquals);
+ testRandomAccessRange(namespaces.namespaces[1..2], [NSA("med")], &namespaceEquals);
+ testRandomAccessRange(namespaces.namespaces[0].attributes, [
+ new Attribute("small", "A", Value(1)),
+ new Attribute("small", "B", Value(10)),
+ ]);
+ testRandomAccessRange(namespaces.namespaces[1].attributes, [
+ new Attribute("med", "A", Value(2)),
+ ]);
+ testRandomAccessRange(namespaces.namespaces[2].attributes, [
+ new Attribute("big", "A", Value(3)),
+ new Attribute("big", "B", Value(30)),
+ ]);
+ testRandomAccessRange(namespaces.namespaces[1..2][0].attributes, [
+ new Attribute("med", "A", Value(2)),
+ ]);
+ assert("small" in namespaces.namespaces);
+ assert("med" in namespaces.namespaces);
+ assert("big" in namespaces.namespaces);
+ assert("foobar" !in namespaces.namespaces);
+ assert("small" !in namespaces.namespaces[1..2]);
+ assert("med" in namespaces.namespaces[1..2]);
+ assert("big" !in namespaces.namespaces[1..2]);
+ assert("foobar" !in namespaces.namespaces[1..2]);
+ testRandomAccessRange(namespaces.namespaces["small"].attributes, [
+ new Attribute("small", "A", Value(1)),
+ new Attribute("small", "B", Value(10)),
+ ]);
+ testRandomAccessRange(namespaces.namespaces["med"].attributes, [
+ new Attribute("med", "A", Value(2)),
+ ]);
+ testRandomAccessRange(namespaces.namespaces["big"].attributes, [
+ new Attribute("big", "A", Value(3)),
+ new Attribute("big", "B", Value(30)),
+ ]);
+ testRandomAccessRange(namespaces.all.attributes, [
+ new Attribute("small", "A", Value(1)),
+ new Attribute("med", "A", Value(2)),
+ new Attribute("big", "A", Value(3)),
+ new Attribute("small", "B", Value(10)),
+ new Attribute("big", "B", Value(30)),
+ ]);
+ testRandomAccessRange(namespaces.all.attributes[], [
+ new Attribute("small", "A", Value(1)),
+ new Attribute("med", "A", Value(2)),
+ new Attribute("big", "A", Value(3)),
+ new Attribute("small", "B", Value(10)),
+ new Attribute("big", "B", Value(30)),
+ ]);
+ testRandomAccessRange(namespaces.all.attributes[2..4], [
+ new Attribute("big", "A", Value(3)),
+ new Attribute("small", "B", Value(10)),
+ ]);
+ testRandomAccessRange(namespaces.all.tags, cast(Tag[])[]);
+ assert("A" !in namespaces.attributes);
+ assert("B" !in namespaces.attributes);
+ assert("foobar" !in namespaces.attributes);
+ assert("A" in namespaces.all.attributes);
+ assert("B" in namespaces.all.attributes);
+ assert("foobar" !in namespaces.all.attributes);
+ assert("A" in namespaces.namespaces["small"].attributes);
+ assert("B" in namespaces.namespaces["small"].attributes);
+ assert("foobar" !in namespaces.namespaces["small"].attributes);
+ assert("A" in namespaces.namespaces["med"].attributes);
+ assert("B" !in namespaces.namespaces["med"].attributes);
+ assert("foobar" !in namespaces.namespaces["med"].attributes);
+ assert("A" in namespaces.namespaces["big"].attributes);
+ assert("B" in namespaces.namespaces["big"].attributes);
+ assert("foobar" !in namespaces.namespaces["big"].attributes);
+ assert("foobar" !in namespaces.tags);
+ assert("foobar" !in namespaces.all.tags);
+ assert("foobar" !in namespaces.namespaces["small"].tags);
+ assert("A" !in namespaces.tags);
+ assert("A" !in namespaces.all.tags);
+ assert("A" !in namespaces.namespaces["small"].tags);
+ testRandomAccessRange(namespaces.namespaces["small"].attributes["A"], [
+ new Attribute("small", "A", Value(1)),
+ ]);
+ testRandomAccessRange(namespaces.namespaces["med"].attributes["A"], [
+ new Attribute("med", "A", Value(2)),
+ ]);
+ testRandomAccessRange(namespaces.namespaces["big"].attributes["A"], [
+ new Attribute("big", "A", Value(3)),
+ ]);
+ testRandomAccessRange(namespaces.all.attributes["A"], [
+ new Attribute("small", "A", Value(1)),
+ new Attribute("med", "A", Value(2)),
+ new Attribute("big", "A", Value(3)),
+ ]);
+ testRandomAccessRange(namespaces.all.attributes["B"], [
+ new Attribute("small", "B", Value(10)),
+ new Attribute("big", "B", Value(30)),
+ ]);
+
+ testRandomAccessRange(chiyo.attributes, [
+ new Attribute("nemesis", Value("Car")),
+ new Attribute("score", Value(100)),
+ ]);
+ testRandomAccessRange(chiyo.tags, cast(Tag[])[]);
+ testRandomAccessRange(chiyo.namespaces, [NSA("")], &namespaceEquals);
+ testRandomAccessRange(chiyo.all.attributes, [
+ new Attribute("nemesis", Value("Car")),
+ new Attribute("score", Value(100)),
+ ]);
+ testRandomAccessRange(chiyo.all.tags, cast(Tag[])[]);
+
+ testRandomAccessRange(yukari.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(yukari.tags, cast(Tag[])[]);
+ testRandomAccessRange(yukari.namespaces, cast(NSA[])[], &namespaceEquals);
+ testRandomAccessRange(yukari.all.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(yukari.all.tags, cast(Tag[])[]);
+
+ testRandomAccessRange(sana.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(sana.tags, cast(Tag[])[]);
+ testRandomAccessRange(sana.namespaces, cast(NSA[])[], &namespaceEquals);
+ testRandomAccessRange(sana.all.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(sana.all.tags, cast(Tag[])[]);
+
+ testRandomAccessRange(people.attributes, [new Attribute("b", Value(2))]);
+ testRandomAccessRange(people.tags, [chiyo, yukari, tomo]);
+ testRandomAccessRange(people.namespaces, [NSA("visitor"), NSA("")], &namespaceEquals);
+ testRandomAccessRange(people.namespaces[0].attributes, [new Attribute("visitor", "a", Value(1))]);
+ testRandomAccessRange(people.namespaces[1].attributes, [new Attribute("b", Value(2))]);
+ testRandomAccessRange(people.namespaces[0].tags, [sana, hayama]);
+ testRandomAccessRange(people.namespaces[1].tags, [chiyo, yukari, tomo]);
+ assert("visitor" in people.namespaces);
+ assert("" in people.namespaces);
+ assert("foobar" !in people.namespaces);
+ testRandomAccessRange(people.namespaces["visitor"].attributes, [new Attribute("visitor", "a", Value(1))]);
+ testRandomAccessRange(people.namespaces[ ""].attributes, [new Attribute("b", Value(2))]);
+ testRandomAccessRange(people.namespaces["visitor"].tags, [sana, hayama]);
+ testRandomAccessRange(people.namespaces[ ""].tags, [chiyo, yukari, tomo]);
+ testRandomAccessRange(people.all.attributes, [
+ new Attribute("visitor", "a", Value(1)),
+ new Attribute("b", Value(2)),
+ ]);
+ testRandomAccessRange(people.all.tags, [chiyo, yukari, sana, tomo, hayama]);
+
+ people.attributes["b"][0].name = "b_";
+ people.namespaces["visitor"].attributes["a"][0].name = "a_";
+ people.tags["chiyo"][0].name = "chiyo_";
+ people.namespaces["visitor"].tags["sana"][0].name = "sana_";
+
+ assert("b_" in people.attributes);
+ assert("a_" in people.namespaces["visitor"].attributes);
+ assert("chiyo_" in people.tags);
+ assert("sana_" in people.namespaces["visitor"].tags);
+
+ assert(people.attributes["b_"][0] == new Attribute("b_", Value(2)));
+ assert(people.namespaces["visitor"].attributes["a_"][0] == new Attribute("visitor", "a_", Value(1)));
+ assert(people.tags["chiyo_"][0] == chiyo_);
+ assert(people.namespaces["visitor"].tags["sana_"][0] == sana_);
+
+ assert("b" !in people.attributes);
+ assert("a" !in people.namespaces["visitor"].attributes);
+ assert("chiyo" !in people.tags);
+ assert("sana" !in people.namespaces["visitor"].tags);
+
+ assert(people.maybe.attributes["b"].length == 0);
+ assert(people.maybe.namespaces["visitor"].attributes["a"].length == 0);
+ assert(people.maybe.tags["chiyo"].length == 0);
+ assert(people.maybe.namespaces["visitor"].tags["sana"].length == 0);
+
+ people.tags["tomo"][0].remove();
+ people.namespaces["visitor"].tags["hayama"][0].remove();
+ people.tags["chiyo_"][0].remove();
+ testRandomAccessRange(people.tags, [yukari]);
+ testRandomAccessRange(people.namespaces, [NSA("visitor"), NSA("")], &namespaceEquals);
+ testRandomAccessRange(people.namespaces[0].tags, [sana_]);
+ testRandomAccessRange(people.namespaces[1].tags, [yukari]);
+ assert("visitor" in people.namespaces);
+ assert("" in people.namespaces);
+ assert("foobar" !in people.namespaces);
+ testRandomAccessRange(people.namespaces["visitor"].tags, [sana_]);
+ testRandomAccessRange(people.namespaces[ ""].tags, [yukari]);
+ testRandomAccessRange(people.all.tags, [yukari, sana_]);
+
+ people.attributes["b_"][0].namespace = "_";
+ people.namespaces["visitor"].attributes["a_"][0].namespace = "visitor_";
+ assert("_" in people.namespaces);
+ assert("visitor_" in people.namespaces);
+ assert("" in people.namespaces);
+ assert("visitor" in people.namespaces);
+ people.namespaces["visitor"].tags["sana_"][0].namespace = "visitor_";
+ assert("_" in people.namespaces);
+ assert("visitor_" in people.namespaces);
+ assert("" in people.namespaces);
+ assert("visitor" !in people.namespaces);
+
+ assert(people.namespaces["_" ].attributes["b_"][0] == new Attribute("_", "b_", Value(2)));
+ assert(people.namespaces["visitor_"].attributes["a_"][0] == new Attribute("visitor_", "a_", Value(1)));
+ assert(people.namespaces["visitor_"].tags["sana_"][0] == sanaVisitor_);
+
+ people.tags["yukari"][0].remove();
+ people.namespaces["visitor_"].tags["sana_"][0].remove();
+ people.namespaces["visitor_"].attributes["a_"][0].namespace = "visitor";
+ people.namespaces["_"].attributes["b_"][0].namespace = "";
+ testRandomAccessRange(people.tags, cast(Tag[])[]);
+ testRandomAccessRange(people.namespaces, [NSA("visitor"), NSA("")], &namespaceEquals);
+ testRandomAccessRange(people.namespaces[0].tags, cast(Tag[])[]);
+ testRandomAccessRange(people.namespaces[1].tags, cast(Tag[])[]);
+ assert("visitor" in people.namespaces);
+ assert("" in people.namespaces);
+ assert("foobar" !in people.namespaces);
+ testRandomAccessRange(people.namespaces["visitor"].tags, cast(Tag[])[]);
+ testRandomAccessRange(people.namespaces[ ""].tags, cast(Tag[])[]);
+ testRandomAccessRange(people.all.tags, cast(Tag[])[]);
+
+ people.namespaces["visitor"].attributes["a_"][0].remove();
+ testRandomAccessRange(people.attributes, [new Attribute("b_", Value(2))]);
+ testRandomAccessRange(people.namespaces, [NSA("")], &namespaceEquals);
+ testRandomAccessRange(people.namespaces[0].attributes, [new Attribute("b_", Value(2))]);
+ assert("visitor" !in people.namespaces);
+ assert("" in people.namespaces);
+ assert("foobar" !in people.namespaces);
+ testRandomAccessRange(people.namespaces[""].attributes, [new Attribute("b_", Value(2))]);
+ testRandomAccessRange(people.all.attributes, [
+ new Attribute("b_", Value(2)),
+ ]);
+
+ people.attributes["b_"][0].remove();
+ testRandomAccessRange(people.attributes, cast(Attribute[])[]);
+ testRandomAccessRange(people.namespaces, cast(NSA[])[], &namespaceEquals);
+ assert("visitor" !in people.namespaces);
+ assert("" !in people.namespaces);
+ assert("foobar" !in people.namespaces);
+ testRandomAccessRange(people.all.attributes, cast(Attribute[])[]);
+}
+
+// Regression test, issue #11: https://github.com/Abscissa/SDLang-D/issues/11
+version(sdlangUnittest)
+unittest
+{
+ import sdlang.parser;
+ writeln("ast: Regression test issue #11...");
+ stdout.flush();
+
+ auto root = parseSource(
+`//
+a`);
+
+ assert("a" in root.tags);
+
+ root = parseSource(
+`//
+parent {
+ child
+}
+`);
+
+ auto child = new Tag(
+ null, "", "child",
+ null, null, null
+ );
+
+ assert("parent" in root.tags);
+ assert("child" !in root.tags);
+ testRandomAccessRange(root.tags["parent"][0].tags, [child]);
+ assert("child" in root.tags["parent"][0].tags);
+}
diff --git a/src/sdlang/dub.json b/src/sdlang/dub.json
new file mode 100644
index 0000000..d5a0493
--- /dev/null
+++ b/src/sdlang/dub.json
@@ -0,0 +1,38 @@
+{
+ "name": "sdlang-d",
+ "description": "An SDL (Simple Declarative Language) library for D.",
+ "homepage": "http://github.com/Abscissa/SDLang-D",
+ "authors": ["Nick Sabalausky"],
+ "license": "zlib/libpng",
+ "copyright": "©2012-2015 Nick Sabalausky",
+ "sourcePaths": ["."],
+ "importPaths": ["."],
+ "buildRequirements": ["allowWarnings"],
+ "dependencies": {
+ "libinputvisitor": "~>1.2.0"
+ },
+ "subPackages": [
+ "./libinputvisitor"
+ ],
+ "configurations": [
+ {
+ "name": "test",
+ "targetType": "executable",
+ "versions": ["SDLang_TestApp"],
+ "targetPath": "../../bin/",
+ "targetName": "sdlang"
+ },
+ {
+ "name": "library",
+ "targetType": "library"
+ },
+ {
+ "name": "unittest",
+ "targetType": "executable",
+ "targetPath": "../../bin/",
+ "targetName": "sdlang-unittest",
+
+ "versions": ["sdlangUnittest", "sdlangTrace"]
+ }
+ ]
+}
diff --git a/src/sdlang/exception.d b/src/sdlang/exception.d
new file mode 100644
index 0000000..e87307f
--- /dev/null
+++ b/src/sdlang/exception.d
@@ -0,0 +1,42 @@
+// SDLang-D
+// Written in the D programming language.
+
+module sdlang.exception;
+
+import std.exception;
+import std.string;
+
+import sdlang.util;
+
+abstract class SDLangException : Exception
+{
+ this(string msg) { super(msg); }
+}
+
+class SDLangParseException : SDLangException
+{
+ Location location;
+ bool hasLocation;
+
+ this(string msg)
+ {
+ hasLocation = false;
+ super(msg);
+ }
+
+ this(Location location, string msg)
+ {
+ hasLocation = true;
+ super("%s: %s".format(location.toString(), msg));
+ }
+}
+
+class SDLangValidationException : SDLangException
+{
+ this(string msg) { super(msg); }
+}
+
+class SDLangRangeException : SDLangException
+{
+ this(string msg) { super(msg); }
+}
diff --git a/src/sdlang/lexer.d b/src/sdlang/lexer.d
new file mode 100644
index 0000000..6eeeac2
--- /dev/null
+++ b/src/sdlang/lexer.d
@@ -0,0 +1,2068 @@
+// SDLang-D
+// Written in the D programming language.
+
+module sdlang.lexer;
+
+import std.algorithm;
+import std.array;
+import std.base64;
+import std.bigint;
+import std.conv;
+import std.datetime;
+import std.file;
+import std.stream : ByteOrderMarks, BOM;
+import std.traits;
+import std.typecons;
+import std.uni;
+import std.utf;
+import std.variant;
+
+import sdlang.exception;
+import sdlang.symbol;
+import sdlang.token;
+import sdlang.util;
+
+alias sdlang.util.startsWith startsWith;
+
+Token[] lexFile(string filename)
+{
+ auto source = cast(string)read(filename);
+ return lexSource(source, filename);
+}
+
+Token[] lexSource(string source, string filename=null)
+{
+ auto lexer = scoped!Lexer(source, filename);
+
+ // Can't use 'std.array.array(Range)' because 'lexer' is scoped
+ // and therefore cannot have its reference copied.
+ Appender!(Token[]) tokens;
+ foreach(tok; lexer)
+ tokens.put(tok);
+
+ return tokens.data;
+}
+
+// Kind of a poor-man's yield, but fast.
+// Only to be used inside Lexer.popFront (and Lexer.this).
+private template accept(string symbolName)
+{
+ static assert(symbolName != "Value", "Value symbols must also take a value.");
+ enum accept = acceptImpl!(symbolName, "null");
+}
+private template accept(string symbolName, string value)
+{
+ static assert(symbolName == "Value", "Only a Value symbol can take a value.");
+ enum accept = acceptImpl!(symbolName, value);
+}
+private template accept(string symbolName, string value, string startLocation, string endLocation)
+{
+ static assert(symbolName == "Value", "Only a Value symbol can take a value.");
+ enum accept = ("
+ {
+ _front = makeToken!"~symbolName.stringof~";
+ _front.value = "~value~";
+ _front.location = "~(startLocation==""? "tokenStart" : startLocation)~";
+ _front.data = source[
+ "~(startLocation==""? "tokenStart.index" : startLocation)~"
+ ..
+ "~(endLocation==""? "location.index" : endLocation)~"
+ ];
+ return;
+ }
+ ").replace("\n", "");
+}
+private template acceptImpl(string symbolName, string value)
+{
+ enum acceptImpl = ("
+ {
+ _front = makeToken!"~symbolName.stringof~";
+ _front.value = "~value~";
+ return;
+ }
+ ").replace("\n", "");
+}
+
+class Lexer
+{
+ string source;
+ string filename;
+ Location location; /// Location of current character in source
+
+ private dchar ch; // Current character
+ private dchar nextCh; // Lookahead character
+ private size_t nextPos; // Position of lookahead character (an index into source)
+ private bool hasNextCh; // If false, then there's no more lookahead, just EOF
+ private size_t posAfterLookahead; // Position after lookahead character (an index into source)
+
+ private Location tokenStart; // The starting location of the token being lexed
+
+ // Length so far of the token being lexed, not including current char
+ private size_t tokenLength; // Length in UTF-8 code units
+ private size_t tokenLength32; // Length in UTF-32 code units
+
+ // Slight kludge:
+ // If a numeric fragment is found after a Date (separated by arbitrary
+ // whitespace), it could be the "hours" part of a DateTime, or it could
+ // be a separate numeric literal that simply follows a plain Date. If the
+ // latter, then the Date must be emitted, but numeric fragment that was
+ // found after it needs to be saved for the the lexer's next iteration.
+ //
+ // It's a slight kludge, and could instead be implemented as a slightly
+ // kludgey parser hack, but it's the only situation where SDL's lexing
+ // needs to lookahead more than one character, so this is good enough.
+ private struct LookaheadTokenInfo
+ {
+ bool exists = false;
+ string numericFragment = "";
+ bool isNegative = false;
+ Location tokenStart;
+ }
+ private LookaheadTokenInfo lookaheadTokenInfo;
+
+ this(string source=null, string filename=null)
+ {
+ this.filename = filename;
+ this.source = source;
+
+ _front = Token(symbol!"Error", Location());
+ lookaheadTokenInfo = LookaheadTokenInfo.init;
+
+ if( source.startsWith( ByteOrderMarks[BOM.UTF8] ) )
+ {
+ source = source[ ByteOrderMarks[BOM.UTF8].length .. $ ];
+ this.source = source;
+ }
+
+ foreach(bom; ByteOrderMarks)
+ if( source.startsWith(bom) )
+ error(Location(filename,0,0,0), "SDL spec only supports UTF-8, not UTF-16 or UTF-32");
+
+ if(source == "")
+ mixin(accept!"EOF");
+
+ // Prime everything
+ hasNextCh = true;
+ nextCh = source.decode(posAfterLookahead);
+ advanceChar(ErrorOnEOF.Yes);
+ location = Location(filename, 0, 0, 0);
+ popFront();
+ }
+
+ @property bool empty()
+ {
+ return _front.symbol == symbol!"EOF";
+ }
+
+ Token _front;
+ @property Token front()
+ {
+ return _front;
+ }
+
+ @property bool isEOF()
+ {
+ return location.index == source.length && !lookaheadTokenInfo.exists;
+ }
+
+ private void error(string msg)
+ {
+ error(location, msg);
+ }
+
+ private void error(Location loc, string msg)
+ {
+ throw new SDLangParseException(loc, "Error: "~msg);
+ }
+
+ private Token makeToken(string symbolName)()
+ {
+ auto tok = Token(symbol!symbolName, tokenStart);
+ tok.data = tokenData;
+ return tok;
+ }
+
+ private @property string tokenData()
+ {
+ return source[ tokenStart.index .. location.index ];
+ }
+
+ /// Check the lookahead character
+ private bool lookahead(dchar ch)
+ {
+ return hasNextCh && nextCh == ch;
+ }
+
+ private bool lookahead(bool function(dchar) condition)
+ {
+ return hasNextCh && condition(nextCh);
+ }
+
+ private static bool isNewline(dchar ch)
+ {
+ return ch == '\n' || ch == '\r' || ch == lineSep || ch == paraSep;
+ }
+
+ /// Returns the length of the newline sequence, or zero if the current
+ /// character is not a newline
+ ///
+ /// Note that there are only single character sequences and the two
+ /// character sequence `\r\n` as used on Windows.
+ private size_t isAtNewline()
+ {
+ if(ch == '\n' || ch == lineSep || ch == paraSep) return 1;
+ else if(ch == '\r') return lookahead('\n') ? 2 : 1;
+ else return 0;
+ }
+
+ /// Is 'ch' a valid base 64 character?
+ private bool isBase64(dchar ch)
+ {
+ if(ch >= 'A' && ch <= 'Z')
+ return true;
+
+ if(ch >= 'a' && ch <= 'z')
+ return true;
+
+ if(ch >= '0' && ch <= '9')
+ return true;
+
+ return ch == '+' || ch == '/' || ch == '=';
+ }
+
+ /// Is the current character one that's allowed
+ /// immediately *after* an int/float literal?
+ private bool isEndOfNumber()
+ {
+ if(isEOF)
+ return true;
+
+ return !isDigit(ch) && ch != ':' && ch != '_' && !isAlpha(ch);
+ }
+
+ /// Is current character the last one in an ident?
+ /// Result is cached; advanceChar() resets isEndOfIdentCached.
+ private bool isEndOfIdentCached = false;
+ private bool _isEndOfIdent;
+ private bool isEndOfIdent()
+ {
+ if(!isEndOfIdentCached)
+ {
+ if(!hasNextCh)
+ _isEndOfIdent = true; // EOF always terminates an ident
+ else
+ _isEndOfIdent = !isIdentChar(nextCh);
+
+ isEndOfIdentCached = true;
+ }
+
+ return _isEndOfIdent;
+ }
+
+ /// Is 'ch' a character that's allowed *somewhere* in an identifier?
+ /// Any Unicode letter or number, plus '-', '_', '.', '$'.
+ private bool isIdentChar(dchar ch)
+ {
+ if(isAlpha(ch) || isNumber(ch))
+ return true;
+
+ switch(ch)
+ {
+ case '-', '_', '.', '$': return true;
+ default: return false;
+ }
+ }
+
+ /// Is 'ch' an ASCII decimal digit?
+ private bool isDigit(dchar ch)
+ {
+ switch(ch)
+ {
+ case '0': .. case '9': return true;
+ default: return false;
+ }
+ }
+
+ private enum KeywordResult
+ {
+ Accept, // Keyword is matched
+ Continue, // Keyword is not matched *yet*
+ Failed, // Keyword doesn't match
+ }
+ /// Incremental keyword matcher, driven by lexIdentKeyword once per
+ /// consumed character: tokenLength32 is how many characters have been
+ /// matched so far, 'ch' is the character currently being examined.
+ private KeywordResult checkKeyword(dstring keyword32)
+ {
+ // Still within length of keyword
+ if(tokenLength32 < keyword32.length)
+ {
+ if(ch == keyword32[tokenLength32])
+ return KeywordResult.Continue;
+ else
+ return KeywordResult.Failed;
+ }
+
+ // At position after keyword
+ else if(tokenLength32 == keyword32.length)
+ {
+ // Only accept if the keyword isn't a prefix of a longer ident.
+ if(isEOF || !isIdentChar(ch))
+ {
+ debug assert(tokenData == to!string(keyword32));
+ return KeywordResult.Accept;
+ }
+ else
+ return KeywordResult.Failed;
+ }
+
+ assert(0, "Fell off end of keyword to check");
+ }
+
+ enum ErrorOnEOF { No, Yes }
+
+ /// Advance one code point.
+ /// Updates line/column bookkeeping, the current character 'ch', the
+ /// one-character lookahead (nextCh/hasNextCh/nextPos), and the token
+ /// length counters. With ErrorOnEOF.Yes, throws if there is no next char.
+ private void advanceChar(ErrorOnEOF errorOnEOF)
+ {
+ if(auto cnt = isAtNewline())
+ {
+ if (cnt == 1)
+ location.line++; // for "\r\n" (cnt==2) the line bump happens on the '\n'
+ location.col = 0;
+ }
+ else
+ location.col++;
+
+ location.index = nextPos;
+
+ nextPos = posAfterLookahead;
+ ch = nextCh;
+
+ if(!hasNextCh)
+ {
+ if(errorOnEOF == ErrorOnEOF.Yes)
+ error("Unexpected end of file");
+
+ return; // note: token length counters are NOT updated on this path
+ }
+
+ tokenLength32++; // length in code points
+ tokenLength = location.index - tokenStart.index; // length in code units
+
+ if(nextPos == source.length)
+ {
+ nextCh = dchar.init;
+ hasNextCh = false;
+ return;
+ }
+
+ nextCh = source.decode(posAfterLookahead); // std.utf.decode advances posAfterLookahead
+ isEndOfIdentCached = false;
+ }
+
+ /// Advances the specified amount of characters
+ /// (each step applies the same ErrorOnEOF policy).
+ private void advanceChar(size_t count, ErrorOnEOF errorOnEOF)
+ {
+ foreach(i; 0 .. count)
+ advanceChar(errorOnEOF);
+ }
+
+ /// Range primitive: lex one token into _front.
+ /// Dispatches on the current character; each branch either emits a
+ /// token via the accept mixin (which returns from this function) or
+ /// delegates to a lex* helper that does so.
+ void popFront()
+ {
+ // -- Main Lexer -------------
+
+ eatWhite();
+
+ if(isEOF)
+ mixin(accept!"EOF");
+
+ // Start a fresh token at the current position.
+ tokenStart = location;
+ tokenLength = 0;
+ tokenLength32 = 0;
+ isEndOfIdentCached = false;
+
+ if(lookaheadTokenInfo.exists)
+ {
+ // lexDate stashed an already-consumed numeric fragment: re-lex it
+ // as its own numeric token.
+ tokenStart = lookaheadTokenInfo.tokenStart;
+
+ auto prevLATokenInfo = lookaheadTokenInfo;
+ lookaheadTokenInfo = LookaheadTokenInfo.init;
+ lexNumeric(prevLATokenInfo);
+ return;
+ }
+
+ if(ch == '=')
+ {
+ advanceChar(ErrorOnEOF.No);
+ mixin(accept!"=");
+ }
+
+ else if(ch == '{')
+ {
+ advanceChar(ErrorOnEOF.No);
+ mixin(accept!"{");
+ }
+
+ else if(ch == '}')
+ {
+ advanceChar(ErrorOnEOF.No);
+ mixin(accept!"}");
+ }
+
+ else if(ch == ':')
+ {
+ advanceChar(ErrorOnEOF.No);
+ mixin(accept!":");
+ }
+
+ // Semicolon is a statement separator: same token as a newline.
+ else if(ch == ';')
+ {
+ advanceChar(ErrorOnEOF.No);
+ mixin(accept!"EOL");
+ }
+
+ else if(auto cnt = isAtNewline())
+ {
+ advanceChar(cnt, ErrorOnEOF.No);
+ mixin(accept!"EOL");
+ }
+
+ else if(isAlpha(ch) || ch == '_')
+ lexIdentKeyword();
+
+ else if(ch == '"')
+ lexRegularString();
+
+ else if(ch == '`')
+ lexRawString();
+
+ else if(ch == '\'')
+ lexCharacter();
+
+ else if(ch == '[')
+ lexBinary();
+
+ else if(ch == '-' || ch == '.' || isDigit(ch))
+ lexNumeric();
+
+ else
+ {
+ advanceChar(ErrorOnEOF.No);
+ error("Syntax error");
+ }
+ }
+
+ /// Lex Ident or Keyword
+ /// Runs all keyword candidates in parallel against the input one
+ /// character at a time; if every candidate fails, the rest of the
+ /// token is lexed as a plain identifier by lexIdent().
+ private void lexIdentKeyword()
+ {
+ assert(isAlpha(ch) || ch == '_');
+
+ // Keyword
+ struct Key
+ {
+ dstring name;
+ Value value;
+ bool failed = false;
+ }
+ static Key[5] keywords;
+ static keywordsInited = false;
+ if(!keywordsInited)
+ {
+ // Value (as a std.variant-based type) can't be statically inited
+ keywords[0] = Key("true", Value(true ));
+ keywords[1] = Key("false", Value(false));
+ keywords[2] = Key("on", Value(true ));
+ keywords[3] = Key("off", Value(false));
+ keywords[4] = Key("null", Value(null ));
+ keywordsInited = true;
+ }
+
+ // Reset per-token state (keywords is static and reused).
+ foreach(ref key; keywords)
+ key.failed = false;
+
+ auto numKeys = keywords.length;
+
+ do
+ {
+ foreach(ref key; keywords)
+ if(!key.failed)
+ {
+ final switch(checkKeyword(key.name))
+ {
+ case KeywordResult.Accept:
+ mixin(accept!("Value", "key.value")); // returns from this function
+
+ case KeywordResult.Continue:
+ break;
+
+ case KeywordResult.Failed:
+ key.failed = true;
+ numKeys--;
+ break;
+ }
+ }
+
+ if(numKeys == 0)
+ {
+ // No keyword can match anymore: lex the rest as an identifier.
+ lexIdent();
+ return;
+ }
+
+ advanceChar(ErrorOnEOF.No);
+
+ } while(!isEOF);
+
+ // Reached EOF: accept a keyword that ended exactly at EOF.
+ // (+1: advanceChar skips the tokenLength32 bump when no next char exists.)
+ foreach(ref key; keywords)
+ if(!key.failed)
+ if(key.name.length == tokenLength32+1)
+ mixin(accept!("Value", "key.value"));
+
+ mixin(accept!"Ident");
+ }
+
+ /// Lex Ident
+ /// Consumes ident characters until EOF or a non-ident character.
+ /// (May be entered mid-token from lexIdentKeyword, hence the guard.)
+ private void lexIdent()
+ {
+ if(tokenLength == 0)
+ assert(isAlpha(ch) || ch == '_');
+
+ while(!isEOF && isIdentChar(ch))
+ advanceChar(ErrorOnEOF.No);
+
+ mixin(accept!"Ident");
+ }
+
+ /// Lex regular string
+ /// Double-quoted, supporting \n \r \t \" \\ escapes and
+ /// backslash-newline line continuations. Runs of unescaped characters
+ /// are copied into 'buf' in bulk via updateBuf.
+ private void lexRegularString()
+ {
+ assert(ch == '"');
+
+ Appender!string buf;
+ size_t spanStart = nextPos;
+
+ // Doesn't include current character
+ void updateBuf()
+ {
+ if(location.index == spanStart)
+ return;
+
+ buf.put( source[spanStart..location.index] );
+ }
+
+ advanceChar(ErrorOnEOF.Yes);
+ while(ch != '"')
+ {
+ if(ch == '\\')
+ {
+ updateBuf();
+
+ bool wasEscSequence = true;
+ if(hasNextCh)
+ {
+ switch(nextCh)
+ {
+ case 'n': buf.put('\n'); break;
+ case 'r': buf.put('\r'); break;
+ case 't': buf.put('\t'); break;
+ case '"': buf.put('\"'); break;
+ case '\\': buf.put('\\'); break;
+ default: wasEscSequence = false; break;
+ }
+ }
+
+ if(wasEscSequence)
+ {
+ // Skip the backslash; the escape char itself is skipped by
+ // the advanceChar at the bottom of the loop.
+ advanceChar(ErrorOnEOF.Yes);
+ spanStart = nextPos;
+ }
+ else
+ {
+ // Not a recognized escape: treat as a line-continuation
+ // backslash (comment handling disabled inside the string).
+ eatWhite(false);
+ spanStart = location.index;
+ }
+ }
+
+ else if(isNewline(ch))
+ error("Unescaped newlines are only allowed in raw strings, not regular strings.");
+
+ advanceChar(ErrorOnEOF.Yes);
+ }
+
+ updateBuf();
+ advanceChar(ErrorOnEOF.No); // Skip closing double-quote
+ mixin(accept!("Value", "buf.data"));
+ }
+
+ /// Lex raw string
+ /// Backtick-delimited: no escape sequences, newlines allowed.
+ private void lexRawString()
+ {
+ assert(ch == '`');
+
+ do
+ advanceChar(ErrorOnEOF.Yes);
+ while(ch != '`');
+
+ advanceChar(ErrorOnEOF.No); // Skip closing back-tick
+ mixin(accept!("Value", "tokenData[1..$-1]")); // strip the backtick delimiters
+ }
+
+ /// Lex character literal
+ /// A single-quoted single character, or one of the escape sequences
+ /// \n \r \t \' \\. Emits a Value token holding the dchar.
+ private void lexCharacter()
+ {
+ assert(ch == '\'');
+ advanceChar(ErrorOnEOF.Yes); // Skip opening single-quote
+
+ dchar value;
+ if(ch == '\\')
+ {
+ advanceChar(ErrorOnEOF.Yes); // Skip escape backslash
+ switch(ch)
+ {
+ case 'n': value = '\n'; break;
+ case 'r': value = '\r'; break;
+ case 't': value = '\t'; break;
+ case '\'': value = '\''; break;
+ case '\\': value = '\\'; break;
+ default: error("Invalid escape sequence.");
+ }
+ }
+ else if(isNewline(ch))
+ error("Newline not allowed in character literal."); // fixed typo: was "alowed"
+ else
+ value = ch;
+ advanceChar(ErrorOnEOF.Yes); // Skip the character itself
+
+ if(ch == '\'')
+ advanceChar(ErrorOnEOF.No); // Skip closing single-quote
+ else
+ error("Expected closing single-quote.");
+
+ mixin(accept!("Value", "value"));
+ }
+
+ /// Lex base64 binary literal
+ /// Delimited by '[' and ']'; whitespace (including newlines) may appear
+ /// between the base64 characters and is skipped. Emits a Value token
+ /// holding the decoded ubyte[].
+ private void lexBinary()
+ {
+ assert(ch == '[');
+ advanceChar(ErrorOnEOF.Yes);
+
+ // Skip whitespace/newlines between base64 characters.
+ void eatBase64Whitespace()
+ {
+ while(!isEOF && isWhite(ch))
+ {
+ if(isNewline(ch))
+ advanceChar(ErrorOnEOF.Yes);
+
+ if(!isEOF && isWhite(ch))
+ eatWhite();
+ }
+ }
+
+ eatBase64Whitespace();
+
+ // Iterates all valid base64 characters, ending at ']'.
+ // Skips all whitespace. Throws on invalid chars.
+ struct Base64InputRange
+ {
+ Lexer lexer; // NOTE(review): shared state — assumes Lexer has reference semantics; confirm
+ private bool isInited = false;
+ private int numInputCharsMod4 = 0; // enforces total length % 4 == 0
+
+ @property bool empty()
+ {
+ if(lexer.ch == ']')
+ {
+ if(numInputCharsMod4 != 0)
+ lexer.error("Length of Base64 encoding must be a multiple of 4. ("~to!string(numInputCharsMod4)~")");
+
+ return true;
+ }
+
+ return false;
+ }
+
+ @property dchar front()
+ {
+ return lexer.ch;
+ }
+
+ void popFront()
+ {
+ auto lex = lexer;
+
+ // Count the very first character on the first pop.
+ if(!isInited)
+ {
+ if(lexer.isBase64(lexer.ch))
+ {
+ numInputCharsMod4++;
+ numInputCharsMod4 %= 4;
+ }
+
+ isInited = true;
+ }
+
+ lex.advanceChar(lex.ErrorOnEOF.Yes);
+
+ eatBase64Whitespace();
+
+ if(lex.isEOF)
+ lex.error("Unexpected end of file.");
+
+ if(lex.ch != ']')
+ {
+ if(!lex.isBase64(lex.ch))
+ lex.error("Invalid character in base64 binary literal.");
+
+ numInputCharsMod4++;
+ numInputCharsMod4 %= 4;
+ }
+ }
+ }
+
+ // This is a slow ugly hack. It's necessary because Base64.decode
+ // currently requires the source to have known length.
+ //TODO: Remove this when DMD issue #9543 is fixed.
+ dchar[] tmpBuf = array(Base64InputRange(this));
+
+ Appender!(ubyte[]) outputBuf;
+ // Ugly workaround for DMD issue #9102
+ //TODO: Remove this when DMD #9102 is fixed
+ struct OutputBuf
+ {
+ void put(ubyte ch)
+ {
+ outputBuf.put(ch);
+ }
+ }
+
+ try
+ //Base64.decode(Base64InputRange(this), OutputBuf());
+ Base64.decode(tmpBuf, OutputBuf());
+
+ //TODO: Starting with dmd 2.062, this should be a Base64Exception
+ catch(Exception e)
+ error("Invalid character in base64 binary literal.");
+
+ advanceChar(ErrorOnEOF.No); // Skip ']'
+ mixin(accept!("Value", "outputBuf.data"));
+ }
+
+ /// Parse a digits-only string into a BigInt, applying the sign.
+ private BigInt toBigInt(bool isNegative, string absValue)
+ {
+ BigInt magnitude = BigInt(absValue);
+ assert(magnitude >= 0);
+
+ return isNegative ? -magnitude : magnitude;
+ }
+
+ /// Lex [0-9]+, but without emitting a token.
+ /// This is used by the other numeric parsing functions.
+ /// Returns the source slice of the digits; errors if the current
+ /// character is not a digit.
+ private string lexNumericFragment()
+ {
+ if(!isDigit(ch))
+ error("Expected a digit 0-9.");
+
+ auto spanStart = location.index;
+
+ do
+ {
+ advanceChar(ErrorOnEOF.No);
+ } while(!isEOF && isDigit(ch));
+
+ return source[spanStart..location.index];
+ }
+
+ /// Lex anything that starts with 0-9 or '-'. Ints, floats, dates, etc.
+ /// If laTokenInfo.exists, resumes from a numeric fragment that lexDate
+ /// already consumed (see lookaheadTokenInfo) instead of reading input.
+ private void lexNumeric(LookaheadTokenInfo laTokenInfo = LookaheadTokenInfo.init)
+ {
+ bool isNegative;
+ string firstFragment;
+ if(laTokenInfo.exists)
+ {
+ firstFragment = laTokenInfo.numericFragment;
+ isNegative = laTokenInfo.isNegative;
+ }
+ else
+ {
+ assert(ch == '-' || ch == '.' || isDigit(ch));
+
+ // Check for negative
+ isNegative = ch == '-';
+ if(isNegative)
+ advanceChar(ErrorOnEOF.Yes);
+
+ // Some floating point with omitted leading zero?
+ if(ch == '.')
+ {
+ lexFloatingPoint("");
+ return;
+ }
+
+ firstFragment = lexNumericFragment();
+ }
+
+ // Long integer (64-bit signed)?
+ if(ch == 'L' || ch == 'l')
+ {
+ advanceChar(ErrorOnEOF.No);
+
+ // BigInt(long.min) is a workaround for DMD issue #9548
+ auto num = toBigInt(isNegative, firstFragment);
+ if(num < BigInt(long.min) || num > long.max)
+ error(tokenStart, "Value doesn't fit in 64-bit signed long integer: "~to!string(num));
+
+ mixin(accept!("Value", "num.toLong()"));
+ }
+
+ // Float (32-bit signed)?
+ else if(ch == 'F' || ch == 'f')
+ {
+ auto value = to!float(tokenData);
+ advanceChar(ErrorOnEOF.No);
+ mixin(accept!("Value", "value"));
+ }
+
+ // Double float (64-bit signed) with suffix?
+ // ('d' followed by ':' is a time span's days marker, not this suffix.)
+ else if((ch == 'D' || ch == 'd') && !lookahead(':')
+ )
+ {
+ auto value = to!double(tokenData);
+ advanceChar(ErrorOnEOF.No);
+ mixin(accept!("Value", "value"));
+ }
+
+ // Decimal (128+ bits signed)?
+ else if(
+ (ch == 'B' || ch == 'b') &&
+ (lookahead('D') || lookahead('d'))
+ )
+ {
+ auto value = to!real(tokenData);
+ advanceChar(ErrorOnEOF.No);
+ advanceChar(ErrorOnEOF.No);
+ mixin(accept!("Value", "value"));
+ }
+
+ // Some floating point?
+ else if(ch == '.')
+ lexFloatingPoint(firstFragment);
+
+ // Some date?
+ else if(ch == '/' && hasNextCh && isDigit(nextCh))
+ lexDate(isNegative, firstFragment);
+
+ // Some time span? (':' for hours, or 'd' followed by ':' for days)
+ else if(ch == ':' || ch == 'd')
+ lexTimeSpan(isNegative, firstFragment);
+
+ // Integer (32-bit signed)?
+ else if(isEndOfNumber())
+ {
+ auto num = toBigInt(isNegative, firstFragment);
+ if(num < int.min || num > int.max)
+ error(tokenStart, "Value doesn't fit in 32-bit signed integer: "~to!string(num));
+
+ mixin(accept!("Value", "num.toInt()"));
+ }
+
+ // Invalid suffix
+ else
+ error("Invalid integer suffix.");
+ }
+
+ /// Lex any floating-point literal (after the initial numeric fragment was lexed)
+ /// Entered at the '.'; suffixes: F/f = float, D/d = double, BD/bd = real,
+ /// no suffix = double. ConvException from the to!() conversions is
+ /// reported as a parse error.
+ private void lexFloatingPoint(string firstPart)
+ {
+ assert(ch == '.');
+ advanceChar(ErrorOnEOF.No);
+
+ auto secondPart = lexNumericFragment();
+
+ try
+ {
+ // Double float (64-bit signed) with suffix?
+ if(ch == 'D' || ch == 'd')
+ {
+ auto value = to!double(tokenData);
+ advanceChar(ErrorOnEOF.No);
+ mixin(accept!("Value", "value"));
+ }
+
+ // Float (32-bit signed)?
+ else if(ch == 'F' || ch == 'f')
+ {
+ auto value = to!float(tokenData);
+ advanceChar(ErrorOnEOF.No);
+ mixin(accept!("Value", "value"));
+ }
+
+ // Decimal (128+ bits signed)?
+ else if(ch == 'B' || ch == 'b')
+ {
+ auto value = to!real(tokenData);
+ advanceChar(ErrorOnEOF.Yes); // 'B' alone is invalid: a 'D'/'d' must follow
+
+ if(!isEOF && (ch == 'D' || ch == 'd'))
+ {
+ advanceChar(ErrorOnEOF.No);
+ if(isEndOfNumber())
+ mixin(accept!("Value", "value"));
+ }
+
+ error("Invalid floating point suffix.");
+ }
+
+ // Double float (64-bit signed) without suffix?
+ else if(isEOF || !isIdentChar(ch))
+ {
+ auto value = to!double(tokenData);
+ mixin(accept!("Value", "value"));
+ }
+
+ // Invalid suffix
+ else
+ error("Invalid floating point suffix.");
+ }
+ catch(ConvException e)
+ error("Invalid floating point literal.");
+ }
+
+ /// Validate the three numeric fragments of a date literal and build a
+ /// std.datetime Date. Reports range errors at the token start; the Date
+ /// constructor itself rejects invalid day-of-month combinations.
+ private Date makeDate(bool isNegative, string yearStr, string monthStr, string dayStr)
+ {
+ BigInt biTmp;
+
+ biTmp = BigInt(yearStr);
+ if(isNegative)
+ biTmp = -biTmp;
+ if(biTmp < int.min || biTmp > int.max)
+ error(tokenStart, "Date's year is out of range. (Must fit within a 32-bit signed int.)");
+ auto year = biTmp.toInt();
+
+ biTmp = BigInt(monthStr);
+ if(biTmp < 1 || biTmp > 12)
+ error(tokenStart, "Date's month is out of range.");
+ auto month = biTmp.toInt();
+
+ biTmp = BigInt(dayStr);
+ if(biTmp < 1 || biTmp > 31)
+ error(tokenStart, "Date's day is out of range."); // fixed copy-paste bug: message said "month"
+ auto day = biTmp.toInt();
+
+ return Date(year, month, day);
+ }
+
+ /// Combine a Date with time-of-day fragments into a DateTimeFrac.
+ /// The time is applied as a Duration offset from midnight of 'date';
+ /// millisecondStr holds fractional-second digits and is scaled up to
+ /// milliseconds below. isNegative negates the whole time portion.
+ private DateTimeFrac makeDateTimeFrac(
+ bool isNegative, Date date, string hourStr, string minuteStr,
+ string secondStr, string millisecondStr
+ )
+ {
+ BigInt biTmp;
+
+ biTmp = BigInt(hourStr);
+ if(biTmp < int.min || biTmp > int.max)
+ error(tokenStart, "Datetime's hour is out of range.");
+ auto numHours = biTmp.toInt();
+
+ biTmp = BigInt(minuteStr);
+ if(biTmp < 0 || biTmp > int.max)
+ error(tokenStart, "Datetime's minute is out of range.");
+ auto numMinutes = biTmp.toInt();
+
+ int numSeconds = 0;
+ if(secondStr != "")
+ {
+ biTmp = BigInt(secondStr);
+ if(biTmp < 0 || biTmp > int.max)
+ error(tokenStart, "Datetime's second is out of range.");
+ numSeconds = biTmp.toInt();
+ }
+
+ int millisecond = 0;
+ if(millisecondStr != "")
+ {
+ biTmp = BigInt(millisecondStr);
+ if(biTmp < 0 || biTmp > int.max)
+ error(tokenStart, "Datetime's millisecond is out of range.");
+ millisecond = biTmp.toInt();
+
+ // Scale fractional digits to milliseconds: "5" -> 500, "55" -> 550.
+ if(millisecondStr.length == 1)
+ millisecond *= 100;
+ else if(millisecondStr.length == 2)
+ millisecond *= 10;
+ }
+
+ Duration fracSecs = millisecond.msecs;
+
+ auto offset = hours(numHours) + minutes(numMinutes) + seconds(numSeconds);
+
+ if(isNegative)
+ {
+ offset = -offset;
+ fracSecs = -fracSecs;
+ }
+
+ return DateTimeFrac(DateTime(date) + offset, fracSecs);
+ }
+
+ /// Build a Duration from time-span fragments (days optional; empty
+ /// string means absent). millisecondStr holds fractional-second digits,
+ /// scaled to milliseconds as in makeDateTimeFrac.
+ private Duration makeDuration(
+ bool isNegative, string dayStr,
+ string hourStr, string minuteStr, string secondStr,
+ string millisecondStr
+ )
+ {
+ BigInt biTmp;
+
+ long day = 0;
+ if(dayStr != "")
+ {
+ biTmp = BigInt(dayStr);
+ if(biTmp < long.min || biTmp > long.max)
+ error(tokenStart, "Time span's day is out of range.");
+ day = biTmp.toLong();
+ }
+
+ biTmp = BigInt(hourStr);
+ if(biTmp < long.min || biTmp > long.max)
+ error(tokenStart, "Time span's hour is out of range.");
+ auto hour = biTmp.toLong();
+
+ biTmp = BigInt(minuteStr);
+ if(biTmp < long.min || biTmp > long.max)
+ error(tokenStart, "Time span's minute is out of range.");
+ auto minute = biTmp.toLong();
+
+ biTmp = BigInt(secondStr);
+ if(biTmp < long.min || biTmp > long.max)
+ error(tokenStart, "Time span's second is out of range.");
+ auto second = biTmp.toLong();
+
+ long millisecond = 0;
+ if(millisecondStr != "")
+ {
+ biTmp = BigInt(millisecondStr);
+ if(biTmp < long.min || biTmp > long.max)
+ error(tokenStart, "Time span's millisecond is out of range.");
+ millisecond = biTmp.toLong();
+
+ // Scale fractional digits to milliseconds: "5" -> 500, "55" -> 550.
+ if(millisecondStr.length == 1)
+ millisecond *= 100;
+ else if(millisecondStr.length == 2)
+ millisecond *= 10;
+ }
+
+ auto duration =
+ dur!"days" (day) +
+ dur!"hours" (hour) +
+ dur!"minutes"(minute) +
+ dur!"seconds"(second) +
+ dur!"msecs" (millisecond);
+
+ if(isNegative)
+ duration = -duration;
+
+ return duration;
+ }
+
+ // This has to reproduce some weird corner case behaviors from the
+ // original Java version of SDL. So some of this may seem weird.
+ /// Parse an ISO-style "+hh:mm"/"-hh[:mm]" offset string into a Duration.
+ /// Returns a null Nullable for anything unrecognized ("unknown timezone").
+ private Nullable!Duration getTimeZoneOffset(string str)
+ {
+ if(str.length < 2)
+ return Nullable!Duration(); // Unknown timezone
+
+ if(str[0] != '+' && str[0] != '-')
+ return Nullable!Duration(); // Unknown timezone
+
+ auto isNegative = str[0] == '-';
+
+ string numHoursStr;
+ string numMinutesStr;
+ if(str[1] == ':')
+ {
+ // "+:mm" form: no hour digits. numMinutesStr keeps the ':' prefix.
+ numMinutesStr = str[1..$];
+ numHoursStr = "";
+ }
+ else
+ {
+ numMinutesStr = str.find(':');
+ numHoursStr = str[1 .. $-numMinutesStr.length];
+ }
+
+ long numHours = 0;
+ long numMinutes = 0;
+ bool isUnknown = false;
+ try
+ {
+ switch(numHoursStr.length)
+ {
+ case 0:
+ // ":mm" — minutes only (must be exactly two digits after ':').
+ if(numMinutesStr.length == 3)
+ {
+ numHours = 0;
+ numMinutes = to!long(numMinutesStr[1..$]);
+ }
+ else
+ isUnknown = true;
+ break;
+
+ case 1:
+ case 2:
+ // "h" or "hh", optionally followed by ":mm".
+ if(numMinutesStr.length == 0)
+ {
+ numHours = to!long(numHoursStr);
+ numMinutes = 0;
+ }
+ else if(numMinutesStr.length == 3)
+ {
+ numHours = to!long(numHoursStr);
+ numMinutes = to!long(numMinutesStr[1..$]);
+ }
+ else
+ isUnknown = true;
+ break;
+
+ default:
+ // 3+ hour digits with no ':' — Java-compat quirk: treated as minutes.
+ if(numMinutesStr.length == 0)
+ {
+ // Yes, this is correct
+ numHours = 0;
+ numMinutes = to!long(numHoursStr[1..$]);
+ }
+ else
+ isUnknown = true;
+ break;
+ }
+ }
+ catch(ConvException e)
+ isUnknown = true;
+
+ if(isUnknown)
+ return Nullable!Duration(); // Unknown timezone
+
+ auto timeZoneOffset = hours(numHours) + minutes(numMinutes);
+ if(isNegative)
+ timeZoneOffset = -timeZoneOffset;
+
+ // Timezone valid
+ return Nullable!Duration(timeZoneOffset);
+ }
+
+ /// Lex date or datetime (after the initial numeric fragment was lexed)
+ /// Grammar: yyyy/mm/dd, optionally followed — possibly across '\'
+ /// line continuations — by hh:mm[:ss[.fff]][-Timezone].
+ private void lexDate(bool isDateNegative, string yearStr)
+ {
+ assert(ch == '/');
+
+ // Lex months
+ advanceChar(ErrorOnEOF.Yes); // Skip '/'
+ auto monthStr = lexNumericFragment();
+
+ // Lex days
+ if(ch != '/')
+ error("Invalid date format: Missing days.");
+ advanceChar(ErrorOnEOF.Yes); // Skip '/'
+ auto dayStr = lexNumericFragment();
+
+ auto date = makeDate(isDateNegative, yearStr, monthStr, dayStr);
+
+ if(!isEndOfNumber() && ch != '/')
+ error("Dates cannot have suffixes.");
+
+ // Date?
+ if(isEOF)
+ mixin(accept!("Value", "date"));
+
+ auto endOfDate = location;
+
+ // Skip whitespace (and '\' line continuations) between the date and a
+ // possible time part.
+ while(
+ !isEOF &&
+ ( ch == '\\' || ch == '/' || (isWhite(ch) && !isNewline(ch)) )
+ )
+ {
+ if(ch == '\\' && hasNextCh && isNewline(nextCh))
+ {
+ advanceChar(ErrorOnEOF.Yes);
+ if(isAtNewline())
+ advanceChar(ErrorOnEOF.Yes);
+ advanceChar(ErrorOnEOF.No);
+ }
+
+ eatWhite();
+ }
+
+ // Date?
+ if(isEOF || (!isDigit(ch) && ch != '-'))
+ mixin(accept!("Value", "date", "", "endOfDate.index"));
+
+ auto startOfTime = location;
+
+ // Is time negative?
+ bool isTimeNegative = ch == '-';
+ if(isTimeNegative)
+ advanceChar(ErrorOnEOF.Yes);
+
+ // Lex hours
+ auto hourStr = ch == '.'? "" : lexNumericFragment();
+
+ // Lex minutes
+ if(ch != ':')
+ {
+ // No minutes found. Therefore we had a plain Date followed
+ // by a numeric literal, not a DateTime.
+ // Stash the consumed fragment for popFront to re-lex as its own token.
+ lookaheadTokenInfo.exists = true;
+ lookaheadTokenInfo.numericFragment = hourStr;
+ lookaheadTokenInfo.isNegative = isTimeNegative;
+ lookaheadTokenInfo.tokenStart = startOfTime;
+ mixin(accept!("Value", "date", "", "endOfDate.index"));
+ }
+ advanceChar(ErrorOnEOF.Yes); // Skip ':'
+ auto minuteStr = lexNumericFragment();
+
+ // Lex seconds, if exists
+ string secondStr;
+ if(ch == ':')
+ {
+ advanceChar(ErrorOnEOF.Yes); // Skip ':'
+ secondStr = lexNumericFragment();
+ }
+
+ // Lex milliseconds, if exists
+ string millisecondStr;
+ if(ch == '.')
+ {
+ advanceChar(ErrorOnEOF.Yes); // Skip '.'
+ millisecondStr = lexNumericFragment();
+ }
+
+ auto dateTimeFrac = makeDateTimeFrac(isTimeNegative, date, hourStr, minuteStr, secondStr, millisecondStr);
+
+ // Lex zone, if exists
+ if(ch == '-')
+ {
+ advanceChar(ErrorOnEOF.Yes); // Skip '-'
+ auto timezoneStart = location;
+
+ if(!isAlpha(ch))
+ error("Invalid timezone format.");
+
+ while(!isEOF && !isWhite(ch))
+ advanceChar(ErrorOnEOF.No);
+
+ auto timezoneStr = source[timezoneStart.index..location.index];
+ // "GMT±hh:mm" style: fixed offset via SimpleTimeZone.
+ if(timezoneStr.startsWith("GMT"))
+ {
+ auto isoPart = timezoneStr["GMT".length..$];
+ auto offset = getTimeZoneOffset(isoPart);
+
+ if(offset.isNull())
+ {
+ // Unknown time zone
+ mixin(accept!("Value", "DateTimeFracUnknownZone(dateTimeFrac.dateTime, dateTimeFrac.fracSecs, timezoneStr)"));
+ }
+ else
+ {
+ auto timezone = new immutable SimpleTimeZone(offset.get());
+ mixin(accept!("Value", "SysTime(dateTimeFrac.dateTime, dateTimeFrac.fracSecs, timezone)"));
+ }
+ }
+
+ // Otherwise, try to resolve a named time zone.
+ try
+ {
+ auto timezone = TimeZone.getTimeZone(timezoneStr);
+ if(timezone)
+ mixin(accept!("Value", "SysTime(dateTimeFrac.dateTime, dateTimeFrac.fracSecs, timezone)"));
+ }
+ catch(TimeException e)
+ {
+ // Time zone not found. So just move along to "Unknown time zone" below.
+ }
+
+ // Unknown time zone
+ mixin(accept!("Value", "DateTimeFracUnknownZone(dateTimeFrac.dateTime, dateTimeFrac.fracSecs, timezoneStr)"));
+ }
+
+ if(!isEndOfNumber())
+ error("Date-Times cannot have suffixes.");
+
+ mixin(accept!("Value", "dateTimeFrac"));
+ }
+
+ /// Lex time span (after the initial numeric fragment was lexed)
+ /// Format: [days'd':]hh:mm:ss[.fff]; firstPart is either the days or
+ /// the hours fragment depending on whether 'd' follows it.
+ private void lexTimeSpan(bool isNegative, string firstPart)
+ {
+ assert(ch == ':' || ch == 'd');
+
+ string dayStr = "";
+ string hourStr;
+
+ // Lexed days?
+ bool hasDays = ch == 'd';
+ if(hasDays)
+ {
+ dayStr = firstPart;
+ advanceChar(ErrorOnEOF.Yes); // Skip 'd'
+
+ // Lex hours
+ if(ch != ':')
+ error("Invalid time span format: Missing hours.");
+ advanceChar(ErrorOnEOF.Yes); // Skip ':'
+ hourStr = lexNumericFragment();
+ }
+ else
+ hourStr = firstPart;
+
+ // Lex minutes
+ if(ch != ':')
+ error("Invalid time span format: Missing minutes.");
+ advanceChar(ErrorOnEOF.Yes); // Skip ':'
+ auto minuteStr = lexNumericFragment();
+
+ // Lex seconds
+ if(ch != ':')
+ error("Invalid time span format: Missing seconds.");
+ advanceChar(ErrorOnEOF.Yes); // Skip ':'
+ auto secondStr = lexNumericFragment();
+
+ // Lex milliseconds, if exists
+ string millisecondStr = "";
+ if(ch == '.')
+ {
+ advanceChar(ErrorOnEOF.Yes); // Skip '.'
+ millisecondStr = lexNumericFragment();
+ }
+
+ if(!isEndOfNumber())
+ error("Time spans cannot have suffixes.");
+
+ auto duration = makeDuration(isNegative, dayStr, hourStr, minuteStr, secondStr, millisecondStr);
+ mixin(accept!("Value", "duration"));
+ }
+
+ /// Advances past whitespace and comments
+ /// Stops at the first significant character (or a raw newline, which is
+ /// a token). With allowComments=false, also stops at any comment
+ /// introducer instead of consuming it (used while inside strings).
+ /// A '\' starts a line continuation: exactly one following newline must
+ /// be consumed, with only whitespace allowed in between.
+ private void eatWhite(bool allowComments=true)
+ {
+ // -- Comment/Whitepace Lexer -------------
+
+ enum State
+ {
+ normal,
+ lineComment, // Got "#" or "//" or "--", Eating everything until newline
+ blockComment, // Got "/*", Eating everything until "*/"
+ }
+
+ if(isEOF)
+ return;
+
+ Location commentStart;
+ State state = State.normal;
+ bool consumeNewlines = false; // true after a '\' line-continuation backslash
+ bool hasConsumedNewline = false;
+ while(true)
+ {
+ final switch(state)
+ {
+ case State.normal:
+
+ if(ch == '\\')
+ {
+ commentStart = location;
+ consumeNewlines = true;
+ hasConsumedNewline = false;
+ }
+
+ else if(ch == '#')
+ {
+ if(!allowComments)
+ return;
+
+ commentStart = location;
+ state = State.lineComment;
+ continue;
+ }
+
+ else if(ch == '/' || ch == '-')
+ {
+ commentStart = location;
+ // "//" or "--" starts a line comment.
+ if(lookahead(ch))
+ {
+ if(!allowComments)
+ return;
+
+ advanceChar(ErrorOnEOF.No);
+ state = State.lineComment;
+ continue;
+ }
+ else if(ch == '/' && lookahead('*'))
+ {
+ if(!allowComments)
+ return;
+
+ advanceChar(ErrorOnEOF.No);
+ state = State.blockComment;
+ continue;
+ }
+ else
+ return; // Done
+ }
+ else if(isAtNewline())
+ {
+ if(consumeNewlines)
+ hasConsumedNewline = true;
+ else
+ return; // Done
+ }
+ else if(!isWhite(ch))
+ {
+ if(consumeNewlines)
+ {
+ if(hasConsumedNewline)
+ return; // Done
+ else
+ error("Only whitespace can come between a line-continuation backslash and the following newline.");
+ }
+ else
+ return; // Done
+ }
+
+ break;
+
+ case State.lineComment:
+ // Leave the newline itself for the normal state to handle.
+ if(lookahead(&isNewline))
+ state = State.normal;
+ break;
+
+ case State.blockComment:
+ if(ch == '*' && lookahead('/'))
+ {
+ advanceChar(ErrorOnEOF.No);
+ state = State.normal;
+ }
+ break;
+ }
+
+ advanceChar(ErrorOnEOF.No);
+ if(isEOF)
+ {
+ // Reached EOF
+
+ if(consumeNewlines && !hasConsumedNewline)
+ error("Missing newline after line-continuation backslash.");
+
+ else if(state == State.blockComment)
+ error(commentStart, "Unterminated block comment.");
+
+ else
+ return; // Done, reached EOF
+ }
+ }
+ }
+}
+
+// Shared helpers for the lexer's unit tests.
+version(sdlangUnittest)
+{
+ import std.stdio;
+
+ // Two distinct locations: the unittest below relies on Token equality
+ // ignoring location (and the trailing data string).
+ private auto loc = Location("filename", 0, 0, 0);
+ private auto loc2 = Location("a", 1, 1, 1);
+
+ unittest
+ {
+ assert([Token(symbol!"EOL",loc) ] == [Token(symbol!"EOL",loc) ] );
+ assert([Token(symbol!"EOL",loc,Value(7),"A")] == [Token(symbol!"EOL",loc2,Value(7),"B")] );
+ }
+
+ // Running count of failed checks; tests report failures instead of aborting.
+ private int numErrors = 0;
+ /// Lex 'source' and compare the tokens against 'expected', printing a
+ /// detailed diff (and bumping numErrors) on mismatch. Locations are
+ /// only compared when test_locations is set.
+ private void testLex(string source, Token[] expected, bool test_locations = false, string file=__FILE__, size_t line=__LINE__)
+ {
+ Token[] actual;
+ try
+ actual = lexSource(source, "filename");
+ catch(SDLangParseException e)
+ {
+ numErrors++;
+ stderr.writeln(file, "(", line, "): testLex failed on: ", source);
+ stderr.writeln(" Expected:");
+ stderr.writeln(" ", expected);
+ stderr.writeln(" Actual: SDLangParseException thrown:");
+ stderr.writeln(" ", e.msg);
+ return;
+ }
+
+ bool is_same = actual == expected;
+ if (is_same && test_locations) {
+ is_same = actual.map!(t => t.location).equal(expected.map!(t => t.location));
+ }
+
+ if(!is_same)
+ {
+ numErrors++;
+ stderr.writeln(file, "(", line, "): testLex failed on: ", source);
+ stderr.writeln(" Expected:");
+ stderr.writeln(" ", expected);
+ stderr.writeln(" Actual:");
+ stderr.writeln(" ", actual);
+
+ if(expected.length > 1 || actual.length > 1)
+ {
+ stderr.writeln(" expected.length: ", expected.length);
+ stderr.writeln(" actual.length: ", actual.length);
+
+ if(actual.length == expected.length)
+ foreach(i; 0..actual.length)
+ if(actual[i] != expected[i])
+ {
+ stderr.writeln(" Unequal at index #", i, ":");
+ stderr.writeln(" Expected:");
+ stderr.writeln(" ", expected[i]);
+ stderr.writeln(" Actual:");
+ stderr.writeln(" ", actual[i]);
+ }
+ }
+ }
+ }
+
+ /// Assert that lexing 'source' throws SDLangParseException.
+ private void testLexThrows(string file=__FILE__, size_t line=__LINE__)(string source)
+ {
+ bool hadException = false;
+ Token[] actual;
+ try
+ actual = lexSource(source, "filename");
+ catch(SDLangParseException e)
+ hadException = true;
+
+ if(!hadException)
+ {
+ numErrors++;
+ stderr.writeln(file, "(", line, "): testLex failed on: ", source);
+ stderr.writeln(" Expected SDLangParseException");
+ stderr.writeln(" Actual:");
+ stderr.writeln(" ", actual);
+ }
+ }
+}
+
+version(sdlangUnittest)
+unittest
+{
+ writeln("Unittesting sdlang lexer...");
+ stdout.flush();
+
+ testLex("", []);
+ testLex(" ", []);
+ testLex("\\\n", []);
+ testLex("/*foo*/", []);
+ testLex("/* multiline \n comment */", []);
+ testLex("/* * */", []);
+ testLexThrows("/* ");
+
+ testLex(":", [ Token(symbol!":", loc) ]);
+ testLex("=", [ Token(symbol!"=", loc) ]);
+ testLex("{", [ Token(symbol!"{", loc) ]);
+ testLex("}", [ Token(symbol!"}", loc) ]);
+ testLex(";", [ Token(symbol!"EOL",loc) ]);
+ testLex("\n", [ Token(symbol!"EOL",loc) ]);
+
+ testLex("foo", [ Token(symbol!"Ident",loc,Value(null),"foo") ]);
+ testLex("_foo", [ Token(symbol!"Ident",loc,Value(null),"_foo") ]);
+ testLex("foo.bar", [ Token(symbol!"Ident",loc,Value(null),"foo.bar") ]);
+ testLex("foo-bar", [ Token(symbol!"Ident",loc,Value(null),"foo-bar") ]);
+ testLex("foo.", [ Token(symbol!"Ident",loc,Value(null),"foo.") ]);
+ testLex("foo-", [ Token(symbol!"Ident",loc,Value(null),"foo-") ]);
+ testLexThrows(".foo");
+
+ testLex("foo bar", [
+ Token(symbol!"Ident",loc,Value(null),"foo"),
+ Token(symbol!"Ident",loc,Value(null),"bar"),
+ ]);
+ testLex("foo \\ \n \n bar", [
+ Token(symbol!"Ident",loc,Value(null),"foo"),
+ Token(symbol!"Ident",loc,Value(null),"bar"),
+ ]);
+ testLex("foo \\ \n \\ \n bar", [
+ Token(symbol!"Ident",loc,Value(null),"foo"),
+ Token(symbol!"Ident",loc,Value(null),"bar"),
+ ]);
+ testLexThrows("foo \\ ");
+ testLexThrows("foo \\ bar");
+ testLexThrows("foo \\ \n \\ ");
+ testLexThrows("foo \\ \n \\ bar");
+
+ testLex("foo : = { } ; \n bar \n", [
+ Token(symbol!"Ident",loc,Value(null),"foo"),
+ Token(symbol!":",loc),
+ Token(symbol!"=",loc),
+ Token(symbol!"{",loc),
+ Token(symbol!"}",loc),
+ Token(symbol!"EOL",loc),
+ Token(symbol!"EOL",loc),
+ Token(symbol!"Ident",loc,Value(null),"bar"),
+ Token(symbol!"EOL",loc),
+ ]);
+
+ testLexThrows("<");
+ testLexThrows("*");
+ testLexThrows(`\`);
+
+ // Integers
+ testLex( "7", [ Token(symbol!"Value",loc,Value(cast( int) 7)) ]);
+ testLex( "-7", [ Token(symbol!"Value",loc,Value(cast( int)-7)) ]);
+ testLex( "7L", [ Token(symbol!"Value",loc,Value(cast(long) 7)) ]);
+ testLex( "7l", [ Token(symbol!"Value",loc,Value(cast(long) 7)) ]);
+ testLex("-7L", [ Token(symbol!"Value",loc,Value(cast(long)-7)) ]);
+ testLex( "0", [ Token(symbol!"Value",loc,Value(cast( int) 0)) ]);
+ testLex( "-0", [ Token(symbol!"Value",loc,Value(cast( int) 0)) ]);
+
+ testLex("7/**/", [ Token(symbol!"Value",loc,Value(cast( int) 7)) ]);
+ testLex("7#", [ Token(symbol!"Value",loc,Value(cast( int) 7)) ]);
+
+ testLex("7 A", [
+ Token(symbol!"Value",loc,Value(cast(int)7)),
+ Token(symbol!"Ident",loc,Value( null),"A"),
+ ]);
+ testLexThrows("7A");
+ testLexThrows("-A");
+ testLexThrows(`-""`);
+
+ testLex("7;", [
+ Token(symbol!"Value",loc,Value(cast(int)7)),
+ Token(symbol!"EOL",loc),
+ ]);
+
+ // Floats
+ testLex("1.2F" , [ Token(symbol!"Value",loc,Value(cast( float)1.2)) ]);
+ testLex("1.2f" , [ Token(symbol!"Value",loc,Value(cast( float)1.2)) ]);
+ testLex("1.2" , [ Token(symbol!"Value",loc,Value(cast(double)1.2)) ]);
+ testLex("1.2D" , [ Token(symbol!"Value",loc,Value(cast(double)1.2)) ]);
+ testLex("1.2d" , [ Token(symbol!"Value",loc,Value(cast(double)1.2)) ]);
+ testLex("1.2BD", [ Token(symbol!"Value",loc,Value(cast( real)1.2)) ]);
+ testLex("1.2bd", [ Token(symbol!"Value",loc,Value(cast( real)1.2)) ]);
+ testLex("1.2Bd", [ Token(symbol!"Value",loc,Value(cast( real)1.2)) ]);
+ testLex("1.2bD", [ Token(symbol!"Value",loc,Value(cast( real)1.2)) ]);
+
+ testLex(".2F" , [ Token(symbol!"Value",loc,Value(cast( float)0.2)) ]);
+ testLex(".2" , [ Token(symbol!"Value",loc,Value(cast(double)0.2)) ]);
+ testLex(".2D" , [ Token(symbol!"Value",loc,Value(cast(double)0.2)) ]);
+ testLex(".2BD", [ Token(symbol!"Value",loc,Value(cast( real)0.2)) ]);
+
+ testLex("-1.2F" , [ Token(symbol!"Value",loc,Value(cast( float)-1.2)) ]);
+ testLex("-1.2" , [ Token(symbol!"Value",loc,Value(cast(double)-1.2)) ]);
+ testLex("-1.2D" , [ Token(symbol!"Value",loc,Value(cast(double)-1.2)) ]);
+ testLex("-1.2BD", [ Token(symbol!"Value",loc,Value(cast( real)-1.2)) ]);
+
+ testLex("-.2F" , [ Token(symbol!"Value",loc,Value(cast( float)-0.2)) ]);
+ testLex("-.2" , [ Token(symbol!"Value",loc,Value(cast(double)-0.2)) ]);
+ testLex("-.2D" , [ Token(symbol!"Value",loc,Value(cast(double)-0.2)) ]);
+ testLex("-.2BD", [ Token(symbol!"Value",loc,Value(cast( real)-0.2)) ]);
+
+ testLex( "0.0" , [ Token(symbol!"Value",loc,Value(cast(double)0.0)) ]);
+ testLex( "0.0F" , [ Token(symbol!"Value",loc,Value(cast( float)0.0)) ]);
+ testLex( "0.0BD", [ Token(symbol!"Value",loc,Value(cast( real)0.0)) ]);
+ testLex("-0.0" , [ Token(symbol!"Value",loc,Value(cast(double)0.0)) ]);
+ testLex("-0.0F" , [ Token(symbol!"Value",loc,Value(cast( float)0.0)) ]);
+ testLex("-0.0BD", [ Token(symbol!"Value",loc,Value(cast( real)0.0)) ]);
+ testLex( "7F" , [ Token(symbol!"Value",loc,Value(cast( float)7.0)) ]);
+ testLex( "7D" , [ Token(symbol!"Value",loc,Value(cast(double)7.0)) ]);
+ testLex( "7BD" , [ Token(symbol!"Value",loc,Value(cast( real)7.0)) ]);
+ testLex( "0F" , [ Token(symbol!"Value",loc,Value(cast( float)0.0)) ]);
+ testLex( "0D" , [ Token(symbol!"Value",loc,Value(cast(double)0.0)) ]);
+ testLex( "0BD" , [ Token(symbol!"Value",loc,Value(cast( real)0.0)) ]);
+ testLex("-0F" , [ Token(symbol!"Value",loc,Value(cast( float)0.0)) ]);
+ testLex("-0D" , [ Token(symbol!"Value",loc,Value(cast(double)0.0)) ]);
+ testLex("-0BD" , [ Token(symbol!"Value",loc,Value(cast( real)0.0)) ]);
+
+ testLex("1.2 F", [
+ Token(symbol!"Value",loc,Value(cast(double)1.2)),
+ Token(symbol!"Ident",loc,Value( null),"F"),
+ ]);
+ testLexThrows("1.2A");
+ testLexThrows("1.2B");
+ testLexThrows("1.2BDF");
+
+ testLex("1.2;", [
+ Token(symbol!"Value",loc,Value(cast(double)1.2)),
+ Token(symbol!"EOL",loc),
+ ]);
+
+ testLex("1.2F;", [
+ Token(symbol!"Value",loc,Value(cast(float)1.2)),
+ Token(symbol!"EOL",loc),
+ ]);
+
+ testLex("1.2BD;", [
+ Token(symbol!"Value",loc,Value(cast(real)1.2)),
+ Token(symbol!"EOL",loc),
+ ]);
+
+ // Booleans and null
+ testLex("true", [ Token(symbol!"Value",loc,Value( true)) ]);
+ testLex("false", [ Token(symbol!"Value",loc,Value(false)) ]);
+ testLex("on", [ Token(symbol!"Value",loc,Value( true)) ]);
+ testLex("off", [ Token(symbol!"Value",loc,Value(false)) ]);
+ testLex("null", [ Token(symbol!"Value",loc,Value( null)) ]);
+
+ testLex("TRUE", [ Token(symbol!"Ident",loc,Value(null),"TRUE") ]);
+ testLex("true ", [ Token(symbol!"Value",loc,Value(true)) ]);
+ testLex("true ", [ Token(symbol!"Value",loc,Value(true)) ]);
+ testLex("tru", [ Token(symbol!"Ident",loc,Value(null),"tru") ]);
+ testLex("truX", [ Token(symbol!"Ident",loc,Value(null),"truX") ]);
+ testLex("trueX", [ Token(symbol!"Ident",loc,Value(null),"trueX") ]);
+
+ // Raw Backtick Strings
+ testLex("`hello world`", [ Token(symbol!"Value",loc,Value(`hello world` )) ]);
+ testLex("` hello world `", [ Token(symbol!"Value",loc,Value(` hello world ` )) ]);
+ testLex("`hello \\t world`", [ Token(symbol!"Value",loc,Value(`hello \t world`)) ]);
+ testLex("`hello \\n world`", [ Token(symbol!"Value",loc,Value(`hello \n world`)) ]);
+ testLex("`hello \n world`", [ Token(symbol!"Value",loc,Value("hello \n world")) ]);
+ testLex("`hello \r\n world`", [ Token(symbol!"Value",loc,Value("hello \r\n world")) ]);
+ testLex("`hello \"world\"`", [ Token(symbol!"Value",loc,Value(`hello "world"` )) ]);
+
+ testLexThrows("`foo");
+ testLexThrows("`");
+
+ // Double-Quote Strings
+ testLex(`"hello world"`, [ Token(symbol!"Value",loc,Value("hello world" )) ]);
+ testLex(`" hello world "`, [ Token(symbol!"Value",loc,Value(" hello world " )) ]);
+ testLex(`"hello \t world"`, [ Token(symbol!"Value",loc,Value("hello \t world")) ]);
+ testLex(`"hello \n world"`, [ Token(symbol!"Value",loc,Value("hello \n world")) ]);
+ testLex("\"hello \\\n world\"", [ Token(symbol!"Value",loc,Value("hello world" )) ]);
+ testLex("\"hello \\ \n world\"", [ Token(symbol!"Value",loc,Value("hello world" )) ]);
+ testLex("\"hello \\ \n\n world\"", [ Token(symbol!"Value",loc,Value("hello world" )) ]);
+ testLex(`"\"hello world\""`, [ Token(symbol!"Value",loc,Value(`"hello world"` )) ]);
+ testLex(`""`, [ Token(symbol!"Value",loc,Value("" )) ]); // issue #34
+
+ testLexThrows("\"hello \n world\"");
+ testLexThrows(`"foo`);
+ testLexThrows(`"`);
+
+ // Characters
+ testLex("'a'", [ Token(symbol!"Value",loc,Value(cast(dchar) 'a')) ]);
+ testLex("'\\n'", [ Token(symbol!"Value",loc,Value(cast(dchar)'\n')) ]);
+ testLex("'\\t'", [ Token(symbol!"Value",loc,Value(cast(dchar)'\t')) ]);
+ testLex("'\t'", [ Token(symbol!"Value",loc,Value(cast(dchar)'\t')) ]);
+ testLex("'\\''", [ Token(symbol!"Value",loc,Value(cast(dchar)'\'')) ]);
+ testLex(`'\\'`, [ Token(symbol!"Value",loc,Value(cast(dchar)'\\')) ]);
+
+ testLexThrows("'a");
+ testLexThrows("'aa'");
+ testLexThrows("''");
+ testLexThrows("'\\\n'");
+ testLexThrows("'\n'");
+ testLexThrows(`'\`);
+ testLexThrows(`'\'`);
+ testLexThrows("'");
+
+ // Unicode
+ testLex("日本語", [ Token(symbol!"Ident",loc,Value(null), "日本語") ]);
+ testLex("`おはよう、日本。`", [ Token(symbol!"Value",loc,Value(`おはよう、日本。`)) ]);
+ testLex(`"おはよう、日本。"`, [ Token(symbol!"Value",loc,Value(`おはよう、日本。`)) ]);
+ testLex("'月'", [ Token(symbol!"Value",loc,Value("月"d.dup[0])) ]);
+
+ // Base64 Binary
+ testLex("[aGVsbG8gd29ybGQ=]", [ Token(symbol!"Value",loc,Value(cast(ubyte[])"hello world".dup))]);
+ testLex("[ aGVsbG8gd29ybGQ= ]", [ Token(symbol!"Value",loc,Value(cast(ubyte[])"hello world".dup))]);
+ testLex("[\n aGVsbG8g \n \n d29ybGQ= \n]", [ Token(symbol!"Value",loc,Value(cast(ubyte[])"hello world".dup))]);
+
+ testLexThrows("[aGVsbG8gd29ybGQ]"); // I.e.: not a multiple of 4
+ testLexThrows("[ aGVsbG8gd29ybGQ ]");
+
+ // Date
+ testLex( "1999/12/5", [ Token(symbol!"Value",loc,Value(Date( 1999, 12, 5))) ]);
+ testLex( "2013/2/22", [ Token(symbol!"Value",loc,Value(Date( 2013, 2, 22))) ]);
+ testLex("-2013/2/22", [ Token(symbol!"Value",loc,Value(Date(-2013, 2, 22))) ]);
+
+ testLexThrows("7/");
+ testLexThrows("2013/2/22a");
+ testLexThrows("2013/2/22f");
+
+ testLex("1999/12/5\n", [
+ Token(symbol!"Value",loc,Value(Date(1999, 12, 5))),
+ Token(symbol!"EOL",loc),
+ ]);
+
+ // DateTime, no timezone
+ testLex( "2013/2/22 07:53", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 0)))) ]);
+ testLex( "2013/2/22 \t 07:53", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 0)))) ]);
+ testLex( "2013/2/22/*foo*/07:53", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 0)))) ]);
+ testLex( "2013/2/22 /*foo*/ \\\n /*bar*/ 07:53", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 0)))) ]);
+ testLex( "2013/2/22 /*foo*/ \\\n\n \n /*bar*/ 07:53", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 0)))) ]);
+ testLex( "2013/2/22 /*foo*/ \\\n\\\n \\\n /*bar*/ 07:53", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 0)))) ]);
+ testLex( "2013/2/22/*foo*/\\\n/*bar*/07:53", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 0)))) ]);
+ testLex("-2013/2/22 07:53", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime(-2013, 2, 22, 7, 53, 0)))) ]);
+ testLex( "2013/2/22 -07:53", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 0, 0, 0) - hours(7) - minutes(53)))) ]);
+ testLex("-2013/2/22 -07:53", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime(-2013, 2, 22, 0, 0, 0) - hours(7) - minutes(53)))) ]);
+ testLex( "2013/2/22 07:53:34", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 34)))) ]);
+ testLex( "2013/2/22 07:53:34.123", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 34), 123.msecs))) ]);
+ testLex( "2013/2/22 07:53:34.12", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 34), 120.msecs))) ]);
+ testLex( "2013/2/22 07:53:34.1", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 34), 100.msecs))) ]);
+ testLex( "2013/2/22 07:53.123", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 7, 53, 0), 123.msecs))) ]);
+
+ testLex( "2013/2/22 34:65", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 0, 0, 0) + hours(34) + minutes(65) + seconds( 0)))) ]);
+ testLex( "2013/2/22 34:65:77.123", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 0, 0, 0) + hours(34) + minutes(65) + seconds(77), 123.msecs))) ]);
+ testLex( "2013/2/22 34:65.123", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 0, 0, 0) + hours(34) + minutes(65) + seconds( 0), 123.msecs))) ]);
+
+ testLex( "2013/2/22 -34:65", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 0, 0, 0) - hours(34) - minutes(65) - seconds( 0)))) ]);
+ testLex( "2013/2/22 -34:65:77.123", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 0, 0, 0) - hours(34) - minutes(65) - seconds(77), -123.msecs))) ]);
+ testLex( "2013/2/22 -34:65.123", [ Token(symbol!"Value",loc,Value(DateTimeFrac(DateTime( 2013, 2, 22, 0, 0, 0) - hours(34) - minutes(65) - seconds( 0), -123.msecs))) ]);
+
+ testLexThrows("2013/2/22 07:53a");
+ testLexThrows("2013/2/22 07:53f");
+ testLexThrows("2013/2/22 07:53:34.123a");
+ testLexThrows("2013/2/22 07:53:34.123f");
+ testLexThrows("2013/2/22a 07:53");
+
+ testLex(`2013/2/22 "foo"`, [
+ Token(symbol!"Value",loc,Value(Date(2013, 2, 22))),
+ Token(symbol!"Value",loc,Value("foo")),
+ ]);
+
+ testLex("2013/2/22 07", [
+ Token(symbol!"Value",loc,Value(Date(2013, 2, 22))),
+ Token(symbol!"Value",loc,Value(cast(int)7)),
+ ]);
+
+ testLex("2013/2/22 1.2F", [
+ Token(symbol!"Value",loc,Value(Date(2013, 2, 22))),
+ Token(symbol!"Value",loc,Value(cast(float)1.2)),
+ ]);
+
+ testLex("2013/2/22 .2F", [
+ Token(symbol!"Value",loc,Value(Date(2013, 2, 22))),
+ Token(symbol!"Value",loc,Value(cast(float)0.2)),
+ ]);
+
+ testLex("2013/2/22 -1.2F", [
+ Token(symbol!"Value",loc,Value(Date(2013, 2, 22))),
+ Token(symbol!"Value",loc,Value(cast(float)-1.2)),
+ ]);
+
+ testLex("2013/2/22 -.2F", [
+ Token(symbol!"Value",loc,Value(Date(2013, 2, 22))),
+ Token(symbol!"Value",loc,Value(cast(float)-0.2)),
+ ]);
+
+ // DateTime, with known timezone
+ testLex( "2013/2/22 07:53-GMT+00:00", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 0), new immutable SimpleTimeZone( hours(0) )))) ]);
+ testLex("-2013/2/22 07:53-GMT+00:00", [ Token(symbol!"Value",loc,Value(SysTime(DateTime(-2013, 2, 22, 7, 53, 0), new immutable SimpleTimeZone( hours(0) )))) ]);
+ testLex( "2013/2/22 -07:53-GMT+00:00", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 0, 0, 0) - hours(7) - minutes(53), new immutable SimpleTimeZone( hours(0) )))) ]);
+ testLex("-2013/2/22 -07:53-GMT+00:00", [ Token(symbol!"Value",loc,Value(SysTime(DateTime(-2013, 2, 22, 0, 0, 0) - hours(7) - minutes(53), new immutable SimpleTimeZone( hours(0) )))) ]);
+ testLex( "2013/2/22 07:53-GMT+02:10", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 0), new immutable SimpleTimeZone( hours(2)+minutes(10))))) ]);
+ testLex( "2013/2/22 07:53-GMT-05:30", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 0), new immutable SimpleTimeZone(-hours(5)-minutes(30))))) ]);
+ testLex( "2013/2/22 07:53:34-GMT+00:00", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 34), new immutable SimpleTimeZone( hours(0) )))) ]);
+ testLex( "2013/2/22 07:53:34-GMT+02:10", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 34), new immutable SimpleTimeZone( hours(2)+minutes(10))))) ]);
+ testLex( "2013/2/22 07:53:34-GMT-05:30", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 34), new immutable SimpleTimeZone(-hours(5)-minutes(30))))) ]);
+ testLex( "2013/2/22 07:53:34.123-GMT+00:00", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 34), 123.msecs, new immutable SimpleTimeZone( hours(0) )))) ]);
+ testLex( "2013/2/22 07:53:34.123-GMT+02:10", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 34), 123.msecs, new immutable SimpleTimeZone( hours(2)+minutes(10))))) ]);
+ testLex( "2013/2/22 07:53:34.123-GMT-05:30", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 34), 123.msecs, new immutable SimpleTimeZone(-hours(5)-minutes(30))))) ]);
+ testLex( "2013/2/22 07:53.123-GMT+00:00", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 0), 123.msecs, new immutable SimpleTimeZone( hours(0) )))) ]);
+ testLex( "2013/2/22 07:53.123-GMT+02:10", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 0), 123.msecs, new immutable SimpleTimeZone( hours(2)+minutes(10))))) ]);
+ testLex( "2013/2/22 07:53.123-GMT-05:30", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 7, 53, 0), 123.msecs, new immutable SimpleTimeZone(-hours(5)-minutes(30))))) ]);
+
+ testLex( "2013/2/22 -34:65-GMT-05:30", [ Token(symbol!"Value",loc,Value(SysTime(DateTime( 2013, 2, 22, 0, 0, 0) - hours(34) - minutes(65) - seconds( 0), new immutable SimpleTimeZone(-hours(5)-minutes(30))))) ]);
+
+ // DateTime, with Java SDL's occasionally weird interpretation of some
+ // "not quite ISO" variations of the "GMT with offset" timezone strings.
+ Token testTokenSimpleTimeZone(Duration d)
+ {
+ auto dateTime = DateTime(2013, 2, 22, 7, 53, 0);
+ auto tz = new immutable SimpleTimeZone(d);
+ return Token( symbol!"Value", loc, Value(SysTime(dateTime,tz)) );
+ }
+ Token testTokenUnknownTimeZone(string tzName)
+ {
+ auto dateTime = DateTime(2013, 2, 22, 7, 53, 0);
+ auto frac = 0.msecs;
+ return Token( symbol!"Value", loc, Value(DateTimeFracUnknownZone(dateTime,frac,tzName)) );
+ }
+ testLex("2013/2/22 07:53-GMT+", [ testTokenUnknownTimeZone("GMT+") ]);
+ testLex("2013/2/22 07:53-GMT+:", [ testTokenUnknownTimeZone("GMT+:") ]);
+ testLex("2013/2/22 07:53-GMT+:3", [ testTokenUnknownTimeZone("GMT+:3") ]);
+ testLex("2013/2/22 07:53-GMT+:03", [ testTokenSimpleTimeZone(minutes(3)) ]);
+ testLex("2013/2/22 07:53-GMT+:003", [ testTokenUnknownTimeZone("GMT+:003") ]);
+
+ testLex("2013/2/22 07:53-GMT+4", [ testTokenSimpleTimeZone(hours(4)) ]);
+ testLex("2013/2/22 07:53-GMT+4:", [ testTokenUnknownTimeZone("GMT+4:") ]);
+ testLex("2013/2/22 07:53-GMT+4:3", [ testTokenUnknownTimeZone("GMT+4:3") ]);
+ testLex("2013/2/22 07:53-GMT+4:03", [ testTokenSimpleTimeZone(hours(4)+minutes(3)) ]);
+ testLex("2013/2/22 07:53-GMT+4:003", [ testTokenUnknownTimeZone("GMT+4:003") ]);
+
+ testLex("2013/2/22 07:53-GMT+04", [ testTokenSimpleTimeZone(hours(4)) ]);
+ testLex("2013/2/22 07:53-GMT+04:", [ testTokenUnknownTimeZone("GMT+04:") ]);
+ testLex("2013/2/22 07:53-GMT+04:3", [ testTokenUnknownTimeZone("GMT+04:3") ]);
+ testLex("2013/2/22 07:53-GMT+04:03", [ testTokenSimpleTimeZone(hours(4)+minutes(3)) ]);
+ testLex("2013/2/22 07:53-GMT+04:03abc", [ testTokenUnknownTimeZone("GMT+04:03abc") ]);
+ testLex("2013/2/22 07:53-GMT+04:003", [ testTokenUnknownTimeZone("GMT+04:003") ]);
+
+ testLex("2013/2/22 07:53-GMT+004", [ testTokenSimpleTimeZone(minutes(4)) ]);
+ testLex("2013/2/22 07:53-GMT+004:", [ testTokenUnknownTimeZone("GMT+004:") ]);
+ testLex("2013/2/22 07:53-GMT+004:3", [ testTokenUnknownTimeZone("GMT+004:3") ]);
+ testLex("2013/2/22 07:53-GMT+004:03", [ testTokenUnknownTimeZone("GMT+004:03") ]);
+ testLex("2013/2/22 07:53-GMT+004:003", [ testTokenUnknownTimeZone("GMT+004:003") ]);
+
+ testLex("2013/2/22 07:53-GMT+0004", [ testTokenSimpleTimeZone(minutes(4)) ]);
+ testLex("2013/2/22 07:53-GMT+0004:", [ testTokenUnknownTimeZone("GMT+0004:") ]);
+ testLex("2013/2/22 07:53-GMT+0004:3", [ testTokenUnknownTimeZone("GMT+0004:3") ]);
+ testLex("2013/2/22 07:53-GMT+0004:03", [ testTokenUnknownTimeZone("GMT+0004:03") ]);
+ testLex("2013/2/22 07:53-GMT+0004:003", [ testTokenUnknownTimeZone("GMT+0004:003") ]);
+
+ testLex("2013/2/22 07:53-GMT+00004", [ testTokenSimpleTimeZone(minutes(4)) ]);
+ testLex("2013/2/22 07:53-GMT+00004:", [ testTokenUnknownTimeZone("GMT+00004:") ]);
+ testLex("2013/2/22 07:53-GMT+00004:3", [ testTokenUnknownTimeZone("GMT+00004:3") ]);
+ testLex("2013/2/22 07:53-GMT+00004:03", [ testTokenUnknownTimeZone("GMT+00004:03") ]);
+ testLex("2013/2/22 07:53-GMT+00004:003", [ testTokenUnknownTimeZone("GMT+00004:003") ]);
+
+ // DateTime, with unknown timezone
+ testLex( "2013/2/22 07:53-Bogus/Foo", [ Token(symbol!"Value",loc,Value(DateTimeFracUnknownZone(DateTime( 2013, 2, 22, 7, 53, 0), 0.msecs, "Bogus/Foo")), "2013/2/22 07:53-Bogus/Foo") ]);
+ testLex("-2013/2/22 07:53-Bogus/Foo", [ Token(symbol!"Value",loc,Value(DateTimeFracUnknownZone(DateTime(-2013, 2, 22, 7, 53, 0), 0.msecs, "Bogus/Foo"))) ]);
+ testLex( "2013/2/22 -07:53-Bogus/Foo", [ Token(symbol!"Value",loc,Value(DateTimeFracUnknownZone(DateTime( 2013, 2, 22, 0, 0, 0) - hours(7) - minutes(53), 0.msecs, "Bogus/Foo"))) ]);
+ testLex("-2013/2/22 -07:53-Bogus/Foo", [ Token(symbol!"Value",loc,Value(DateTimeFracUnknownZone(DateTime(-2013, 2, 22, 0, 0, 0) - hours(7) - minutes(53), 0.msecs, "Bogus/Foo"))) ]);
+ testLex( "2013/2/22 07:53:34-Bogus/Foo", [ Token(symbol!"Value",loc,Value(DateTimeFracUnknownZone(DateTime( 2013, 2, 22, 7, 53, 34), 0.msecs, "Bogus/Foo"))) ]);
+ testLex( "2013/2/22 07:53:34.123-Bogus/Foo", [ Token(symbol!"Value",loc,Value(DateTimeFracUnknownZone(DateTime( 2013, 2, 22, 7, 53, 34), 123.msecs, "Bogus/Foo"))) ]);
+ testLex( "2013/2/22 07:53.123-Bogus/Foo", [ Token(symbol!"Value",loc,Value(DateTimeFracUnknownZone(DateTime( 2013, 2, 22, 7, 53, 0), 123.msecs, "Bogus/Foo"))) ]);
+
+ // Time Span
+ testLex( "12:14:42", [ Token(symbol!"Value",loc,Value( days( 0)+hours(12)+minutes(14)+seconds(42)+msecs( 0))) ]);
+ testLex("-12:14:42", [ Token(symbol!"Value",loc,Value(-days( 0)-hours(12)-minutes(14)-seconds(42)-msecs( 0))) ]);
+ testLex( "00:09:12", [ Token(symbol!"Value",loc,Value( days( 0)+hours( 0)+minutes( 9)+seconds(12)+msecs( 0))) ]);
+ testLex( "00:00:01.023", [ Token(symbol!"Value",loc,Value( days( 0)+hours( 0)+minutes( 0)+seconds( 1)+msecs( 23))) ]);
+ testLex( "23d:05:21:23.532", [ Token(symbol!"Value",loc,Value( days(23)+hours( 5)+minutes(21)+seconds(23)+msecs(532))) ]);
+ testLex( "23d:05:21:23.53", [ Token(symbol!"Value",loc,Value( days(23)+hours( 5)+minutes(21)+seconds(23)+msecs(530))) ]);
+ testLex( "23d:05:21:23.5", [ Token(symbol!"Value",loc,Value( days(23)+hours( 5)+minutes(21)+seconds(23)+msecs(500))) ]);
+ testLex("-23d:05:21:23.532", [ Token(symbol!"Value",loc,Value(-days(23)-hours( 5)-minutes(21)-seconds(23)-msecs(532))) ]);
+ testLex("-23d:05:21:23.5", [ Token(symbol!"Value",loc,Value(-days(23)-hours( 5)-minutes(21)-seconds(23)-msecs(500))) ]);
+ testLex( "23d:05:21:23", [ Token(symbol!"Value",loc,Value( days(23)+hours( 5)+minutes(21)+seconds(23)+msecs( 0))) ]);
+
+ testLexThrows("12:14:42a");
+ testLexThrows("23d:05:21:23.532a");
+ testLexThrows("23d:05:21:23.532f");
+
+ // Combination
+ testLex("foo. 7", [
+ Token(symbol!"Ident",loc,Value( null),"foo."),
+ Token(symbol!"Value",loc,Value(cast(int)7))
+ ]);
+
+ testLex(`
+ namespace:person "foo" "bar" 1 23L name.first="ひとみ" name.last="Smith" {
+ namespace:age 37; namespace:favorite_color "blue" // comment
+ somedate 2013/2/22 07:53 -- comment
+
+ inventory /* comment */ {
+ socks
+ }
+ }
+ `,
+ [
+ Token(symbol!"EOL",loc,Value(null),"\n"),
+
+ Token(symbol!"Ident", loc, Value( null ), "namespace"),
+ Token(symbol!":", loc, Value( null ), ":"),
+ Token(symbol!"Ident", loc, Value( null ), "person"),
+ Token(symbol!"Value", loc, Value( "foo" ), `"foo"`),
+ Token(symbol!"Value", loc, Value( "bar" ), `"bar"`),
+ Token(symbol!"Value", loc, Value( cast( int) 1 ), "1"),
+ Token(symbol!"Value", loc, Value( cast(long)23 ), "23L"),
+ Token(symbol!"Ident", loc, Value( null ), "name.first"),
+ Token(symbol!"=", loc, Value( null ), "="),
+ Token(symbol!"Value", loc, Value( "ひとみ" ), `"ひとみ"`),
+ Token(symbol!"Ident", loc, Value( null ), "name.last"),
+ Token(symbol!"=", loc, Value( null ), "="),
+ Token(symbol!"Value", loc, Value( "Smith" ), `"Smith"`),
+ Token(symbol!"{", loc, Value( null ), "{"),
+ Token(symbol!"EOL", loc, Value( null ), "\n"),
+
+ Token(symbol!"Ident", loc, Value( null ), "namespace"),
+ Token(symbol!":", loc, Value( null ), ":"),
+ Token(symbol!"Ident", loc, Value( null ), "age"),
+ Token(symbol!"Value", loc, Value( cast(int)37 ), "37"),
+ Token(symbol!"EOL", loc, Value( null ), ";"),
+ Token(symbol!"Ident", loc, Value( null ), "namespace"),
+ Token(symbol!":", loc, Value( null ), ":"),
+ Token(symbol!"Ident", loc, Value( null ), "favorite_color"),
+ Token(symbol!"Value", loc, Value( "blue" ), `"blue"`),
+ Token(symbol!"EOL", loc, Value( null ), "\n"),
+
+ Token(symbol!"Ident", loc, Value( null ), "somedate"),
+ Token(symbol!"Value", loc, Value( DateTimeFrac(DateTime(2013, 2, 22, 7, 53, 0)) ), "2013/2/22 07:53"),
+ Token(symbol!"EOL", loc, Value( null ), "\n"),
+ Token(symbol!"EOL", loc, Value( null ), "\n"),
+
+ Token(symbol!"Ident", loc, Value(null), "inventory"),
+ Token(symbol!"{", loc, Value(null), "{"),
+ Token(symbol!"EOL", loc, Value(null), "\n"),
+
+ Token(symbol!"Ident", loc, Value(null), "socks"),
+ Token(symbol!"EOL", loc, Value(null), "\n"),
+
+ Token(symbol!"}", loc, Value(null), "}"),
+ Token(symbol!"EOL", loc, Value(null), "\n"),
+
+ Token(symbol!"}", loc, Value(null), "}"),
+ Token(symbol!"EOL", loc, Value(null), "\n"),
+ ]);
+
+ if(numErrors > 0)
+ stderr.writeln(numErrors, " failed test(s)");
+}
+
+version(sdlangUnittest)
+unittest
+{
+ writeln("lexer: Regression test issue #8...");
+ stdout.flush();
+
+ testLex(`"\n \n"`, [ Token(symbol!"Value",loc,Value("\n \n"),`"\n \n"`) ]);
+ testLex(`"\t\t"`, [ Token(symbol!"Value",loc,Value("\t\t"),`"\t\t"`) ]);
+ testLex(`"\n\n"`, [ Token(symbol!"Value",loc,Value("\n\n"),`"\n\n"`) ]);
+}
+
+version(sdlangUnittest)
+unittest
+{
+ writeln("lexer: Regression test issue #11...");
+ stdout.flush();
+
+ void test(string input)
+ {
+ testLex(
+ input,
+ [
+ Token(symbol!"EOL", loc, Value(null), "\n"),
+ Token(symbol!"Ident",loc,Value(null), "a")
+ ]
+ );
+ }
+
+ test("//X\na");
+ test("//\na");
+ test("--\na");
+ test("#\na");
+}
+
+version(sdlangUnittest)
+unittest
+{
+ writeln("lexer: Regression test issue #28...");
+ stdout.flush();
+
+ enum offset = 1; // workaround for an off-by-one error for line numbers
+ testLex("test", [
+ Token(symbol!"Ident", Location("filename", 0, 0, 0), Value(null), "test")
+ ], true);
+ testLex("\ntest", [
+ Token(symbol!"EOL", Location("filename", 0, 0, 0), Value(null), "\n"),
+ Token(symbol!"Ident", Location("filename", 1, 0, 1), Value(null), "test")
+ ], true);
+ testLex("\rtest", [
+ Token(symbol!"EOL", Location("filename", 0, 0, 0), Value(null), "\r"),
+ Token(symbol!"Ident", Location("filename", 1, 0, 1), Value(null), "test")
+ ], true);
+ testLex("\r\ntest", [
+ Token(symbol!"EOL", Location("filename", 0, 0, 0), Value(null), "\r\n"),
+ Token(symbol!"Ident", Location("filename", 1, 0, 2), Value(null), "test")
+ ], true);
+ testLex("\r\n\ntest", [
+ Token(symbol!"EOL", Location("filename", 0, 0, 0), Value(null), "\r\n"),
+ Token(symbol!"EOL", Location("filename", 1, 0, 2), Value(null), "\n"),
+ Token(symbol!"Ident", Location("filename", 2, 0, 3), Value(null), "test")
+ ], true);
+ testLex("\r\r\ntest", [
+ Token(symbol!"EOL", Location("filename", 0, 0, 0), Value(null), "\r"),
+ Token(symbol!"EOL", Location("filename", 1, 0, 1), Value(null), "\r\n"),
+ Token(symbol!"Ident", Location("filename", 2, 0, 3), Value(null), "test")
+ ], true);
+}
diff --git a/src/sdlang/libinputvisitor/dub.json b/src/sdlang/libinputvisitor/dub.json
new file mode 100644
index 0000000..6e273c8
--- /dev/null
+++ b/src/sdlang/libinputvisitor/dub.json
@@ -0,0 +1,10 @@
+{
+ "name": "libinputvisitor",
+ "description": "Write D input range generators in a straightforward coroutine style",
+ "authors": ["Nick Sabalausky"],
+ "homepage": "https://github.com/abscissa/libInputVisitor",
+ "license": "WTFPL",
+ "sourcePaths": ["."],
+ "importPaths": ["."],
+ "excludedSourceFiles": ["libInputVisitorExample.d"]
+}
diff --git a/src/sdlang/libinputvisitor/libInputVisitor.d b/src/sdlang/libinputvisitor/libInputVisitor.d
new file mode 100644
index 0000000..15c2ce8
--- /dev/null
+++ b/src/sdlang/libinputvisitor/libInputVisitor.d
@@ -0,0 +1,91 @@
+/++
+Copyright (C) 2012 Nick Sabalausky <http://semitwist.com/contact>
+
+This program is free software. It comes without any warranty, to
+the extent permitted by applicable law. You can redistribute it
+and/or modify it under the terms of the Do What The Fuck You Want
+To Public License, Version 2, as published by Sam Hocevar. See
+http://www.wtfpl.net/ for more details.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
+
+Everyone is permitted to copy and distribute verbatim or modified
+copies of this license document, and changing it is allowed as long
+as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. You just DO WHAT THE FUCK YOU WANT TO.
++/
+
+/++
+Should work with DMD 2.059 and up
+
+For more info on this, see:
+http://semitwist.com/articles/article/view/combine-coroutines-and-input-ranges-for-dead-simple-d-iteration
++/
+
+import core.thread;
+
+class InputVisitor(Obj, Elem) : Fiber
+{
+ bool started = false;
+ Obj obj;
+ this(Obj obj)
+ {
+ this.obj = obj;
+ super(&run);
+ }
+
+ private void run()
+ {
+ obj.visit(this);
+ }
+
+ private void ensureStarted()
+ {
+ if(!started)
+ {
+ call();
+ started = true;
+ }
+ }
+
+ // Member 'front' must be a function due to DMD Issue #5403
+ private Elem _front;
+ @property Elem front()
+ {
+ ensureStarted();
+ return _front;
+ }
+
+ void popFront()
+ {
+ ensureStarted();
+ call();
+ }
+
+ @property bool empty()
+ {
+ ensureStarted();
+ return state == Fiber.State.TERM;
+ }
+
+ void yield(Elem elem)
+ {
+ _front = elem;
+ Fiber.yield();
+ }
+}
+
+template inputVisitor(Elem)
+{
+ @property InputVisitor!(Obj, Elem) inputVisitor(Obj)(Obj obj)
+ {
+ return new InputVisitor!(Obj, Elem)(obj);
+ }
+}
diff --git a/src/sdlang/package.d b/src/sdlang/package.d
new file mode 100644
index 0000000..d990e64
--- /dev/null
+++ b/src/sdlang/package.d
@@ -0,0 +1,132 @@
+// SDLang-D
+// Written in the D programming language.
+
+/++
+$(H2 SDLang-D v0.9.3)
+
+Library for parsing and generating SDL (Simple Declarative Language).
+
+Import this module to use SDLang-D as a library.
+
+For the list of officially supported compiler versions, see the
+$(LINK2 https://github.com/Abscissa/SDLang-D/blob/master/.travis.yml, .travis.yml)
+file included with your version of SDLang-D.
+
+Links:
+$(UL
+ $(LI $(LINK2 https://github.com/Abscissa/SDLang-D, SDLang-D Homepage) )
+ $(LI $(LINK2 http://semitwist.com/sdlang-d, SDLang-D API Reference (latest version) ) )
+ $(LI $(LINK2 http://semitwist.com/sdlang-d-docs, SDLang-D API Reference (earlier versions) ) )
+ $(LI $(LINK2 http://sdl.ikayzo.org/display/SDL/Language+Guide, Official SDL Site) [$(LINK2 http://semitwist.com/sdl-mirror/Language+Guide.html, mirror)] )
+)
+
+Authors: Nick Sabalausky ("Abscissa") http://semitwist.com/contact
+Copyright:
+Copyright (C) 2012-2015 Nick Sabalausky.
+
+License: $(LINK2 https://github.com/Abscissa/SDLang-D/blob/master/LICENSE.txt, zlib/libpng)
++/
+
+module sdlang;
+
+import std.array;
+import std.datetime;
+import std.file;
+import std.stdio;
+
+import sdlang.ast;
+import sdlang.exception;
+import sdlang.lexer;
+import sdlang.parser;
+import sdlang.symbol;
+import sdlang.token;
+import sdlang.util;
+
+// Expose main public API
+public import sdlang.ast : Attribute, Tag;
+public import sdlang.exception;
+public import sdlang.parser : parseFile, parseSource;
+public import sdlang.token : Value, Token, DateTimeFrac, DateTimeFracUnknownZone;
+public import sdlang.util : sdlangVersion, Location;
+
+version(sdlangUnittest)
+ void main() {}
+
+version(sdlangTestApp)
+{
+ int main(string[] args)
+ {
+ if(
+ args.length != 3 ||
+ (args[1] != "lex" && args[1] != "parse" && args[1] != "to-sdl")
+ )
+ {
+ stderr.writeln("SDLang-D v", sdlangVersion);
+ stderr.writeln("Usage: sdlang [lex|parse|to-sdl] filename.sdl");
+ return 1;
+ }
+
+ auto filename = args[2];
+
+ try
+ {
+ if(args[1] == "lex")
+ doLex(filename);
+ else if(args[1] == "parse")
+ doParse(filename);
+ else
+ doToSDL(filename);
+ }
+ catch(SDLangParseException e)
+ {
+ stderr.writeln(e.msg);
+ return 1;
+ }
+
+ return 0;
+ }
+
+ void doLex(string filename)
+ {
+ auto source = cast(string)read(filename);
+ auto lexer = new Lexer(source, filename);
+
+ foreach(tok; lexer)
+ {
+ // Value
+ string value;
+ if(tok.symbol == symbol!"Value")
+ value = tok.value.hasValue? toString(tok.value.type) : "{null}";
+
+ value = value==""? "\t" : "("~value~":"~tok.value.toString()~") ";
+
+ // Data
+ auto data = tok.data.replace("\n", "").replace("\r", "");
+ if(data != "")
+ data = "\t|"~tok.data~"|";
+
+ // Display
+ writeln(
+ tok.location.toString, ":\t",
+ tok.symbol.name, value,
+ data
+ );
+
+ if(tok.symbol.name == "Error")
+ break;
+ }
+ }
+
+ void doParse(string filename)
+ {
+ auto root = parseFile(filename);
+ stdout.rawWrite(root.toDebugString());
+ writeln();
+ }
+
+ void doToSDL(string filename)
+ {
+ auto root = parseFile(filename);
+ stdout.rawWrite(root.toSDLDocument());
+ }
+}
diff --git a/src/sdlang/parser.d b/src/sdlang/parser.d
new file mode 100644
index 0000000..ed8084a
--- /dev/null
+++ b/src/sdlang/parser.d
@@ -0,0 +1,551 @@
+// SDLang-D
+// Written in the D programming language.
+
+module sdlang.parser;
+
+import std.file;
+
+import libInputVisitor;
+
+import sdlang.ast;
+import sdlang.exception;
+import sdlang.lexer;
+import sdlang.symbol;
+import sdlang.token;
+import sdlang.util;
+
+/// Returns root tag.
+Tag parseFile(string filename)
+{
+ auto source = cast(string)read(filename);
+ return parseSource(source, filename);
+}
+
+/// Returns root tag. The optional 'filename' parameter can be included
+/// so that the SDL document's filename (if any) can be displayed with
+/// any syntax error messages.
+Tag parseSource(string source, string filename=null)
+{
+ auto lexer = new Lexer(source, filename);
+ auto parser = DOMParser(lexer);
+ return parser.parseRoot();
+}
+
+/++
+Parses an SDL document using StAX/Pull-style. Returns an InputRange with
+element type ParserEvent.
+
+The pullParseFile version reads a file and parses it, while pullParseSource
+parses a string passed in. The optional 'filename' parameter in pullParseSource
+can be included so that the SDL document's filename (if any) can be displayed
+with any syntax error messages.
+
+Warning! The FileStartEvent and FileEndEvent events *might* be removed later.
+See $(LINK https://github.com/Abscissa/SDLang-D/issues/17)
+
+Example:
+------------------
+parent 12 attr="q" {
+ childA 34
+ childB 56
+}
+lastTag
+------------------
+
+The ParserEvent sequence emitted for that SDL document would be as
+follows (indented for readability):
+------------------
+FileStartEvent
+ TagStartEvent (parent)
+ ValueEvent (12)
+ AttributeEvent (attr, "q")
+ TagStartEvent (childA)
+ ValueEvent (34)
+ TagEndEvent
+ TagStartEvent (childB)
+ ValueEvent (56)
+ TagEndEvent
+ TagEndEvent
+ TagStartEvent (lastTag)
+ TagEndEvent
+FileEndEvent
+------------------
+
+Example:
+------------------
+foreach(event; pullParseFile("stuff.sdl"))
+{
+ import std.stdio;
+
+ if(event.peek!FileStartEvent())
+ writeln("FileStartEvent, starting! ");
+
+ else if(event.peek!FileEndEvent())
+ writeln("FileEndEvent, done! ");
+
+ else if(auto e = event.peek!TagStartEvent())
+ writeln("TagStartEvent: ", e.namespace, ":", e.name, " @ ", e.location);
+
+ else if(event.peek!TagEndEvent())
+ writeln("TagEndEvent");
+
+ else if(auto e = event.peek!ValueEvent())
+ writeln("ValueEvent: ", e.value);
+
+ else if(auto e = event.peek!AttributeEvent())
+ writeln("AttributeEvent: ", e.namespace, ":", e.name, "=", e.value);
+
+ else // Shouldn't happen
+ throw new Exception("Received unknown parser event");
+}
+------------------
++/
+auto pullParseFile(string filename)
+{
+ auto source = cast(string)read(filename);
+ return parseSource(source, filename);
+}
+
+///ditto
+auto pullParseSource(string source, string filename=null)
+{
+ auto lexer = new Lexer(source, filename);
+ auto parser = PullParser(lexer);
+ return inputVisitor!ParserEvent( parser );
+}
+
+/// The element of the InputRange returned by pullParseFile and pullParseSource:
+alias ParserEvent = std.variant.Algebraic!(
+ FileStartEvent,
+ FileEndEvent,
+ TagStartEvent,
+ TagEndEvent,
+ ValueEvent,
+ AttributeEvent,
+);
+
+/// Event: Start of file
+struct FileStartEvent
+{
+ Location location;
+}
+
+/// Event: End of file
+struct FileEndEvent
+{
+ Location location;
+}
+
+/// Event: Start of tag
+struct TagStartEvent
+{
+ Location location;
+ string namespace;
+ string name;
+}
+
+/// Event: End of tag
+struct TagEndEvent
+{
+ //Location location;
+}
+
+/// Event: Found a Value in the current tag
+struct ValueEvent
+{
+ Location location;
+ Value value;
+}
+
+/// Event: Found an Attribute in the current tag
+struct AttributeEvent
+{
+ Location location;
+ string namespace;
+ string name;
+ Value value;
+}
+
+// The actual pull parser
+private struct PullParser
+{
+ private Lexer lexer;
+
+ private struct IDFull
+ {
+ string namespace;
+ string name;
+ }
+
+ private void error(string msg)
+ {
+ error(lexer.front.location, msg);
+ }
+
+ private void error(Location loc, string msg)
+ {
+ throw new SDLangParseException(loc, "Error: "~msg);
+ }
+
+ private InputVisitor!(PullParser, ParserEvent) v;
+
+ void visit(InputVisitor!(PullParser, ParserEvent) v)
+ {
+ this.v = v;
+ parseRoot();
+ }
+
+ private void emit(Event)(Event event)
+ {
+ v.yield( ParserEvent(event) );
+ }
+
+ /// <Root> ::= <Tags> EOF (Lookaheads: Anything)
+ private void parseRoot()
+ {
+ //trace("Starting parse of file: ", lexer.filename);
+ //trace(__FUNCTION__, ": <Root> ::= <Tags> EOF (Lookaheads: Anything)");
+
+ auto startLocation = Location(lexer.filename, 0, 0, 0);
+ emit( FileStartEvent(startLocation) );
+
+ parseTags();
+
+ auto token = lexer.front;
+ if(!token.matches!"EOF"())
+ error("Expected end-of-file, not " ~ token.symbol.name);
+
+ emit( FileEndEvent(token.location) );
+ }
+
+ /// <Tags> ::= <Tag> <Tags> (Lookaheads: Ident Value)
+ /// | EOL <Tags> (Lookaheads: EOL)
+ /// | {empty} (Lookaheads: Anything else, except '{')
+ void parseTags()
+ {
+ //trace("Enter ", __FUNCTION__);
+ while(true)
+ {
+ auto token = lexer.front;
+ if(token.matches!"Ident"() || token.matches!"Value"())
+ {
+ //trace(__FUNCTION__, ": <Tags> ::= <Tag> <Tags> (Lookaheads: Ident Value)");
+ parseTag();
+ continue;
+ }
+ else if(token.matches!"EOL"())
+ {
+ //trace(__FUNCTION__, ": <Tags> ::= EOL <Tags> (Lookaheads: EOL)");
+ lexer.popFront();
+ continue;
+ }
+ else if(token.matches!"{"())
+ {
+ error("Anonymous tags must have at least one value. They cannot just have children and attributes only.");
+ }
+ else
+ {
+ //trace(__FUNCTION__, ": <Tags> ::= {empty} (Lookaheads: Anything else, except '{')");
+ break;
+ }
+ }
+ }
+
+ /// <Tag>
+ /// ::= <IDFull> <Values> <Attributes> <OptChild> <TagTerminator> (Lookaheads: Ident)
+ /// | <Value> <Values> <Attributes> <OptChild> <TagTerminator> (Lookaheads: Value)
+ void parseTag()
+ {
+ auto token = lexer.front;
+ if(token.matches!"Ident"())
+ {
+ //trace(__FUNCTION__, ": <Tag> ::= <IDFull> <Values> <Attributes> <OptChild> <TagTerminator> (Lookaheads: Ident)");
+ //trace("Found tag named: ", tag.fullName);
+ auto id = parseIDFull();
+ emit( TagStartEvent(token.location, id.namespace, id.name) );
+ }
+ else if(token.matches!"Value"())
+ {
+ //trace(__FUNCTION__, ": <Tag> ::= <Value> <Values> <Attributes> <OptChild> <TagTerminator> (Lookaheads: Value)");
+ //trace("Found anonymous tag.");
+ emit( TagStartEvent(token.location, null, null) );
+ }
+ else
+ error("Expected tag name or value, not " ~ token.symbol.name);
+
+ if(lexer.front.matches!"="())
+ error("Anonymous tags must have at least one value. They cannot just have attributes and children only.");
+
+ parseValues();
+ parseAttributes();
+ parseOptChild();
+ parseTagTerminator();
+
+ emit( TagEndEvent() );
+ }
+
+ /// <IDFull> ::= Ident <IDSuffix> (Lookaheads: Ident)
+ IDFull parseIDFull()
+ {
+ auto token = lexer.front;
+ if(token.matches!"Ident"())
+ {
+ //trace(__FUNCTION__, ": <IDFull> ::= Ident <IDSuffix> (Lookaheads: Ident)");
+ lexer.popFront();
+ return parseIDSuffix(token.data);
+ }
+ else
+ {
+ error("Expected namespace or identifier, not " ~ token.symbol.name);
+ assert(0);
+ }
+ }
+
+ /// <IDSuffix>
+ /// ::= ':' Ident (Lookaheads: ':')
+ /// ::= {empty} (Lookaheads: Anything else)
+ IDFull parseIDSuffix(string firstIdent)
+ {
+ auto token = lexer.front;
+ if(token.matches!":"())
+ {
+ //trace(__FUNCTION__, ": <IDSuffix> ::= ':' Ident (Lookaheads: ':')");
+ lexer.popFront();
+ token = lexer.front;
+ if(token.matches!"Ident"())
+ {
+ lexer.popFront();
+ return IDFull(firstIdent, token.data);
+ }
+ else
+ {
+ error("Expected name, not " ~ token.symbol.name);
+ assert(0);
+ }
+ }
+ else
+ {
+ //trace(__FUNCTION__, ": <IDSuffix> ::= {empty} (Lookaheads: Anything else)");
+ return IDFull("", firstIdent);
+ }
+ }
+
+ /// <Values>
+ /// ::= Value <Values> (Lookaheads: Value)
+ /// | {empty} (Lookaheads: Anything else)
+ void parseValues()
+ {
+ while(true)
+ {
+ auto token = lexer.front;
+ if(token.matches!"Value"())
+ {
+ //trace(__FUNCTION__, ": <Values> ::= Value <Values> (Lookaheads: Value)");
+ parseValue();
+ continue;
+ }
+ else
+ {
+ //trace(__FUNCTION__, ": <Values> ::= {empty} (Lookaheads: Anything else)");
+ break;
+ }
+ }
+ }
+
+ /// Handle Value terminals that aren't part of an attribute
+ void parseValue()
+ {
+ auto token = lexer.front;
+ if(token.matches!"Value"())
+ {
+ //trace(__FUNCTION__, ": (Handle Value terminals that aren't part of an attribute)");
+ auto value = token.value;
+ //trace("In tag '", parent.fullName, "', found value: ", value);
+ emit( ValueEvent(token.location, value) );
+
+ lexer.popFront();
+ }
+ else
+ error("Expected value, not "~token.symbol.name);
+ }
+
+ /// <Attributes>
+ /// ::= <Attribute> <Attributes> (Lookaheads: Ident)
+ /// | {empty} (Lookaheads: Anything else)
+ void parseAttributes()
+ {
+ while(true)
+ {
+ auto token = lexer.front;
+ if(token.matches!"Ident"())
+ {
+ //trace(__FUNCTION__, ": <Attributes> ::= <Attribute> <Attributes> (Lookaheads: Ident)");
+ parseAttribute();
+ continue;
+ }
+ else
+ {
+ //trace(__FUNCTION__, ": <Attributes> ::= {empty} (Lookaheads: Anything else)");
+ break;
+ }
+ }
+ }
+
+ /// <Attribute> ::= <IDFull> '=' Value (Lookaheads: Ident)
+ void parseAttribute()
+ {
+ //trace(__FUNCTION__, ": <Attribute> ::= <IDFull> '=' Value (Lookaheads: Ident)");
+ auto token = lexer.front;
+ if(!token.matches!"Ident"())
+ error("Expected attribute name, not "~token.symbol.name);
+
+ auto id = parseIDFull();
+
+ token = lexer.front;
+ if(!token.matches!"="())
+ error("Expected '=' after attribute name, not "~token.symbol.name);
+
+ lexer.popFront();
+ token = lexer.front;
+ if(!token.matches!"Value"())
+ error("Expected attribute value, not "~token.symbol.name);
+
+ //trace("In tag '", parent.fullName, "', found attribute '", attr.fullName, "'");
+ emit( AttributeEvent(token.location, id.namespace, id.name, token.value) );
+
+ lexer.popFront();
+ }
+
+ /// <OptChild>
+ /// ::= '{' EOL <Tags> '}' (Lookaheads: '{')
+ /// | {empty} (Lookaheads: Anything else)
+ void parseOptChild()
+ {
+ auto token = lexer.front;
+ if(token.matches!"{")
+ {
+ //trace(__FUNCTION__, ": <OptChild> ::= '{' EOL <Tags> '}' (Lookaheads: '{')");
+ lexer.popFront();
+ token = lexer.front;
+ if(!token.matches!"EOL"())
+ error("Expected newline or semicolon after '{', not "~token.symbol.name);
+
+ lexer.popFront();
+ parseTags();
+
+ token = lexer.front;
+ if(!token.matches!"}"())
+ error("Expected '}' after child tags, not "~token.symbol.name);
+ lexer.popFront();
+ }
+ else
+ {
+ //trace(__FUNCTION__, ": <OptChild> ::= {empty} (Lookaheads: Anything else)");
+ // Do nothing, no error.
+ }
+ }
+
+ /// <TagTerminator>
+ /// ::= EOL (Lookahead: EOL)
+ /// | {empty} (Lookahead: EOF)
+ void parseTagTerminator()
+ {
+ auto token = lexer.front;
+ if(token.matches!"EOL")
+ {
+ //trace(__FUNCTION__, ": <TagTerminator> ::= EOL (Lookahead: EOL)");
+ lexer.popFront();
+ }
+ else if(token.matches!"EOF")
+ {
+ //trace(__FUNCTION__, ": <TagTerminator> ::= {empty} (Lookahead: EOF)");
+ // Do nothing
+ }
+ else
+ error("Expected end of tag (newline, semicolon or end-of-file), not " ~ token.symbol.name);
+ }
+}
+
+private struct DOMParser
+{
+ Lexer lexer;
+
+ Tag parseRoot()
+ {
+ auto currTag = new Tag(null, null, "root");
+ currTag.location = Location(lexer.filename, 0, 0, 0);
+
+ auto parser = PullParser(lexer);
+ auto eventRange = inputVisitor!ParserEvent( parser );
+ foreach(event; eventRange)
+ {
+ if(auto e = event.peek!TagStartEvent())
+ {
+ auto newTag = new Tag(currTag, e.namespace, e.name);
+ newTag.location = e.location;
+
+ currTag = newTag;
+ }
+ else if(event.peek!TagEndEvent())
+ {
+ currTag = currTag.parent;
+
+ if(!currTag)
+ parser.error("Internal Error: Received an extra TagEndEvent");
+ }
+ else if(auto e = event.peek!ValueEvent())
+ {
+ currTag.add(e.value);
+ }
+ else if(auto e = event.peek!AttributeEvent())
+ {
+ auto attr = new Attribute(e.namespace, e.name, e.value, e.location);
+ currTag.add(attr);
+ }
+ else if(event.peek!FileStartEvent())
+ {
+ // Do nothing
+ }
+ else if(event.peek!FileEndEvent())
+ {
+ // There shouldn't be another parent.
+ if(currTag.parent)
+ parser.error("Internal Error: Unexpected end of file, not enough TagEndEvent");
+ }
+ else
+ parser.error("Internal Error: Received unknown parser event");
+ }
+
+ return currTag;
+ }
+}
+
+// Other parser tests are part of the AST's tests over in the ast module.
+
+// Regression test, issue #16: https://github.com/Abscissa/SDLang-D/issues/16
+version(sdlangUnittest)
+unittest
+{
+ import std.stdio;
+ writeln("parser: Regression test issue #16...");
+ stdout.flush();
+
+ // Shouldn't crash
+ foreach(event; pullParseSource(`tag "data"`))
+ {
+ event.peek!FileStartEvent();
+ }
+}
+
+// Regression test, issue #31: https://github.com/Abscissa/SDLang-D/issues/31
+// "Escape sequence results in range violation error"
+version(sdlangUnittest)
+unittest
+{
+ import std.stdio;
+ writeln("parser: Regression test issue #31...");
+ stdout.flush();
+
+ // Shouldn't get a Range violation
+ parseSource(`test "\"foo\""`);
+}
diff --git a/src/sdlang/symbol.d b/src/sdlang/symbol.d
new file mode 100644
index 0000000..14a74a7
--- /dev/null
+++ b/src/sdlang/symbol.d
@@ -0,0 +1,61 @@
+// SDLang-D
+// Written in the D programming language.
+
+module sdlang.symbol;
+
+import std.algorithm;
+
+static immutable validSymbolNames = [
+ "Error",
+ "EOF",
+ "EOL",
+
+ ":",
+ "=",
+ "{",
+ "}",
+
+ "Ident",
+ "Value",
+];
+
+/// Use this to create a Symbol. Ex: symbol!"Value" or symbol!"="
+/// Invalid names (such as symbol!"FooBar") are rejected at compile-time.
+template symbol(string name)
+{
+ static assert(validSymbolNames.find(name), "Invalid Symbol: '"~name~"'");
+ immutable symbol = _symbol(name);
+}
+
+private Symbol _symbol(string name)
+{
+ return Symbol(name);
+}
+
+/// Symbol is essentially the "type" of a Token.
+/// Token is like an instance of a Symbol.
+///
+/// This only represents terminals. Nonterminal tokens aren't
+/// constructed since the AST is built directly during parsing.
+///
+/// You can't create a Symbol directly. Instead, use the 'symbol'
+/// template.
+struct Symbol
+{
+ private string _name;
+ @property string name()
+ {
+ return _name;
+ }
+
+ @disable this();
+ private this(string name)
+ {
+ this._name = name;
+ }
+
+ string toString()
+ {
+ return _name;
+ }
+}
diff --git a/src/sdlang/token.d b/src/sdlang/token.d
new file mode 100644
index 0000000..908d4a3
--- /dev/null
+++ b/src/sdlang/token.d
@@ -0,0 +1,505 @@
+// SDLang-D
+// Written in the D programming language.
+
+module sdlang.token;
+
+import std.array;
+import std.base64;
+import std.conv;
+import std.datetime;
+import std.range;
+import std.string;
+import std.typetuple;
+import std.variant;
+
+import sdlang.symbol;
+import sdlang.util;
+
+/// DateTime doesn't support milliseconds, but SDL's "Date Time" type does.
+/// So this is needed for any SDL "Date Time" that doesn't include a time zone.
+struct DateTimeFrac
+{
+ DateTime dateTime;
+ Duration fracSecs;
+ deprecated("Use fracSecs instead.") {
+ @property FracSec fracSec() const { return FracSec.from!"hnsecs"(fracSecs.total!"hnsecs"); }
+ @property void fracSec(FracSec v) { fracSecs = v.hnsecs.hnsecs; }
+ }
+}
+
+/++
+If a "Date Time" literal in the SDL file has a time zone that's not found in
+your system, you get one of these instead of a SysTime. (Because it's
+impossible to indicate "unknown time zone" with 'std.datetime.TimeZone'.)
+
+The difference between this and 'DateTimeFrac' is that 'DateTimeFrac'
+indicates that no time zone was specified in the SDL at all, whereas
+'DateTimeFracUnknownZone' indicates that a time zone was specified but
+data for it could not be found on your system.
++/
+struct DateTimeFracUnknownZone
+{
+ DateTime dateTime;
+ Duration fracSecs;
+ deprecated("Use fracSecs instead.") {
+ @property FracSec fracSec() const { return FracSec.from!"hnsecs"(fracSecs.total!"hnsecs"); }
+ @property void fracSec(FracSec v) { fracSecs = v.hnsecs.hnsecs; }
+ }
+ string timeZone;
+
+ bool opEquals(const DateTimeFracUnknownZone b) const
+ {
+ return opEquals(b);
+ }
+ bool opEquals(ref const DateTimeFracUnknownZone b) const
+ {
+ return
+ this.dateTime == b.dateTime &&
+ this.fracSecs == b.fracSecs &&
+ this.timeZone == b.timeZone;
+ }
+}
+
+/++
+SDL's datatypes map to D's datatypes as described below.
+Most are straightforward, but take special note of the date/time-related types.
+
+Boolean: bool
+Null: typeof(null)
+Unicode Character: dchar
+Double-Quote Unicode String: string
+Raw Backtick Unicode String: string
+Integer (32 bits signed): int
+Long Integer (64 bits signed): long
+Float (32 bits signed): float
+Double Float (64 bits signed): double
+Decimal (128+ bits signed): real
+Binary (standard Base64): ubyte[]
+Time Span: Duration
+
+Date (with no time at all): Date
+Date Time (no timezone): DateTimeFrac
+Date Time (with a known timezone): SysTime
+Date Time (with an unknown timezone): DateTimeFracUnknownZone
++/
+alias TypeTuple!(
+ bool,
+ string, dchar,
+ int, long,
+ float, double, real,
+ Date, DateTimeFrac, SysTime, DateTimeFracUnknownZone, Duration,
+ ubyte[],
+ typeof(null),
+) ValueTypes;
+
+alias Algebraic!( ValueTypes ) Value; ///ditto
+
+template isSDLSink(T)
+{
+ enum isSink =
+ isOutputRange!T &&
+ is(ElementType!(T)[] == string);
+}
+
+string toSDLString(T)(T value) if(
+ is( T : Value ) ||
+ is( T : bool ) ||
+ is( T : string ) ||
+ is( T : dchar ) ||
+ is( T : int ) ||
+ is( T : long ) ||
+ is( T : float ) ||
+ is( T : double ) ||
+ is( T : real ) ||
+ is( T : Date ) ||
+ is( T : DateTimeFrac ) ||
+ is( T : SysTime ) ||
+ is( T : DateTimeFracUnknownZone ) ||
+ is( T : Duration ) ||
+ is( T : ubyte[] ) ||
+ is( T : typeof(null) )
+)
+{
+ Appender!string sink;
+ toSDLString(value, sink);
+ return sink.data;
+}
+
+void toSDLString(Sink)(Value value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ foreach(T; ValueTypes)
+ {
+ if(value.type == typeid(T))
+ {
+ toSDLString( value.get!T(), sink );
+ return;
+ }
+ }
+
+ throw new Exception("Internal SDLang-D error: Unhandled type of Value. Contains: "~value.toString());
+}
+
+void toSDLString(Sink)(typeof(null) value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ sink.put("null");
+}
+
+void toSDLString(Sink)(bool value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ sink.put(value? "true" : "false");
+}
+
+//TODO: Figure out how to properly handle strings/chars containing lineSep or paraSep
+void toSDLString(Sink)(string value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ sink.put('"');
+
+ // This loop is UTF-safe
+ foreach(char ch; value)
+ {
+ if (ch == '\n') sink.put(`\n`);
+ else if(ch == '\r') sink.put(`\r`);
+ else if(ch == '\t') sink.put(`\t`);
+ else if(ch == '\"') sink.put(`\"`);
+ else if(ch == '\\') sink.put(`\\`);
+ else
+ sink.put(ch);
+ }
+
+ sink.put('"');
+}
+
+void toSDLString(Sink)(dchar value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ sink.put('\'');
+
+ if (value == '\n') sink.put(`\n`);
+ else if(value == '\r') sink.put(`\r`);
+ else if(value == '\t') sink.put(`\t`);
+ else if(value == '\'') sink.put(`\'`);
+ else if(value == '\\') sink.put(`\\`);
+ else
+ sink.put(value);
+
+ sink.put('\'');
+}
+
+void toSDLString(Sink)(int value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ sink.put( "%s".format(value) );
+}
+
+void toSDLString(Sink)(long value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ sink.put( "%sL".format(value) );
+}
+
+void toSDLString(Sink)(float value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ sink.put( "%.10sF".format(value) );
+}
+
+void toSDLString(Sink)(double value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ sink.put( "%.30sD".format(value) );
+}
+
+void toSDLString(Sink)(real value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ sink.put( "%.30sBD".format(value) );
+}
+
+void toSDLString(Sink)(Date value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ sink.put(to!string(value.year));
+ sink.put('/');
+ sink.put(to!string(cast(int)value.month));
+ sink.put('/');
+ sink.put(to!string(value.day));
+}
+
+void toSDLString(Sink)(DateTimeFrac value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ toSDLString(value.dateTime.date, sink);
+ sink.put(' ');
+ sink.put("%.2s".format(value.dateTime.hour));
+ sink.put(':');
+ sink.put("%.2s".format(value.dateTime.minute));
+
+ if(value.dateTime.second != 0)
+ {
+ sink.put(':');
+ sink.put("%.2s".format(value.dateTime.second));
+ }
+
+ if(value.fracSecs != 0.msecs)
+ {
+ sink.put('.');
+ sink.put("%.3s".format(value.fracSecs.total!"msecs"));
+ }
+}
+
+void toSDLString(Sink)(SysTime value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ auto dateTimeFrac = DateTimeFrac(cast(DateTime)value, value.fracSecs);
+ toSDLString(dateTimeFrac, sink);
+
+ sink.put("-");
+
+ auto tzString = value.timezone.name;
+
+ // If name didn't exist, try abbreviation.
+ // Note that according to std.datetime docs, on Windows the
+ // stdName/dstName may not be properly abbreviated.
+ version(Windows) {} else
+ if(tzString == "")
+ {
+ auto tz = value.timezone;
+ auto stdTime = value.stdTime;
+
+ if(tz.hasDST())
+ tzString = tz.dstInEffect(stdTime)? tz.dstName : tz.stdName;
+ else
+ tzString = tz.stdName;
+ }
+
+ if(tzString == "")
+ {
+ auto offset = value.timezone.utcOffsetAt(value.stdTime);
+ sink.put("GMT");
+
+ if(offset < seconds(0))
+ {
+ sink.put("-");
+ offset = -offset;
+ }
+ else
+ sink.put("+");
+
+ sink.put("%.2s".format(offset.split.hours));
+ sink.put(":");
+ sink.put("%.2s".format(offset.split.minutes));
+ }
+ else
+ sink.put(tzString);
+}
+
+void toSDLString(Sink)(DateTimeFracUnknownZone value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ auto dateTimeFrac = DateTimeFrac(value.dateTime, value.fracSecs);
+ toSDLString(dateTimeFrac, sink);
+
+ sink.put("-");
+ sink.put(value.timeZone);
+}
+
+void toSDLString(Sink)(Duration value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ if(value < seconds(0))
+ {
+ sink.put("-");
+ value = -value;
+ }
+
+ auto days = value.total!"days"();
+ if(days != 0)
+ {
+ sink.put("%s".format(days));
+ sink.put("d:");
+ }
+
+ sink.put("%.2s".format(value.split.hours));
+ sink.put(':');
+ sink.put("%.2s".format(value.split.minutes));
+ sink.put(':');
+ sink.put("%.2s".format(value.split.seconds));
+
+ if(value.split.msecs != 0)
+ {
+ sink.put('.');
+ sink.put("%.3s".format(value.split.msecs));
+ }
+}
+
+void toSDLString(Sink)(ubyte[] value, ref Sink sink) if(isOutputRange!(Sink,char))
+{
+ sink.put('[');
+ sink.put( Base64.encode(value) );
+ sink.put(']');
+}
+
+/// This only represents terminals. Nonterminals aren't
+/// constructed since the AST is directly built during parsing.
+struct Token
+{
+ Symbol symbol = sdlang.symbol.symbol!"Error"; /// The "type" of this token
+ Location location;
+ Value value; /// Only valid when 'symbol' is symbol!"Value", otherwise null
+ string data; /// Original text from source
+
+ @disable this();
+ this(Symbol symbol, Location location, Value value=Value(null), string data=null)
+ {
+ this.symbol = symbol;
+ this.location = location;
+ this.value = value;
+ this.data = data;
+ }
+
+ /// Tokens with differing symbols are always unequal.
+ /// Tokens with differing values are always unequal.
+ /// Tokens with differing Value types are always unequal.
+ /// Member 'location' is always ignored for comparison.
+ /// Member 'data' is ignored for comparison *EXCEPT* when the symbol is Ident.
+ bool opEquals(Token b)
+ {
+ return opEquals(b);
+ }
+ bool opEquals(ref Token b) ///ditto
+ {
+ if(
+ this.symbol != b.symbol ||
+ this.value.type != b.value.type ||
+ this.value != b.value
+ )
+ return false;
+
+ if(this.symbol == .symbol!"Ident")
+ return this.data == b.data;
+
+ return true;
+ }
+
+ bool matches(string symbolName)()
+ {
+ return this.symbol == .symbol!symbolName;
+ }
+}
+
+version(sdlangUnittest)
+unittest
+{
+ import std.stdio;
+ writeln("Unittesting sdlang token...");
+ stdout.flush();
+
+ auto loc = Location("", 0, 0, 0);
+ auto loc2 = Location("a", 1, 1, 1);
+
+ assert(Token(symbol!"EOL",loc) == Token(symbol!"EOL",loc ));
+ assert(Token(symbol!"EOL",loc) == Token(symbol!"EOL",loc2));
+ assert(Token(symbol!":", loc) == Token(symbol!":", loc ));
+ assert(Token(symbol!"EOL",loc) != Token(symbol!":", loc ));
+ assert(Token(symbol!"EOL",loc,Value(null),"\n") == Token(symbol!"EOL",loc,Value(null),"\n"));
+
+ assert(Token(symbol!"EOL",loc,Value(null),"\n") == Token(symbol!"EOL",loc,Value(null),";" ));
+ assert(Token(symbol!"EOL",loc,Value(null),"A" ) == Token(symbol!"EOL",loc,Value(null),"B" ));
+ assert(Token(symbol!":", loc,Value(null),"A" ) == Token(symbol!":", loc,Value(null),"BB"));
+ assert(Token(symbol!"EOL",loc,Value(null),"A" ) != Token(symbol!":", loc,Value(null),"A" ));
+
+ assert(Token(symbol!"Ident",loc,Value(null),"foo") == Token(symbol!"Ident",loc,Value(null),"foo"));
+ assert(Token(symbol!"Ident",loc,Value(null),"foo") != Token(symbol!"Ident",loc,Value(null),"BAR"));
+
+ assert(Token(symbol!"Value",loc,Value(null),"foo") == Token(symbol!"Value",loc, Value(null),"foo"));
+ assert(Token(symbol!"Value",loc,Value(null),"foo") == Token(symbol!"Value",loc2,Value(null),"foo"));
+ assert(Token(symbol!"Value",loc,Value(null),"foo") == Token(symbol!"Value",loc, Value(null),"BAR"));
+ assert(Token(symbol!"Value",loc,Value( 7),"foo") == Token(symbol!"Value",loc, Value( 7),"BAR"));
+ assert(Token(symbol!"Value",loc,Value( 7),"foo") != Token(symbol!"Value",loc, Value( "A"),"foo"));
+ assert(Token(symbol!"Value",loc,Value( 7),"foo") != Token(symbol!"Value",loc, Value( 2),"foo"));
+ assert(Token(symbol!"Value",loc,Value(cast(int)7)) != Token(symbol!"Value",loc, Value(cast(long)7)));
+ assert(Token(symbol!"Value",loc,Value(cast(float)1.2)) != Token(symbol!"Value",loc, Value(cast(double)1.2)));
+}
+
+version(sdlangUnittest)
+unittest
+{
+ import std.stdio;
+ writeln("Unittesting sdlang Value.toSDLString()...");
+ stdout.flush();
+
+ // Bool and null
+ assert(Value(null ).toSDLString() == "null");
+ assert(Value(true ).toSDLString() == "true");
+ assert(Value(false).toSDLString() == "false");
+
+ // Base64 Binary
+ assert(Value(cast(ubyte[])"hello world".dup).toSDLString() == "[aGVsbG8gd29ybGQ=]");
+
+ // Integer
+ assert(Value(cast( int) 7).toSDLString() == "7");
+ assert(Value(cast( int)-7).toSDLString() == "-7");
+ assert(Value(cast( int) 0).toSDLString() == "0");
+
+ assert(Value(cast(long) 7).toSDLString() == "7L");
+ assert(Value(cast(long)-7).toSDLString() == "-7L");
+ assert(Value(cast(long) 0).toSDLString() == "0L");
+
+ // Floating point
+ assert(Value(cast(float) 1.5).toSDLString() == "1.5F");
+ assert(Value(cast(float)-1.5).toSDLString() == "-1.5F");
+ assert(Value(cast(float) 0).toSDLString() == "0F");
+
+ assert(Value(cast(double) 1.5).toSDLString() == "1.5D");
+ assert(Value(cast(double)-1.5).toSDLString() == "-1.5D");
+ assert(Value(cast(double) 0).toSDLString() == "0D");
+
+ assert(Value(cast(real) 1.5).toSDLString() == "1.5BD");
+ assert(Value(cast(real)-1.5).toSDLString() == "-1.5BD");
+ assert(Value(cast(real) 0).toSDLString() == "0BD");
+
+ // String
+ assert(Value("hello" ).toSDLString() == `"hello"`);
+ assert(Value(" hello ").toSDLString() == `" hello "`);
+ assert(Value("" ).toSDLString() == `""`);
+ assert(Value("hello \r\n\t\"\\ world").toSDLString() == `"hello \r\n\t\"\\ world"`);
+ assert(Value("日本語").toSDLString() == `"日本語"`);
+
+ // Chars
+ assert(Value(cast(dchar) 'A').toSDLString() == `'A'`);
+ assert(Value(cast(dchar)'\r').toSDLString() == `'\r'`);
+ assert(Value(cast(dchar)'\n').toSDLString() == `'\n'`);
+ assert(Value(cast(dchar)'\t').toSDLString() == `'\t'`);
+ assert(Value(cast(dchar)'\'').toSDLString() == `'\''`);
+ assert(Value(cast(dchar)'\\').toSDLString() == `'\\'`);
+ assert(Value(cast(dchar) '月').toSDLString() == `'月'`);
+
+ // Date
+ assert(Value(Date( 2004,10,31)).toSDLString() == "2004/10/31");
+ assert(Value(Date(-2004,10,31)).toSDLString() == "-2004/10/31");
+
+ // DateTimeFrac w/o Frac
+ assert(Value(DateTimeFrac(DateTime(2004,10,31, 14,30,15))).toSDLString() == "2004/10/31 14:30:15");
+ assert(Value(DateTimeFrac(DateTime(2004,10,31, 1, 2, 3))).toSDLString() == "2004/10/31 01:02:03");
+ assert(Value(DateTimeFrac(DateTime(-2004,10,31, 14,30,15))).toSDLString() == "-2004/10/31 14:30:15");
+
+ // DateTimeFrac w/ Frac
+ assert(Value(DateTimeFrac(DateTime(2004,10,31, 14,30,15), 123.msecs)).toSDLString() == "2004/10/31 14:30:15.123");
+ assert(Value(DateTimeFrac(DateTime(2004,10,31, 14,30,15), 120.msecs)).toSDLString() == "2004/10/31 14:30:15.120");
+ assert(Value(DateTimeFrac(DateTime(2004,10,31, 14,30,15), 100.msecs)).toSDLString() == "2004/10/31 14:30:15.100");
+ assert(Value(DateTimeFrac(DateTime(2004,10,31, 14,30,15), 12.msecs)).toSDLString() == "2004/10/31 14:30:15.012");
+ assert(Value(DateTimeFrac(DateTime(2004,10,31, 14,30,15), 1.msecs)).toSDLString() == "2004/10/31 14:30:15.001");
+ assert(Value(DateTimeFrac(DateTime(-2004,10,31, 14,30,15), 123.msecs)).toSDLString() == "-2004/10/31 14:30:15.123");
+
+ // DateTimeFracUnknownZone
+ assert(Value(DateTimeFracUnknownZone(DateTime(2004,10,31, 14,30,15), 123.msecs, "Foo/Bar")).toSDLString() == "2004/10/31 14:30:15.123-Foo/Bar");
+
+ // SysTime
+ assert(Value(SysTime(DateTime(2004,10,31, 14,30,15), new immutable SimpleTimeZone( hours(0) ))).toSDLString() == "2004/10/31 14:30:15-GMT+00:00");
+ assert(Value(SysTime(DateTime(2004,10,31, 1, 2, 3), new immutable SimpleTimeZone( hours(0) ))).toSDLString() == "2004/10/31 01:02:03-GMT+00:00");
+ assert(Value(SysTime(DateTime(2004,10,31, 14,30,15), new immutable SimpleTimeZone( hours(2)+minutes(10) ))).toSDLString() == "2004/10/31 14:30:15-GMT+02:10");
+ assert(Value(SysTime(DateTime(2004,10,31, 14,30,15), new immutable SimpleTimeZone(-hours(5)-minutes(30) ))).toSDLString() == "2004/10/31 14:30:15-GMT-05:30");
+ assert(Value(SysTime(DateTime(2004,10,31, 14,30,15), new immutable SimpleTimeZone( hours(2)+minutes( 3) ))).toSDLString() == "2004/10/31 14:30:15-GMT+02:03");
+ assert(Value(SysTime(DateTime(2004,10,31, 14,30,15), 123.msecs, new immutable SimpleTimeZone( hours(0) ))).toSDLString() == "2004/10/31 14:30:15.123-GMT+00:00");
+
+ // Duration
+ assert( "12:14:42" == Value( days( 0)+hours(12)+minutes(14)+seconds(42)+msecs( 0)).toSDLString());
+ assert("-12:14:42" == Value(-days( 0)-hours(12)-minutes(14)-seconds(42)-msecs( 0)).toSDLString());
+ assert( "00:09:12" == Value( days( 0)+hours( 0)+minutes( 9)+seconds(12)+msecs( 0)).toSDLString());
+ assert( "00:00:01.023" == Value( days( 0)+hours( 0)+minutes( 0)+seconds( 1)+msecs( 23)).toSDLString());
+ assert( "23d:05:21:23.532" == Value( days(23)+hours( 5)+minutes(21)+seconds(23)+msecs(532)).toSDLString());
+ assert( "23d:05:21:23.530" == Value( days(23)+hours( 5)+minutes(21)+seconds(23)+msecs(530)).toSDLString());
+ assert( "23d:05:21:23.500" == Value( days(23)+hours( 5)+minutes(21)+seconds(23)+msecs(500)).toSDLString());
+ assert("-23d:05:21:23.532" == Value(-days(23)-hours( 5)-minutes(21)-seconds(23)-msecs(532)).toSDLString());
+ assert("-23d:05:21:23.500" == Value(-days(23)-hours( 5)-minutes(21)-seconds(23)-msecs(500)).toSDLString());
+ assert( "23d:05:21:23" == Value( days(23)+hours( 5)+minutes(21)+seconds(23)+msecs( 0)).toSDLString());
+}
diff --git a/src/sdlang/util.d b/src/sdlang/util.d
new file mode 100644
index 0000000..329e387
--- /dev/null
+++ b/src/sdlang/util.d
@@ -0,0 +1,84 @@
+// SDLang-D
+// Written in the D programming language.
+
+module sdlang.util;
+
+import std.algorithm;
+import std.datetime;
+import std.stdio;
+import std.string;
+
+import sdlang.token;
+
+enum sdlangVersion = "0.9.1";
+
+alias immutable(ubyte)[] ByteString;
+
+auto startsWith(T)(string haystack, T needle)
+ if( is(T:ByteString) || is(T:string) )
+{
+ return std.algorithm.startsWith( cast(ByteString)haystack, cast(ByteString)needle );
+}
+
+struct Location
+{
+ string file; /// Filename (including path)
+ int line; /// Zero-indexed
+ int col; /// Zero-indexed, Tab counts as 1
+ size_t index; /// Index into the source
+
+ this(int line, int col, int index)
+ {
+ this.line = line;
+ this.col = col;
+ this.index = index;
+ }
+
+ this(string file, int line, int col, int index)
+ {
+ this.file = file;
+ this.line = line;
+ this.col = col;
+ this.index = index;
+ }
+
+ string toString()
+ {
+ return "%s(%s:%s)".format(file, line+1, col+1);
+ }
+}
+
+void removeIndex(E)(ref E[] arr, ptrdiff_t index)
+{
+ arr = arr[0..index] ~ arr[index+1..$];
+}
+
+void trace(string file=__FILE__, size_t line=__LINE__, TArgs...)(TArgs args)
+{
+ version(sdlangTrace)
+ {
+ writeln(file, "(", line, "): ", args);
+ stdout.flush();
+ }
+}
+
+string toString(TypeInfo ti)
+{
+ if (ti == typeid( bool )) return "bool";
+ else if(ti == typeid( string )) return "string";
+ else if(ti == typeid( dchar )) return "dchar";
+ else if(ti == typeid( int )) return "int";
+ else if(ti == typeid( long )) return "long";
+ else if(ti == typeid( float )) return "float";
+ else if(ti == typeid( double )) return "double";
+ else if(ti == typeid( real )) return "real";
+ else if(ti == typeid( Date )) return "Date";
+ else if(ti == typeid( DateTimeFrac )) return "DateTimeFrac";
+ else if(ti == typeid( DateTimeFracUnknownZone )) return "DateTimeFracUnknownZone";
+ else if(ti == typeid( SysTime )) return "SysTime";
+ else if(ti == typeid( Duration )) return "Duration";
+ else if(ti == typeid( ubyte[] )) return "ubyte[]";
+ else if(ti == typeid( typeof(null) )) return "null";
+
+ return "{unknown}";
+}
diff --git a/src/sdp.d b/src/sdp.d
index d17c379..c29014c 100755
--- a/src/sdp.d
+++ b/src/sdp.d
@@ -8,13 +8,18 @@ import
compile_time_info, // sdp/compile_time_info.d
ao_abstract_doc_source, // sdp/ao_abstract_doc_source.d
ao_defaults, // sdp/ao_defaults.d
- ao_header_extract, // sdp/ao_header_extract.d
+ ao_read_config_files, // sdp/ao_read_config_files.d
ao_read_source_files, // sdp/ao_read_source_files.d
ao_output_debugs, // sdp/ao_output_debugs.d
ao_rgx, // sdp/ao_rgx.d
ao_ansi_colors, // sdp/ao_ansi_colors.d
output_hub; // output_hub.d
// std.conv;
+/+ sdlang http://sdlang.org +/
+import sdlang; // sdlang.d
+ // sdlang.parser, // sdlang/parser.d
+ // sdlang.exception; // sdlang/exception.d
+ // // std.conv;
/+ sdp sisu document parser +/
private import
std.getopt,
@@ -36,74 +41,73 @@ private import
std.utf,
// std.variant,
std.conv : to;
+struct SDPoutput {
+ auto hub(S)(
+ auto ref const S contents,
+ string[][string][string] bookindex_unordered_hashes,
+ JSONValue[] biblio,
+ // JSONValue[string] dochead_make_json,
+ // JSONValue[string] dochead_meta_json,
+ string fn_src,
+ bool[string] opt_action_bool
+ ) {
+ mixin ScreenTxtColors;
+ mixin RgxInit;
+ mixin SiSUoutputHub;
+ auto rgx = Rgx();
+ uint return_ = 0;
+ if (opt_action_bool["source"]) {
+ writeln("sisupod source");
+ }
+ if (opt_action_bool["sisupod"]) {
+ writeln("sisupod source");
+ }
+ if (opt_action_bool["text"]) {
+ writeln("text processing");
+ // auto text=SDPoutput_text();
+ // text.scroll(contents, bookindex_unordered_hashes, biblio, fn_src, opt_action_bool);
+ // // text.scroll(contents, bookindex_unordered_hashes, biblio, dochead_make, dochead_meta, fn_src, opt_action_bool);
+ }
+ if (opt_action_bool["html"]) {
+ auto html=SDPoutputHTML();
+ html.css_write;
+ html.scroll(contents, bookindex_unordered_hashes, biblio, fn_src, opt_action_bool);
+ // html.scroll(contents, bookindex_unordered_hashes, biblio, dochead_make_json, dochead_meta_json, fn_src, opt_action_bool);
+ }
+ if (opt_action_bool["epub"]) {
+ writeln("epub processing");
+ }
+ if (opt_action_bool["pdf"]) {
+ writeln("pdf processing");
+ }
+ if (opt_action_bool["odt"]) {
+ writeln("odt processing");
+ }
+ if (opt_action_bool["sqlite"]) {
+ writeln("sqlite processing");
+ }
+ if (opt_action_bool["postgresql"]) {
+ writeln("pgsql processing");
+ }
+ return return_;
+ }
+}
mixin(import("version.txt"));
mixin CompileTimeInfo;
mixin RgxInit;
void main(string[] args) {
- struct SDPoutput {
- auto hub(S)(
- auto ref const S contents,
- string[][string][string] bookindex_unordered_hashes,
- JSONValue[] biblio,
- // JSONValue[string] dochead_make_json,
- // JSONValue[string] dochead_meta_json,
- string fn_src,
- bool[string] opt_action_bool
- ) {
- mixin ScreenTxtColors;
- mixin RgxInit;
- mixin SiSUoutputHub;
- auto rgx = Rgx();
- uint return_ = 0;
- if (opt_action_bool["source"]) {
- writeln("sisupod source");
- }
- if (opt_action_bool["sisupod"]) {
- writeln("sisupod source");
- }
- if (opt_action_bool["text"]) {
- writeln("text processing");
- // auto text=SDPoutput_text();
- // text.scroll(contents, bookindex_unordered_hashes, biblio, fn_src, opt_action_bool);
- // // text.scroll(contents, bookindex_unordered_hashes, biblio, dochead_make, dochead_meta, fn_src, opt_action_bool);
- }
- if (opt_action_bool["html"]) {
- auto html=SDPoutputHTML();
- html.css_write;
- html.scroll(contents, bookindex_unordered_hashes, biblio, fn_src, opt_action_bool);
- // html.scroll(contents, bookindex_unordered_hashes, biblio, dochead_make_json, dochead_meta_json, fn_src, opt_action_bool);
- }
- if (opt_action_bool["epub"]) {
- writeln("epub processing");
- }
- if (opt_action_bool["pdf"]) {
- writeln("pdf processing");
- }
- if (opt_action_bool["odt"]) {
- writeln("odt processing");
- }
- if (opt_action_bool["sqlite"]) {
- writeln("sqlite processing");
- }
- if (opt_action_bool["postgresql"]) {
- writeln("pgsql processing");
- }
- return return_;
- }
- }
mixin SiSUheaderSkel;
- mixin SiSUheaderExtract;
mixin SiSUbiblio;
mixin SiSUrgxInitFlags;
- // mixin SiSUconfiguration;
+ mixin SiSUconfiguration;
mixin SiSUmarkupRaw;
mixin SiSUdocAbstraction;
mixin SiSUoutputDebugs;
mixin SiSUoutputHub;
mixin ScreenTxtColors;
auto raw = MarkupRaw();
- auto head = HeaderDocMetadataMakeJson();
+ auto headsdl = HeaderExtractSDL();
auto abs = Abstraction();
auto dbg = SDPoutputDebugs();
auto output = SDPoutput();
@@ -200,6 +204,34 @@ void main(string[] args) {
arg_unrecognized ~= " " ~ arg;
}
}
+ auto conf = Config();
+ auto configuration = conf.readInConfigFile();
+ /+ sdlang config +/
+ Tag sdl_root_conf;
+ try {
+ sdl_root_conf = parseSource(configuration);
+ }
+ catch(SDLangParseException e) {
+ stderr.writeln("SDLang problem with config.sdl content");
+ // Error messages of the form:
+ // myFile.sdl(5:28): Error: Invalid integer suffix.
+ stderr.writeln(e.msg);
+ }
+ debug(sdlang) {
+ // Value is a std.variant.Algebraic
+ Value output_dir_structure_by = sdl_root_conf.tags["output_dir_structure_by"][0].values[0];
+ assert(output_dir_structure_by.type == typeid(string));
+ writeln(output_dir_structure_by);
+
+ // Tag person = sdl_root_conf.namespaces["myNamespace"].tags["person"][0];
+ // writeln("Name: ", person.attributes["name"][0].value);
+ //
+ // int age = person.tags["age"][0].values[0].get!int();
+ // writeln("Age: ", age);
+
+ writeln("config SDL:");
+ writeln(sdl_root_conf.toSDLDocument());
+ }
foreach(fn_src; fns_src) {
// foreach(fn_src; fns_src) {
if (!empty(fn_src)) {
@@ -234,13 +266,10 @@ void main(string[] args) {
writeln(header_and_content_tuple.length);
writeln(sourcefile_content[0]);
}
- /+ ↓ headers metadata & make +/
- auto header_content = head.headerContentJSON(header);
- static assert(!isTypeTuple!(header_content));
- auto dochead_make_json = header_content[0];
- auto dochead_meta_json = header_content[1];
+ /+ ↓ headers metadata & make sdlang +/
+ auto header_sdlang = headsdl.headerSDLang(header);
 /+ ↓ process document, return abstraction as tuple +/
- auto t = abs.abstract_doc_source(sourcefile_content, dochead_make_json, dochead_meta_json);
+ auto t = abs.abstract_doc_source(sourcefile_content);
static assert(!isTypeTuple!(t));
auto doc_ao_contents = t[0]; // contents ~ endnotes ~ bookindex;
// static assert(!isIterable!(doc_ao_contents));
@@ -253,8 +282,8 @@ void main(string[] args) {
doc_ao_contents,
doc_ao_bookindex_unordered_hashes,
doc_ao_biblio,
- dochead_make_json,
- dochead_meta_json,
+ // doc_ao_make_json,
+ // doc_ao_metadata_json,
fn_src,
opt_action_bool
);
diff --git a/src/sdp/ao_abstract_doc_source.d b/src/sdp/ao_abstract_doc_source.d
index ca435ff..c814c15 100644
--- a/src/sdp/ao_abstract_doc_source.d
+++ b/src/sdp/ao_abstract_doc_source.d
@@ -120,11 +120,8 @@ template SiSUdocAbstraction() {
// mixin SiSUdocAbstractionFunctions;
/+ ↓ abstract marked up document +/
- auto abstract_doc_source(
- char[][] markup_sourcefile_content,
- JSONValue[string] dochead_make_json,
- JSONValue[string] dochead_meta_json
- ) {
+ auto abstract_doc_source(char[][] markup_sourcefile_content) {
+
/+ ↓ abstraction init +/
scope(success) {
}
@@ -341,24 +338,24 @@ template SiSUdocAbstraction() {
&& ((type["para"] == State.off)
&& (type["heading"] == State.off))) {
/+ heading or para but neither flag nor line exists +/
- if ((to!string(dochead_make_json["make"]["headings"]).length > 2)
- && (type["make_headings"] == State.off)) {
- /+ heading found +/
- auto dochead_make_headings =
- to!string(dochead_make_json["make"]["headings"]);
- heading_found(line, dochead_make_headings, heading_match_str, heading_match_rgx, type);
- }
+ // if ((to!string(dochead_make["make"]["headings"]).length > 2)
+ // && (type["make_headings"] == State.off)) {
+ // /+ heading found +/
+ // auto dochead_make_headings =
+ // to!string(dochead_make["make"]["headings"]);
+ // heading_found(line, dochead_make_headings, heading_match_str, heading_match_rgx, type);
+ // }
if ((type["make_headings"] == State.on)
&& ((line_occur["para"] == State.off)
&& (line_occur["heading"] == State.off))
&& ((type["para"] == State.off)
&& (type["heading"] == State.off))) {
/+ heading make set +/
- heading_make_set(line, line_occur, heading_match_rgx, type);
+ // heading_make_set(line, line_occur, heading_match_rgx, type);
}
if (matchFirst(line, rgx.heading)) {
/+ heading match +/
- heading_matched(line, line_occur, an_object, lv, collapsed_lev, type, dochead_meta_json);
+ heading_matched(line, line_occur, an_object, lv, collapsed_lev, type);
} else if (line_occur["para"] == State.off) {
/+ para match +/
para_match(line, an_object, indent, bullet, type, line_occur);
@@ -1617,94 +1614,6 @@ template SiSUdocAbstraction() {
}
}
}
- auto heading_found(
- char[] line,
- string dochead_make_headings,
- ref string[string] heading_match_str,
- ref Regex!(char)[string] heading_match_rgx,
- ref int[string] type
- ) {
- if ((to!string(dochead_make_headings).length > 2)
- && (type["make_headings"] == State.off)) {
- /+ headings found +/
- debug(headingsfound) {
- writeln(dochead_make_headings);
- }
- auto make_headings_txt =
- match(
- to!string(dochead_make_headings),
- rgx.within_quotes);
- char[][] make_headings_spl =
- split(
- cast(char[]) make_headings_txt.captures[1],
- rgx.make_heading_delimiter);
- debug(headingsfound) {
- writeln(make_headings_spl.length);
- writeln(make_headings_spl);
- }
- switch (make_headings_spl.length) {
- case 7 :
- if (!empty(make_headings_spl[6])) {
- heading_match_str["h_4"] =
- "^(" ~ to!string(make_headings_spl[6]) ~ ")";
- heading_match_rgx["h_4"] =
- regex(heading_match_str["h_4"]);
- }
- goto case;
- case 6 :
- if (!empty(make_headings_spl[5])) {
- heading_match_str["h_3"] =
- "^(" ~ to!string(make_headings_spl[5]) ~ ")";
- heading_match_rgx["h_3"] =
- regex(heading_match_str["h_3"]);
- }
- goto case;
- case 5 :
- if (!empty(make_headings_spl[4])) {
- heading_match_str["h_2"] =
- "^(" ~ to!string(make_headings_spl[4]) ~ ")";
- heading_match_rgx["h_2"] =
- regex(heading_match_str["h_2"]);
- }
- goto case;
- case 4 :
- if (!empty(make_headings_spl[3])) {
- heading_match_str["h_1"] =
- "^(" ~ to!string(make_headings_spl[3]) ~ ")";
- heading_match_rgx["h_1"] =
- regex(heading_match_str["h_1"]);
- }
- goto case;
- case 3 :
- if (!empty(make_headings_spl[2])) {
- heading_match_str["h_D"] =
- "^(" ~ to!string(make_headings_spl[2]) ~ ")";
- heading_match_rgx["h_D"] =
- regex(heading_match_str["h_D"]);
- }
- goto case;
- case 2 :
- if (!empty(make_headings_spl[1])) {
- heading_match_str["h_C"] =
- "^(" ~ to!string(make_headings_spl[1]) ~ ")";
- heading_match_rgx["h_C"] =
- regex(heading_match_str["h_C"]);
- }
- goto case;
- case 1 :
- if (!empty(make_headings_spl[0])) {
- heading_match_str["h_B"] =
- "^(" ~ to!string(make_headings_spl[0]) ~ ")";
- heading_match_rgx["h_B"] =
- regex(heading_match_str["h_B"]);
- }
- break;
- default:
- break;
- }
- type["make_headings"] = State.on;
- }
- }
auto heading_make_set(
ref char[] line,
ref int[string] line_occur,
@@ -1767,8 +1676,8 @@ template SiSUdocAbstraction() {
ref string[string] an_object,
ref int[string] lv,
ref int[string] collapsed_lev,
- ref int[string] type,
- ref JSONValue[string] dochead_meta_json
+ ref int[string] type
+ // ref JSONValue[string] dochead_meta_json
) {
if (auto m = match(line, rgx.heading)) {
/+ heading match +/
@@ -1782,10 +1691,10 @@ template SiSUdocAbstraction() {
assertions_doc_structure(an_object, lv); // includes most of the logic for collapsed levels
switch (an_object["lev"]) {
case "A":
- an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_title, to!string(dochead_meta_json["title"]["main"]));
- an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_author, to!string(dochead_meta_json["creator"]["author"]));
- // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_title, to!string(parseJSON(dochead_meta_json["title"]["main"])));
- // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_author, to!string(parseJSON(dochead_meta_json["creator"]["author"])));
+ // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_title, to!string(dochead_metadata["title"]["main"]));
+ // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_author, to!string(dochead_metadata["creator"]["author"]));
+ // // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_title, to!string(parseJSON(dochead_metadata["title"]["main"])));
+ // // an_object["obj"]=replaceFirst(an_object["obj"], rgx.head_value_author, to!string(parseJSON(dochead_metadata["creator"]["author"])));
collapsed_lev["h0"] = 1;
an_object["lev_collapsed_number"] =
to!string(collapsed_lev["h0"]);
diff --git a/src/sdp/ao_header_extract.d b/src/sdp/ao_header_extract.d
deleted file mode 100644
index 7858406..0000000
--- a/src/sdp/ao_header_extract.d
+++ /dev/null
@@ -1,334 +0,0 @@
-/+
- extract header return json
-+/
-template SiSUheaderExtract() {
- private import
- std.exception,
- std.regex,
- std.utf,
- std.conv : to;
- private import
- ao_rgx; // ao_defaults.d
- struct HeaderDocMetadataMakeJson {
- mixin SiSUrgxInitFlags;
- mixin RgxInit;
- auto rgx = Rgx();
- enum State { off, on }
- string hm, hs;
- auto header_metadata_and_make_jsonstr(
- string header,
- JSONValue[string] dochead_meta,
- JSONValue[string] dochead_make
- )
- in { }
- body {
- scope(exit) {
- destroy(header);
- destroy(dochead_meta);
- destroy(dochead_make);
- }
- if (auto t = match(header, rgx.head_main)) {
- char[][] obj_spl = split(
- cast(char[]) header,
- rgx.line_delimiter_ws_strip
- );
- auto hm = to!string(t.captures[1]);
- if (match(hm, rgx.main_headers)) {
- foreach (line; obj_spl) {
- if (auto m = match(line, rgx.head_main)) {
- if (!empty(m.captures[2])) {
- if (hm == "creator") {
- dochead_meta[hm]["author"].str =
- to!string(m.captures[2]);
- } else if (hm == "title") {
- dochead_meta[hm]["main"].str =
- to!string(m.captures[2]);
- } else if (hm == "publisher") {
- dochead_meta[hm]["name"].str =
- to!string(m.captures[2]);
- }
- }
- } else if (auto s = match(line, rgx.head_sub)) {
- if (!empty(s.captures[2])) {
- auto hs = to!string(s.captures[1]);
- if ((hm == "make" )
- && (dochead_make[hm].type() == JSON_TYPE.OBJECT)) {
- switch (hm) {
- case "make":
- if (match(hs, rgx.subhead_make)) {
- if (dochead_make[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_make[hm][hs].str = to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- default:
- break;
- }
- } else if (dochead_meta[hm].type() == JSON_TYPE.OBJECT) {
- switch (hm) {
- case "creator":
- if (match(hs, rgx.subhead_creator)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "title":
- if (match(hs, rgx.subhead_title)) {
- if ((hs == "subtitle")
- && (dochead_meta[hm]["sub"].type() == JSON_TYPE.STRING)) {
- dochead_meta[hm]["sub"].str =
- to!string(s.captures[2]);
- } else if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "rights":
- if (match(hs, rgx.subhead_rights)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "date":
- if (match(hs, rgx.subhead_date)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "original":
- if (match(hs, rgx.subhead_original)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "classify":
- if (match(hs, rgx.subhead_classify)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "identifier":
- if (match(hs, rgx.subhead_identifier)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "notes":
- if (match(hs, rgx.subhead_notes)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "publisher":
- if (match(hs, rgx.subhead_publisher)) {
- if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- dochead_meta[hm][hs].str =
- to!string(s.captures[2]);
- }
- } else {
- writeln("not a valid header type:", hm, ":", hs);
- destroy(hm);
- destroy(hs);
- }
- break;
- case "links":
- destroy(hm);
- destroy(hs);
- // if (match(hs, rgx.subhead_links)) {
- // if (dochead_meta[hm][hs].type() == JSON_TYPE.STRING) {
- // dochead_meta[hm][hs].str = to!string(s.captures[2]);
- // }
- // } else {
- // writeln("not a valid header type:", hm, ":", hs);
- // destroy(hm);
- // destroy(hs);
- // }
- break;
- default:
- break;
- }
- }
- }
- }
- }
- } else {
- writeln("not a valid header type:", hm);
- }
- }
- auto t = tuple(dochead_meta, dochead_make);
- static assert(!isTypeTuple!(t));
- return t;
- }
- private auto header_extract(
- char[] line,
- ref int[string] line_occur,
- ref string[string] an_object,
- ref int[string] type
- ) {
- if (matchFirst(line, rgx.header_make)) {
- /+ matched header_make +/
- debug(header1) { // header
- // tell_l("yellow", line);
- }
- type["header"] = State.on;
- type["header_make"] = State.on;
- type["header_meta"] = State.off;
- ++line_occur["header_make"];
- an_object["obj"] ~= line ~= "\n";
- } else if (matchFirst(line, rgx.header_meta)) {
- /+ matched header_metadata +/
- debug(header1) { // header
- // tell_l("yellow", line);
- }
- type["header"] = State.on;
- type["header_make"] = State.off;
- type["header_meta"] = State.on;
- ++line_occur["header_meta"];
- an_object["obj"] ~= line ~= "\n";
- } else if (type["header_make"] == State.on
- && (line_occur["header_make"] > State.off)) {
- /+ header_make flag set +/
- if (matchFirst(line, rgx.header_sub)) {
- /+ sub-header +/
- debug(header1) {
- // tell_l("yellow", line);
- }
- // type["header"] = State.on;
- ++line_occur["header_make"];
- an_object["obj"] ~= line ~= "\n";
- }
- } else if (type["header_meta"] == State.on
- && (line_occur["header_meta"] > State.off)) {
- /+ header_metadata flag set +/
- if (matchFirst(line, rgx.header_sub)) {
- /+ sub-header +/
- debug(header1) {
- // tell_l("yellow", line);
- }
- ++line_occur["header_meta"];
- an_object["obj"] ~= line ~= "\n";
- }
- }
- // return 0;
- return an_object;
- }
- auto header_set_common(
- ref int[string] line_occur,
- ref string[string] an_object,
- ref int[string] type
- ) {
- // line_occur["header"] = State.off;
- line_occur["header_make"] = State.off;
- line_occur["header_meta"] = State.off;
- type["header"] = State.off;
- // type["header_make"] = State.off;
- // type["header_meta"] = State.off;
- an_object.remove("obj");
- an_object.remove("is");
- an_object.remove("attrib");
- }
- private auto headerContentJSON(in char[] src_header) {
- auto type = flags_type_init;
- type = [
- "header" : State.off,
- "header_make" : State.off,
- "header_meta" : State.off,
- ];
- string[string] an_object;
- int[string] line_occur;
- auto dochead_make = parseJSON(header_make_jsonstr).object;
- auto dochead_meta = parseJSON(header_meta_jsonstr).object;
- auto set_header = HeaderDocMetadataMakeJson();
- char[][] source_header_arr =
- split(cast(char[]) src_header, rgx.line_delimiter);
- foreach(header_line; source_header_arr) {
- if (auto m = matchFirst(header_line, rgx.comment)) {
- /+ matched comment +/
- debug(comment) {
- // tell_l("blue", header_line);
- }
- header_set_common(line_occur, an_object, type);
- // type["header_make"] = State.off;
- // type["header_meta"] = State.off;
- } else if ((matchFirst(header_line, rgx.header))
- || (type["header_make"] == State.on
- && (line_occur["header_make"] > State.off))
- || (type["header_meta"] == State.on
- && (line_occur["header_meta"] > State.off))) {
- if (header_line.length == 0) {
- /+ header_make instructions (current line empty) +/
- auto dochead_metadata_and_make =
- set_header.header_metadata_and_make_jsonstr(strip(an_object["obj"]), dochead_meta, dochead_make);
- static assert(!isTypeTuple!(dochead_metadata_and_make));
- dochead_meta = dochead_metadata_and_make[0];
- dochead_make = dochead_metadata_and_make[1];
- header_set_common(line_occur, an_object, type);
- type["header_make"] = State.off;
- type["header_meta"] = State.off;
- writeln(dochead_metadata_and_make);
- } else {
- an_object = header_extract(header_line, line_occur, an_object, type);
- }
- } else {
- // writeln(__LINE__);
- }
- }
- auto t = tuple(
- dochead_make,
- dochead_meta,
- );
- return t;
- }
- }
-}
diff --git a/src/sdp/ao_output_debugs.d b/src/sdp/ao_output_debugs.d
index 3c97640..525ebcc 100644
--- a/src/sdp/ao_output_debugs.d
+++ b/src/sdp/ao_output_debugs.d
@@ -8,8 +8,8 @@ template SiSUoutputDebugs() {
auto ref const S contents,
string[][string][string] bookindex_unordered_hashes,
JSONValue[] biblio,
- JSONValue[string] dochead_make,
- JSONValue[string] dochead_meta,
+ // JSONValue[string] dochead_make,
+ // JSONValue[string] dochead_meta,
string fn_src,
bool[string] opt_action_bool
) {
@@ -76,154 +76,6 @@ template SiSUoutputDebugs() {
}
}
}
- debug(headermakejson) {
- writefln(
- "%s\n%s\n%s",
- "document header, metadata & make instructions:",
- dochead_meta,
- pointer_head_main,
- );
- foreach (main_header; pointer_head_main) {
- switch (main_header) {
- case "make":
- foreach (sub_header; pointer_head_sub_make) {
- if (to!string(dochead_meta[main_header][sub_header]).length > 2) {
- writefln(
- "%s:%s: %s",
- main_header,
- sub_header,
- dochead_meta[main_header][sub_header]
- );
- }
- }
- break;
- default:
- break;
- }
- }
- }
- debug(headermetadatajson) {
- writefln(
- "%s\n%s\n%s",
- "document header, metadata & make instructions:",
- dochead_meta,
- pointer_head_main,
- );
- foreach (main_header; pointer_head_main) {
- switch (main_header) {
- case "creator":
- foreach (sub_header; pointer_head_sub_creator) {
- if (to!string(dochead_meta[main_header][sub_header]).length > 2) {
- writefln(
- "%s:%s: %s",
- main_header,
- sub_header,
- dochead_meta[main_header][sub_header]
- );
- }
- }
- break;
- case "title":
- foreach (sub_header; pointer_head_sub_title) {
- if (to!string(dochead_meta[main_header][sub_header]).length > 2) {
- writefln(
- "%s:%s: %s",
- main_header,
- sub_header,
- dochead_meta[main_header][sub_header]
- );
- }
- }
- break;
- case "rights":
- foreach (sub_header; pointer_head_sub_rights) {
- if (to!string(dochead_meta[main_header][sub_header]).length > 2) {
- writefln(
- "%s:%s: %s",
- main_header,
- sub_header,
- dochead_meta[main_header][sub_header]
- );
- }
- }
- break;
- case "date":
- foreach (sub_header; pointer_head_sub_date) {
- if (to!string(dochead_meta[main_header][sub_header]).length > 2) {
- writefln(
- "%s:%s: %s",
- main_header,
- sub_header,
- dochead_meta[main_header][sub_header]
- );
- }
- }
- break;
- case "original":
- foreach (sub_header; pointer_head_sub_original) {
- if (to!string(dochead_meta[main_header][sub_header]).length > 2) {
- writefln(
- "%s:%s: %s",
- main_header,
- sub_header,
- dochead_meta[main_header][sub_header]
- );
- }
- }
- break;
- case "classify":
- foreach (sub_header; pointer_head_sub_classify) {
- if (to!string(dochead_meta[main_header][sub_header]).length > 2) {
- writefln(
- "%s:%s: %s",
- main_header,
- sub_header,
- dochead_meta[main_header][sub_header]
- );
- }
- }
- break;
- case "identifier":
- foreach (sub_header; pointer_head_sub_identifier) {
- if (to!string(dochead_meta[main_header][sub_header]).length > 2) {
- writefln(
- "%s:%s: %s",
- main_header,
- sub_header,
- dochead_meta[main_header][sub_header]
- );
- }
- }
- break;
- case "notes":
- foreach (sub_header; pointer_head_sub_notes) {
- if (to!string(dochead_meta[main_header][sub_header]).length > 2) {
- writefln(
- "%s:%s: %s",
- main_header,
- sub_header,
- dochead_meta[main_header][sub_header]
- );
- }
- }
- break;
- case "publisher":
- foreach (sub_header; pointer_head_sub_publisher) {
- if (to!string(dochead_meta[main_header][sub_header]).length > 2) {
- writefln(
- "%s:%s: %s",
- main_header,
- sub_header,
- dochead_meta[main_header][sub_header]
- );
- }
- }
- break;
- default:
- break;
- }
- }
- }
debug(bookindex) {
writefln(
"%s\n%s:%s",
diff --git a/src/sdp/ao_read_config_files.d b/src/sdp/ao_read_config_files.d
new file mode 100644
index 0000000..47980da
--- /dev/null
+++ b/src/sdp/ao_read_config_files.d
@@ -0,0 +1,71 @@
+/+
+ ao_read_config_files.d
+ - read config files
++/
+template SiSUconfiguration() {
+ private import
+ std.exception,
+ // std.regex,
+ std.stdio,
+ std.utf,
+ std.conv : to;
+ // private import
+ // ao_rgx; // ao_defaults.d
+ // mixin RgxInit;
+ // auto rgx = Rgx();
+ private
+ struct Config {
+ private import std.file;
+ final private string readInConfigFile() {
+ // enforce(
+ // exists(fn_src)!=0,
+ // "file not found"
+ // );
+ string[] possible_config_path_locations = [
+ environment["PWD"] ~ "/.sisu",
+ environment["PWD"] ~ "/_sisu",
+ environment["HOME"] ~ "/.sisu",
+ "/etc/sisu"
+ ];
+ string conf_sdl = "conf.sdl";
+ string config_file_str;
+ foreach(pth; possible_config_path_locations) {
+ auto conf_file = format(
+ "%s/%s",
+ pth,
+ conf_sdl,
+ );
+ // writeln(conf_file);
+ try {
+ if (exists(conf_file)) {
+ writeln(conf_file);
+ config_file_str = readText(conf_file);
+ break;
+ }
+ }
+ catch (ErrnoException ex) {
+ //// Handle errors
+ // switch(ex.errno) {
+ // case EPERM:
+ // case EACCES:
+ // // Permission denied
+ // break;
+ // case ENOENT:
+ // // File does not exist
+ // break;
+ // default:
+ // // Handle other errors
+ // break;
+ // }
+ }
+ // catch (UTFException ex) {
+ // // Handle validation errors
+ // }
+ catch (FileException ex) {
+ // Handle errors
+ }
+ }
+ return config_file_str;
+ }
+ }
+}
diff --git a/src/sdp/ao_read_source_files.d b/src/sdp/ao_read_source_files.d
index e450bc8..ef9b8b4 100644
--- a/src/sdp/ao_read_source_files.d
+++ b/src/sdp/ao_read_source_files.d
@@ -20,7 +20,7 @@ template SiSUmarkupRaw() {
auto raw = MarkupRawUnit();
auto t =
raw.markupSourceHeaderContentRawLineTupleArray(fn_src, rgx.src_pth);
- auto header_content_raw = t[0];
+ auto header_raw = t[0];
auto sourcefile_content = t[1];
if (match(fn_src, rgx.src_fn_master)) {
auto ins = Inserts();
@@ -29,16 +29,69 @@ template SiSUmarkupRaw() {
// auto ins = SiSUdocInserts.Inserts();
}
t = tuple(
- header_content_raw,
+ header_raw,
sourcefile_content
);
return t;
}
}
private
+ struct HeaderExtractSDL {
+ final private auto headerMakeSDLang(in string src_header) {
+ scope(failure) {
+ stderr.writefln(
+ "%s\n%s\n%s:%s failed here:\n src_header: %s",
+ __MODULE__, __FUNCTION__,
+ __FILE__, __LINE__,
+ src_header,
+ );
+ }
+ Tag sdl_root_header;
+ try {
+ sdl_root_header = parseSource(src_header);
+ }
+ catch(SDLangParseException e) {
+ stderr.writeln("SDLang problem with this document header:");
+ stderr.writeln(src_header);
+ // Error messages of the form:
+ // myFile.sdl(5:28): Error: Invalid integer suffix.
+ stderr.writeln(e.msg);
+ }
+ debug(sdlang) {
+ // // Value is a std.variant.Algebraic
+ // Value output_dir_structure_by = sdl_root_header.tags["output_dir_structure_by"][0].values[0];
+ // assert(output_dir_structure_by.type == typeid(string));
+ // writeln(output_dir_structure_by);
+
+ // Tag person = sdl_root_header.namespaces["myNamespace"].tags["person"][0];
+ // writeln("Name: ", person.attributes["name"][0].value);
+ //
+ // int age = person.tags["age"][0].values[0].get!int();
+ // writeln("Age: ", age);
+
+ writeln("header SDL:");
+ writeln(sdl_root_header.toSDLDocument());
+ }
+ return sdl_root_header;
+ }
+ private auto headerSDLang(in char[] src_header) {
+ char[][] source_header_arr =
+ split(cast(char[]) src_header, rgx.line_delimiter);
+ char[] header_clean;
+ foreach(header_line; source_header_arr) {
+ if (!match(header_line, rgx.comments)) {
+ header_clean ~= header_line ~ "\n";
+ // writeln(header_line);
+ }
+ }
+ // writeln(header_clean); // consider
+ auto header_sdlang=headerMakeSDLang(to!string(header_clean));
+ return header_sdlang;
+ }
+ }
struct MarkupRawUnit {
private import std.file;
- enum State { off, on }
+ // enum State { off, on }
final private string readInMarkupSource(in string fn_src) {
enforce(
exists(fn_src)!=0,
diff --git a/views/version.txt b/views/version.txt
index c181dba..5fab19c 100644
--- a/views/version.txt
+++ b/views/version.txt
@@ -4,4 +4,4 @@ struct Version {
int minor;
int patch;
}
-enum ver = Version(0, 4, 1);
+enum ver = Version(0, 5, 0);