aboutsummaryrefslogtreecommitdiffhomepage
path: root/src/ext_depends/D-YAML/source/dyaml
diff options
context:
space:
mode:
Diffstat (limited to 'src/ext_depends/D-YAML/source/dyaml')
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/composer.d375
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/constructor.d611
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/dumper.d287
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/emitter.d1689
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/encoding.d11
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/escapes.d92
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/event.d243
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/exception.d159
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/linebreak.d32
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/loader.d394
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/node.d2488
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/package.d15
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/parser.d958
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/queue.d272
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/reader.d906
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/representer.d517
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/resolver.d261
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/scanner.d1788
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/serializer.d322
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/style.d37
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/tagdirective.d15
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/test/common.d223
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/test/compare.d51
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/test/constructor.d957
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/test/emitter.d132
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/test/errors.d64
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/test/inputoutput.d92
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/test/reader.d37
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/test/representer.d54
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/test/resolver.d39
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/test/tokens.d93
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/tinyendian.d213
-rw-r--r--src/ext_depends/D-YAML/source/dyaml/token.d172
33 files changed, 13599 insertions, 0 deletions
diff --git a/src/ext_depends/D-YAML/source/dyaml/composer.d b/src/ext_depends/D-YAML/source/dyaml/composer.d
new file mode 100644
index 0000000..c000b02
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/composer.d
@@ -0,0 +1,375 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/**
+ * Composes nodes from YAML events provided by parser.
+ * Code based on PyYAML: http://www.pyyaml.org
+ */
+module dyaml.composer;
+
+import core.memory;
+
+import std.algorithm;
+import std.array;
+import std.conv;
+import std.exception;
+import std.range;
+import std.typecons;
+
+import dyaml.constructor;
+import dyaml.event;
+import dyaml.exception;
+import dyaml.node;
+import dyaml.parser;
+import dyaml.resolver;
+
+
+package:
+/**
+ * Exception thrown at composer errors.
+ *
+ * See_Also: MarkedYAMLException
+ */
+class ComposerException : MarkedYAMLException
+{
+ mixin MarkedExceptionCtors;
+}
+
+///Composes YAML documents from events provided by a Parser.
+struct Composer
+{
+ private:
+ ///Parser providing YAML events.
+ Parser parser_;
+ ///Resolver resolving tags (data types).
+ Resolver resolver_;
+ ///Nodes associated with anchors. Used by YAML aliases.
+ Node[string] anchors_;
+
+ ///Used to reduce allocations when creating pair arrays.
+ ///
+ ///We need one appender for each nesting level that involves
+ ///a pair array, as the inner levels are processed as a
+ ///part of the outer levels. Used as a stack.
+ Appender!(Node.Pair[])[] pairAppenders_;
+ ///Used to reduce allocations when creating node arrays.
+ ///
+ ///We need one appender for each nesting level that involves
+ ///a node array, as the inner levels are processed as a
+ ///part of the outer levels. Used as a stack.
+ Appender!(Node[])[] nodeAppenders_;
+
+ public:
+ /**
+ * Construct a composer.
+ *
+ * Params: parser = Parser to provide YAML events.
+ * resolver = Resolver to resolve tags (data types).
+ */
+ this(Parser parser, Resolver resolver) @safe
+ {
+ parser_ = parser;
+ resolver_ = resolver;
+ }
+
+ /**
+ * Determine if there are any nodes left.
+ *
+ * Must be called before loading as it handles the stream start event.
+ */
+ bool checkNode() @safe
+ {
+ // If next event is stream start, skip it
+ parser_.skipOver!"a.id == b"(EventID.streamStart);
+
+ //True if there are more documents available.
+ return parser_.front.id != EventID.streamEnd;
+ }
+
+ ///Get a YAML document as a node (the root of the document).
+ Node getNode() @safe
+ {
+ //Get the root node of the next document.
+ assert(parser_.front.id != EventID.streamEnd,
+ "Trying to get a node from Composer when there is no node to " ~
+ "get. use checkNode() to determine if there is a node.");
+
+ return composeDocument();
+ }
+
+ private:
+
+ void skipExpected(const EventID id) @safe
+ {
+ const foundExpected = parser_.skipOver!"a.id == b"(id);
+ assert(foundExpected, text("Expected ", id, " not found."));
+ }
+ ///Ensure that appenders for specified nesting levels exist.
+ ///
+ ///Params: pairAppenderLevel = Current level in the pair appender stack.
+    ///         nodeAppenderLevel = Current level of the node appender stack.
+ void ensureAppendersExist(const uint pairAppenderLevel, const uint nodeAppenderLevel)
+ @safe
+ {
+ while(pairAppenders_.length <= pairAppenderLevel)
+ {
+ pairAppenders_ ~= appender!(Node.Pair[])();
+ }
+ while(nodeAppenders_.length <= nodeAppenderLevel)
+ {
+ nodeAppenders_ ~= appender!(Node[])();
+ }
+ }
+
+ ///Compose a YAML document and return its root node.
+ Node composeDocument() @safe
+ {
+ skipExpected(EventID.documentStart);
+
+ //Compose the root node.
+ Node node = composeNode(0, 0);
+
+ skipExpected(EventID.documentEnd);
+
+ anchors_.destroy();
+ return node;
+ }
+
+ /// Compose a node.
+ ///
+ /// Params: pairAppenderLevel = Current level of the pair appender stack.
+ /// nodeAppenderLevel = Current level of the node appender stack.
+ Node composeNode(const uint pairAppenderLevel, const uint nodeAppenderLevel) @safe
+ {
+ if(parser_.front.id == EventID.alias_)
+ {
+ const event = parser_.front;
+ parser_.popFront();
+ const anchor = event.anchor;
+ enforce((anchor in anchors_) !is null,
+ new ComposerException("Found undefined alias: " ~ anchor,
+ event.startMark));
+
+ //If the node referenced by the anchor is uninitialized,
+ //it's not finished, i.e. we're currently composing it
+ //and trying to use it recursively here.
+ enforce(anchors_[anchor] != Node(),
+ new ComposerException("Found recursive alias: " ~ anchor,
+ event.startMark));
+
+ return anchors_[anchor];
+ }
+
+ const event = parser_.front;
+ const anchor = event.anchor;
+ if((anchor !is null) && (anchor in anchors_) !is null)
+ {
+ throw new ComposerException("Found duplicate anchor: " ~ anchor,
+ event.startMark);
+ }
+
+ Node result;
+ //Associate the anchor, if any, with an uninitialized node.
+ //used to detect duplicate and recursive anchors.
+ if(anchor !is null)
+ {
+ anchors_[anchor] = Node();
+ }
+
+ switch (parser_.front.id)
+ {
+ case EventID.scalar:
+ result = composeScalarNode();
+ break;
+ case EventID.sequenceStart:
+ result = composeSequenceNode(pairAppenderLevel, nodeAppenderLevel);
+ break;
+ case EventID.mappingStart:
+ result = composeMappingNode(pairAppenderLevel, nodeAppenderLevel);
+ break;
+ default: assert(false, "This code should never be reached");
+ }
+
+ if(anchor !is null)
+ {
+ anchors_[anchor] = result;
+ }
+ return result;
+ }
+
+ ///Compose a scalar node.
+ Node composeScalarNode() @safe
+ {
+ const event = parser_.front;
+ parser_.popFront();
+ const tag = resolver_.resolve(NodeID.scalar, event.tag, event.value,
+ event.implicit);
+
+ Node node = constructNode(event.startMark, event.endMark, tag,
+ event.value);
+ node.scalarStyle = event.scalarStyle;
+
+ return node;
+ }
+
+ /// Compose a sequence node.
+ ///
+ /// Params: pairAppenderLevel = Current level of the pair appender stack.
+ /// nodeAppenderLevel = Current level of the node appender stack.
+ Node composeSequenceNode(const uint pairAppenderLevel, const uint nodeAppenderLevel)
+ @safe
+ {
+ ensureAppendersExist(pairAppenderLevel, nodeAppenderLevel);
+ auto nodeAppender = &(nodeAppenders_[nodeAppenderLevel]);
+
+ const startEvent = parser_.front;
+ parser_.popFront();
+ const tag = resolver_.resolve(NodeID.sequence, startEvent.tag, null,
+ startEvent.implicit);
+
+ while(parser_.front.id != EventID.sequenceEnd)
+ {
+ nodeAppender.put(composeNode(pairAppenderLevel, nodeAppenderLevel + 1));
+ }
+
+ Node node = constructNode(startEvent.startMark, parser_.front.endMark,
+ tag, nodeAppender.data.dup);
+ node.collectionStyle = startEvent.collectionStyle;
+ parser_.popFront();
+ nodeAppender.clear();
+
+ return node;
+ }
+
+ /**
+ * Flatten a node, merging it with nodes referenced through YAMLMerge data type.
+ *
+ * Node must be a mapping or a sequence of mappings.
+ *
+ * Params: root = Node to flatten.
+ * startMark = Start position of the node.
+ * endMark = End position of the node.
+ * pairAppenderLevel = Current level of the pair appender stack.
+ * nodeAppenderLevel = Current level of the node appender stack.
+ *
+ * Returns: Flattened mapping as pairs.
+ */
+ Node.Pair[] flatten(ref Node root, const Mark startMark, const Mark endMark,
+ const uint pairAppenderLevel, const uint nodeAppenderLevel) @safe
+ {
+ void error(Node node)
+ {
+ //this is Composer, but the code is related to Constructor.
+ throw new ConstructorException("While constructing a mapping, " ~
+ "expected a mapping or a list of " ~
+ "mappings for merging, but found: " ~
+ text(node.type) ~
+ " NOTE: line/column shows topmost parent " ~
+ "to which the content is being merged",
+ startMark, endMark);
+ }
+
+ ensureAppendersExist(pairAppenderLevel, nodeAppenderLevel);
+ auto pairAppender = &(pairAppenders_[pairAppenderLevel]);
+
+ final switch (root.nodeID)
+ {
+ case NodeID.mapping:
+ Node[] toMerge;
+ toMerge.reserve(root.length);
+ foreach (ref Node key, ref Node value; root)
+ {
+ if(key.type == NodeType.merge)
+ {
+ toMerge ~= value;
+ }
+ else
+ {
+ auto temp = Node.Pair(key, value);
+ pairAppender.put(temp);
+ }
+ }
+ foreach (node; toMerge)
+ {
+ pairAppender.put(flatten(node, startMark, endMark,
+ pairAppenderLevel + 1, nodeAppenderLevel));
+ }
+ break;
+ case NodeID.sequence:
+ foreach (ref Node node; root)
+ {
+ if (node.nodeID != NodeID.mapping)
+ {
+ error(node);
+ }
+ pairAppender.put(flatten(node, startMark, endMark,
+ pairAppenderLevel + 1, nodeAppenderLevel));
+ }
+ break;
+ case NodeID.scalar:
+ case NodeID.invalid:
+ error(root);
+ break;
+ }
+
+ auto flattened = pairAppender.data.dup;
+ pairAppender.clear();
+
+ return flattened;
+ }
+
+ /// Compose a mapping node.
+ ///
+ /// Params: pairAppenderLevel = Current level of the pair appender stack.
+ /// nodeAppenderLevel = Current level of the node appender stack.
+ Node composeMappingNode(const uint pairAppenderLevel, const uint nodeAppenderLevel)
+ @safe
+ {
+ ensureAppendersExist(pairAppenderLevel, nodeAppenderLevel);
+ const startEvent = parser_.front;
+ parser_.popFront();
+ const tag = resolver_.resolve(NodeID.mapping, startEvent.tag, null,
+ startEvent.implicit);
+ auto pairAppender = &(pairAppenders_[pairAppenderLevel]);
+
+ Tuple!(Node, Mark)[] toMerge;
+ while(parser_.front.id != EventID.mappingEnd)
+ {
+ auto pair = Node.Pair(composeNode(pairAppenderLevel + 1, nodeAppenderLevel),
+ composeNode(pairAppenderLevel + 1, nodeAppenderLevel));
+
+ //Need to flatten and merge the node referred by YAMLMerge.
+ if(pair.key.type == NodeType.merge)
+ {
+ toMerge ~= tuple(pair.value, cast(Mark)parser_.front.endMark);
+ }
+ //Not YAMLMerge, just add the pair.
+ else
+ {
+ pairAppender.put(pair);
+ }
+ }
+ foreach(node; toMerge)
+ {
+ merge(*pairAppender, flatten(node[0], startEvent.startMark, node[1],
+ pairAppenderLevel + 1, nodeAppenderLevel));
+ }
+ auto numUnique = pairAppender.data.dup
+ .sort!((x,y) => x.key > y.key)
+ .uniq!((x,y) => x.key == y.key)
+ .walkLength;
+ enforce(numUnique == pairAppender.data.length,
+ new ComposerException("Duplicate key found in mapping", parser_.front.startMark));
+
+ Node node = constructNode(startEvent.startMark, parser_.front.endMark,
+ tag, pairAppender.data.dup);
+ node.collectionStyle = startEvent.collectionStyle;
+ parser_.popFront();
+
+ pairAppender.clear();
+ return node;
+ }
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/constructor.d b/src/ext_depends/D-YAML/source/dyaml/constructor.d
new file mode 100644
index 0000000..bc1d75c
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/constructor.d
@@ -0,0 +1,611 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/**
+ * Class that processes YAML mappings, sequences and scalars into nodes.
+ * This can be used to add custom data types. A tutorial can be found
+ * $(LINK2 https://dlang-community.github.io/D-YAML/, here).
+ */
+module dyaml.constructor;
+
+
+import std.array;
+import std.algorithm;
+import std.base64;
+import std.container;
+import std.conv;
+import std.datetime;
+import std.exception;
+import std.regex;
+import std.string;
+import std.typecons;
+import std.utf;
+
+import dyaml.node;
+import dyaml.exception;
+import dyaml.style;
+
+package:
+
+// Exception thrown at constructor errors.
+class ConstructorException : YAMLException
+{
+ /// Construct a ConstructorException.
+ ///
+ /// Params: msg = Error message.
+ /// start = Start position of the error context.
+ /// end = End position of the error context.
+ this(string msg, Mark start, Mark end, string file = __FILE__, size_t line = __LINE__)
+ @safe pure nothrow
+ {
+ super(msg ~ "\nstart: " ~ start.toString() ~ "\nend: " ~ end.toString(),
+ file, line);
+ }
+}
+
+/** Constructs YAML values.
+ *
+ * Each YAML scalar, sequence or mapping has a tag specifying its data type.
+ * Constructor uses user-specifiable functions to create a node of desired
+ * data type from a scalar, sequence or mapping.
+ *
+ *
+ * Each of these functions is associated with a tag, and can process either
+ * a scalar, a sequence, or a mapping. The constructor passes each value to
+ * the function with corresponding tag, which then returns the resulting value
+ * that can be stored in a node.
+ *
+ * If a tag is detected with no known constructor function, it is considered an error.
+ */
+/*
+ * Construct a node.
+ *
+ * Params: start = Start position of the node.
+ * end = End position of the node.
+ * tag = Tag (data type) of the node.
+ * value = Value to construct node from (string, nodes or pairs).
+ * style = Style of the node (scalar or collection style).
+ *
+ * Returns: Constructed node.
+ */
+Node constructNode(T)(const Mark start, const Mark end, const string tag,
+ T value) @safe
+ if((is(T : string) || is(T == Node[]) || is(T == Node.Pair[])))
+{
+ Node newNode;
+ try
+ {
+ switch(tag)
+ {
+ case "tag:yaml.org,2002:null":
+ newNode = Node(YAMLNull(), tag);
+ break;
+ case "tag:yaml.org,2002:bool":
+ static if(is(T == string))
+ {
+ newNode = Node(constructBool(value), tag);
+ break;
+ }
+ else throw new Exception("Only scalars can be bools");
+ case "tag:yaml.org,2002:int":
+ static if(is(T == string))
+ {
+ newNode = Node(constructLong(value), tag);
+ break;
+ }
+ else throw new Exception("Only scalars can be ints");
+ case "tag:yaml.org,2002:float":
+ static if(is(T == string))
+ {
+ newNode = Node(constructReal(value), tag);
+ break;
+ }
+ else throw new Exception("Only scalars can be floats");
+ case "tag:yaml.org,2002:binary":
+ static if(is(T == string))
+ {
+ newNode = Node(constructBinary(value), tag);
+ break;
+ }
+ else throw new Exception("Only scalars can be binary data");
+ case "tag:yaml.org,2002:timestamp":
+ static if(is(T == string))
+ {
+ newNode = Node(constructTimestamp(value), tag);
+ break;
+ }
+ else throw new Exception("Only scalars can be timestamps");
+ case "tag:yaml.org,2002:str":
+ static if(is(T == string))
+ {
+ newNode = Node(constructString(value), tag);
+ break;
+ }
+ else throw new Exception("Only scalars can be strings");
+ case "tag:yaml.org,2002:value":
+ static if(is(T == string))
+ {
+ newNode = Node(constructString(value), tag);
+ break;
+ }
+ else throw new Exception("Only scalars can be values");
+ case "tag:yaml.org,2002:omap":
+ static if(is(T == Node[]))
+ {
+ newNode = Node(constructOrderedMap(value), tag);
+ break;
+ }
+ else throw new Exception("Only sequences can be ordered maps");
+ case "tag:yaml.org,2002:pairs":
+ static if(is(T == Node[]))
+ {
+ newNode = Node(constructPairs(value), tag);
+ break;
+ }
+ else throw new Exception("Only sequences can be pairs");
+ case "tag:yaml.org,2002:set":
+ static if(is(T == Node.Pair[]))
+ {
+ newNode = Node(constructSet(value), tag);
+ break;
+ }
+ else throw new Exception("Only mappings can be sets");
+ case "tag:yaml.org,2002:seq":
+ static if(is(T == Node[]))
+ {
+ newNode = Node(constructSequence(value), tag);
+ break;
+ }
+ else throw new Exception("Only sequences can be sequences");
+ case "tag:yaml.org,2002:map":
+ static if(is(T == Node.Pair[]))
+ {
+ newNode = Node(constructMap(value), tag);
+ break;
+ }
+ else throw new Exception("Only mappings can be maps");
+ case "tag:yaml.org,2002:merge":
+ newNode = Node(YAMLMerge(), tag);
+ break;
+ default:
+ newNode = Node(value, tag);
+ break;
+ }
+ }
+ catch(Exception e)
+ {
+ throw new ConstructorException("Error constructing " ~ typeid(T).toString()
+ ~ ":\n" ~ e.msg, start, end);
+ }
+
+ newNode.startMark_ = start;
+
+ return newNode;
+}
+
+private:
+// Construct a boolean _node.
+bool constructBool(const string str) @safe
+{
+ string value = str.toLower();
+ if(value.among!("yes", "true", "on")){return true;}
+ if(value.among!("no", "false", "off")){return false;}
+ throw new Exception("Unable to parse boolean value: " ~ value);
+}
+
+// Construct an integer (long) _node.
+long constructLong(const string str) @safe
+{
+ string value = str.replace("_", "");
+ const char c = value[0];
+ const long sign = c != '-' ? 1 : -1;
+ if(c == '-' || c == '+')
+ {
+ value = value[1 .. $];
+ }
+
+ enforce(value != "", new Exception("Unable to parse float value: " ~ value));
+
+ long result;
+ try
+ {
+ //Zero.
+ if(value == "0") {result = cast(long)0;}
+ //Binary.
+ else if(value.startsWith("0b")){result = sign * to!int(value[2 .. $], 2);}
+ //Hexadecimal.
+ else if(value.startsWith("0x")){result = sign * to!int(value[2 .. $], 16);}
+ //Octal.
+ else if(value[0] == '0') {result = sign * to!int(value, 8);}
+ //Sexagesimal.
+ else if(value.canFind(":"))
+ {
+ long val;
+ long base = 1;
+ foreach_reverse(digit; value.split(":"))
+ {
+ val += to!long(digit) * base;
+ base *= 60;
+ }
+ result = sign * val;
+ }
+ //Decimal.
+ else{result = sign * to!long(value);}
+ }
+ catch(ConvException e)
+ {
+ throw new Exception("Unable to parse integer value: " ~ value);
+ }
+
+ return result;
+}
+@safe unittest
+{
+ string canonical = "685230";
+ string decimal = "+685_230";
+ string octal = "02472256";
+ string hexadecimal = "0x_0A_74_AE";
+ string binary = "0b1010_0111_0100_1010_1110";
+ string sexagesimal = "190:20:30";
+
+ assert(685230 == constructLong(canonical));
+ assert(685230 == constructLong(decimal));
+ assert(685230 == constructLong(octal));
+ assert(685230 == constructLong(hexadecimal));
+ assert(685230 == constructLong(binary));
+ assert(685230 == constructLong(sexagesimal));
+}
+
+// Construct a floating point (real) _node.
+real constructReal(const string str) @safe
+{
+ string value = str.replace("_", "").toLower();
+ const char c = value[0];
+ const real sign = c != '-' ? 1.0 : -1.0;
+ if(c == '-' || c == '+')
+ {
+ value = value[1 .. $];
+ }
+
+ enforce(value != "" && value != "nan" && value != "inf" && value != "-inf",
+ new Exception("Unable to parse float value: " ~ value));
+
+ real result;
+ try
+ {
+ //Infinity.
+ if (value == ".inf"){result = sign * real.infinity;}
+ //Not a Number.
+ else if(value == ".nan"){result = real.nan;}
+ //Sexagesimal.
+ else if(value.canFind(":"))
+ {
+ real val = 0.0;
+ real base = 1.0;
+ foreach_reverse(digit; value.split(":"))
+ {
+ val += to!real(digit) * base;
+ base *= 60.0;
+ }
+ result = sign * val;
+ }
+ //Plain floating point.
+ else{result = sign * to!real(value);}
+ }
+ catch(ConvException e)
+ {
+ throw new Exception("Unable to parse float value: \"" ~ value ~ "\"");
+ }
+
+ return result;
+}
+@safe unittest
+{
+ bool eq(real a, real b, real epsilon = 0.2) @safe
+ {
+ return a >= (b - epsilon) && a <= (b + epsilon);
+ }
+
+ string canonical = "6.8523015e+5";
+ string exponential = "685.230_15e+03";
+ string fixed = "685_230.15";
+ string sexagesimal = "190:20:30.15";
+ string negativeInf = "-.inf";
+ string NaN = ".NaN";
+
+ assert(eq(685230.15, constructReal(canonical)));
+ assert(eq(685230.15, constructReal(exponential)));
+ assert(eq(685230.15, constructReal(fixed)));
+ assert(eq(685230.15, constructReal(sexagesimal)));
+ assert(eq(-real.infinity, constructReal(negativeInf)));
+ assert(to!string(constructReal(NaN)) == "nan");
+}
+
+// Construct a binary (base64) _node.
+ubyte[] constructBinary(const string value) @safe
+{
+ import std.ascii : newline;
+ import std.array : array;
+
+ // For an unknown reason, this must be nested to work (compiler bug?).
+ try
+ {
+ return Base64.decode(value.representation.filter!(c => !newline.canFind(c)).array);
+ }
+ catch(Base64Exception e)
+ {
+ throw new Exception("Unable to decode base64 value: " ~ e.msg);
+ }
+}
+
+@safe unittest
+{
+ auto test = "The Answer: 42".representation;
+ char[] buffer;
+ buffer.length = 256;
+ string input = Base64.encode(test, buffer).idup;
+ const value = constructBinary(input);
+ assert(value == test);
+ assert(value == [84, 104, 101, 32, 65, 110, 115, 119, 101, 114, 58, 32, 52, 50]);
+}
+
+// Construct a timestamp (SysTime) _node.
+SysTime constructTimestamp(const string str) @safe
+{
+ string value = str;
+
+ auto YMDRegexp = regex("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)");
+ auto HMSRegexp = regex("^[Tt \t]+([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(\\.[0-9]*)?");
+ auto TZRegexp = regex("^[ \t]*Z|([-+][0-9][0-9]?)(:[0-9][0-9])?");
+
+ try
+ {
+ // First, get year, month and day.
+ auto matches = match(value, YMDRegexp);
+
+ enforce(!matches.empty,
+ new Exception("Unable to parse timestamp value: " ~ value));
+
+ auto captures = matches.front.captures;
+ const year = to!int(captures[1]);
+ const month = to!int(captures[2]);
+ const day = to!int(captures[3]);
+
+ // If available, get hour, minute, second and fraction, if present.
+ value = matches.front.post;
+ matches = match(value, HMSRegexp);
+ if(matches.empty)
+ {
+ return SysTime(DateTime(year, month, day), UTC());
+ }
+
+ captures = matches.front.captures;
+ const hour = to!int(captures[1]);
+ const minute = to!int(captures[2]);
+ const second = to!int(captures[3]);
+ const hectonanosecond = cast(int)(to!real("0" ~ captures[4]) * 10_000_000);
+
+ // If available, get timezone.
+ value = matches.front.post;
+ matches = match(value, TZRegexp);
+ if(matches.empty || matches.front.captures[0] == "Z")
+ {
+ // No timezone.
+ return SysTime(DateTime(year, month, day, hour, minute, second),
+ hectonanosecond.dur!"hnsecs", UTC());
+ }
+
+ // We have a timezone, so parse it.
+ captures = matches.front.captures;
+ int sign = 1;
+ int tzHours;
+ if(!captures[1].empty)
+ {
+ if(captures[1][0] == '-') {sign = -1;}
+ tzHours = to!int(captures[1][1 .. $]);
+ }
+ const tzMinutes = (!captures[2].empty) ? to!int(captures[2][1 .. $]) : 0;
+ const tzOffset = dur!"minutes"(sign * (60 * tzHours + tzMinutes));
+
+ return SysTime(DateTime(year, month, day, hour, minute, second),
+ hectonanosecond.dur!"hnsecs",
+ new immutable SimpleTimeZone(tzOffset));
+ }
+ catch(ConvException e)
+ {
+ throw new Exception("Unable to parse timestamp value " ~ value ~ " : " ~ e.msg);
+ }
+ catch(DateTimeException e)
+ {
+ throw new Exception("Invalid timestamp value " ~ value ~ " : " ~ e.msg);
+ }
+
+ assert(false, "This code should never be reached");
+}
+@safe unittest
+{
+ string timestamp(string value)
+ {
+ return constructTimestamp(value).toISOString();
+ }
+
+ string canonical = "2001-12-15T02:59:43.1Z";
+ string iso8601 = "2001-12-14t21:59:43.10-05:00";
+ string spaceSeparated = "2001-12-14 21:59:43.10 -5";
+ string noTZ = "2001-12-15 2:59:43.10";
+ string noFraction = "2001-12-15 2:59:43";
+ string ymd = "2002-12-14";
+
+ assert(timestamp(canonical) == "20011215T025943.1Z");
+ //avoiding float conversion errors
+ assert(timestamp(iso8601) == "20011214T215943.0999999-05:00" ||
+ timestamp(iso8601) == "20011214T215943.1-05:00");
+ assert(timestamp(spaceSeparated) == "20011214T215943.0999999-05:00" ||
+ timestamp(spaceSeparated) == "20011214T215943.1-05:00");
+ assert(timestamp(noTZ) == "20011215T025943.0999999Z" ||
+ timestamp(noTZ) == "20011215T025943.1Z");
+ assert(timestamp(noFraction) == "20011215T025943Z");
+ assert(timestamp(ymd) == "20021214T000000Z");
+}
+
+// Construct a string _node.
+string constructString(const string str) @safe
+{
+ return str;
+}
+
+// Convert a sequence of single-element mappings into a sequence of pairs.
+Node.Pair[] getPairs(string type, const Node[] nodes) @safe
+{
+ Node.Pair[] pairs;
+ pairs.reserve(nodes.length);
+ foreach(node; nodes)
+ {
+ enforce(node.nodeID == NodeID.mapping && node.length == 1,
+ new Exception("While constructing " ~ type ~
+ ", expected a mapping with single element"));
+
+ pairs ~= node.as!(Node.Pair[]);
+ }
+
+ return pairs;
+}
+
+// Construct an ordered map (ordered sequence of key:value pairs without duplicates) _node.
+Node.Pair[] constructOrderedMap(const Node[] nodes) @safe
+{
+ auto pairs = getPairs("ordered map", nodes);
+
+ //Detect duplicates.
+ //TODO this should be replaced by something with deterministic memory allocation.
+ auto keys = redBlackTree!Node();
+ foreach(ref pair; pairs)
+ {
+ enforce(!(pair.key in keys),
+ new Exception("Duplicate entry in an ordered map: "
+ ~ pair.key.debugString()));
+ keys.insert(pair.key);
+ }
+ return pairs;
+}
+@safe unittest
+{
+ Node[] alternateTypes(uint length) @safe
+ {
+ Node[] pairs;
+ foreach(long i; 0 .. length)
+ {
+ auto pair = (i % 2) ? Node.Pair(i.to!string, i) : Node.Pair(i, i.to!string);
+ pairs ~= Node([pair]);
+ }
+ return pairs;
+ }
+
+ Node[] sameType(uint length) @safe
+ {
+ Node[] pairs;
+ foreach(long i; 0 .. length)
+ {
+ auto pair = Node.Pair(i.to!string, i);
+ pairs ~= Node([pair]);
+ }
+ return pairs;
+ }
+
+ assertThrown(constructOrderedMap(alternateTypes(8) ~ alternateTypes(2)));
+ assertNotThrown(constructOrderedMap(alternateTypes(8)));
+ assertThrown(constructOrderedMap(sameType(64) ~ sameType(16)));
+ assertThrown(constructOrderedMap(alternateTypes(64) ~ alternateTypes(16)));
+ assertNotThrown(constructOrderedMap(sameType(64)));
+ assertNotThrown(constructOrderedMap(alternateTypes(64)));
+}
+
+// Construct a pairs (ordered sequence of key: value pairs allowing duplicates) _node.
+Node.Pair[] constructPairs(const Node[] nodes) @safe
+{
+ return getPairs("pairs", nodes);
+}
+
+// Construct a set _node.
+Node[] constructSet(const Node.Pair[] pairs) @safe
+{
+ // In future, the map here should be replaced with something with deterministic
+ // memory allocation if possible.
+ // Detect duplicates.
+ ubyte[Node] map;
+ Node[] nodes;
+ nodes.reserve(pairs.length);
+ foreach(pair; pairs)
+ {
+ enforce((pair.key in map) is null, new Exception("Duplicate entry in a set"));
+ map[pair.key] = 0;
+ nodes ~= pair.key;
+ }
+
+ return nodes;
+}
+@safe unittest
+{
+ Node.Pair[] set(uint length) @safe
+ {
+ Node.Pair[] pairs;
+ foreach(long i; 0 .. length)
+ {
+ pairs ~= Node.Pair(i.to!string, YAMLNull());
+ }
+
+ return pairs;
+ }
+
+ auto DuplicatesShort = set(8) ~ set(2);
+ auto noDuplicatesShort = set(8);
+ auto DuplicatesLong = set(64) ~ set(4);
+ auto noDuplicatesLong = set(64);
+
+ bool eq(Node.Pair[] a, Node[] b)
+ {
+ if(a.length != b.length){return false;}
+ foreach(i; 0 .. a.length)
+ {
+ if(a[i].key != b[i])
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ auto nodeDuplicatesShort = DuplicatesShort.dup;
+ auto nodeNoDuplicatesShort = noDuplicatesShort.dup;
+ auto nodeDuplicatesLong = DuplicatesLong.dup;
+ auto nodeNoDuplicatesLong = noDuplicatesLong.dup;
+
+ assertThrown(constructSet(nodeDuplicatesShort));
+ assertNotThrown(constructSet(nodeNoDuplicatesShort));
+ assertThrown(constructSet(nodeDuplicatesLong));
+ assertNotThrown(constructSet(nodeNoDuplicatesLong));
+}
+
+// Construct a sequence (array) _node.
+Node[] constructSequence(Node[] nodes) @safe
+{
+ return nodes;
+}
+
+// Construct an unordered map (unordered set of key:value _pairs without duplicates) _node.
+Node.Pair[] constructMap(Node.Pair[] pairs) @safe
+{
+ //Detect duplicates.
+ //TODO this should be replaced by something with deterministic memory allocation.
+ auto keys = redBlackTree!Node();
+ foreach(ref pair; pairs)
+ {
+ enforce(!(pair.key in keys),
+ new Exception("Duplicate entry in a map: " ~ pair.key.debugString()));
+ keys.insert(pair.key);
+ }
+ return pairs;
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/dumper.d b/src/ext_depends/D-YAML/source/dyaml/dumper.d
new file mode 100644
index 0000000..51f232f
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/dumper.d
@@ -0,0 +1,287 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/**
+ * YAML dumper.
+ *
+ * Code based on $(LINK2 http://www.pyyaml.org, PyYAML).
+ */
+module dyaml.dumper;
+
+import std.array;
+import std.range.primitives;
+import std.typecons;
+
+import dyaml.emitter;
+import dyaml.event;
+import dyaml.exception;
+import dyaml.linebreak;
+import dyaml.node;
+import dyaml.representer;
+import dyaml.resolver;
+import dyaml.serializer;
+import dyaml.style;
+import dyaml.tagdirective;
+
+
+/**
+ * Create a Dumper with default settings and the default tag resolvers.
+ *
+ * The returned Dumper writes YAML documents to files or streams.
+ *
+ * User specified Representer and/or Resolver can be used to support new
+ * tags / data types.
+ *
+ * Setters are provided to affect output details (style, etc.).
+ */
+auto dumper()
+{
+    Dumper d;
+    d.resolver = Resolver.withDefaultResolvers;
+    return d;
+}
+
+///Stores dumping settings and writes YAML documents to a compatible output range.
+struct Dumper
+{
+    private:
+        //Indentation width.
+        int indent_ = 2;
+        //Tag directives to use.
+        TagDirective[] tags_;
+    public:
+        //Resolver to resolve tags.
+        Resolver resolver;
+        //Write scalars in canonical form?
+        bool canonical;
+        //Preferred text width.
+        uint textWidth = 80;
+        //Line break to use. Unix by default.
+        LineBreak lineBreak = LineBreak.unix;
+        //YAML version string. Default is 1.1.
+        string YAMLVersion = "1.1";
+        //Always explicitly write document start? Default is no explicit start.
+        bool explicitStart = false;
+        //Always explicitly write document end? Default is no explicit end.
+        bool explicitEnd = false;
+
+        //Name of the output file or stream, used in error messages.
+        string name = "<unknown>";
+
+        // Default style for scalar nodes. If style is $(D ScalarStyle.invalid), the _style is chosen automatically.
+        ScalarStyle defaultScalarStyle = ScalarStyle.invalid;
+        // Default style for collection nodes. If style is $(D CollectionStyle.invalid), the _style is chosen automatically.
+        CollectionStyle defaultCollectionStyle = CollectionStyle.invalid;
+
+        //Comparing two dumpers has no meaningful semantics; disable it.
+        @disable bool opEquals(ref Dumper);
+        @disable int opCmp(ref Dumper);
+
+        ///Set indentation width. 2 by default. Must not be zero.
+        @property void indent(uint indent) pure @safe nothrow
+        in
+        {
+            assert(indent != 0, "Can't use zero YAML indent width");
+        }
+        do
+        {
+            indent_ = indent;
+        }
+
+        /**
+         * Specify tag directives.
+         *
+         * A tag directive specifies a shorthand notation for specifying _tags.
+         * Each tag directive associates a handle with a prefix. This allows for
+         * compact tag notation.
+         *
+         * Each handle specified MUST start and end with a '!' character
+         * (a single character "!" handle is allowed as well).
+         *
+         * Only alphanumeric characters, '-', and '_' may be used in handles.
+         *
+         * Each prefix MUST not be empty.
+         *
+         * The "!!" handle is used for default YAML _tags with prefix
+         * "tag:yaml.org,2002:". This can be overridden.
+         *
+         * Params: tags = Tag directives (keys are handles, values are prefixes).
+         */
+        @property void tagDirectives(string[string] tags) pure @safe
+        {
+            TagDirective[] t;
+            foreach(handle, prefix; tags)
+            {
+                assert(handle.length >= 1 && handle[0] == '!' && handle[$ - 1] == '!',
+                       "A tag handle is empty or does not start and end with a " ~
+                       "'!' character : " ~ handle);
+                assert(prefix.length >= 1, "A tag prefix is empty");
+                t ~= TagDirective(handle, prefix);
+            }
+            tags_ = t;
+        }
+        ///
+        @safe unittest
+        {
+            auto dumper = dumper();
+            string[string] directives;
+            directives["!short!"] = "tag:long.org,2011:";
+            //This will emit tags starting with "tag:long.org,2011"
+            //with a "!short!" prefix instead.
+            dumper.tagDirectives(directives);
+            dumper.dump(new Appender!string(), Node("foo"));
+        }
+
+        /**
+         * Dump one or more YAML _documents to the file/stream.
+         *
+         * Note that while you can call dump() multiple times on the same
+         * dumper, you will end up writing multiple YAML "files" to the same
+         * file/stream.
+         *
+         * Params: range     = Output range to write the documents to.
+         *         documents = Documents to _dump (root nodes of the _documents).
+         *
+         * Throws: YAMLException on error (e.g. invalid nodes,
+         *         unable to write to file/stream).
+         */
+        void dump(CharacterType = char, Range)(Range range, Node[] documents ...)
+            // Note: '&&' binds tighter than '||' in D, so the previous constraint
+            // parsed as '(A && B) || C || D' and could accept a range that does
+            // not support CharacterType at all, failing later inside Emitter
+            // instantiation. The alternatives are now explicitly parenthesized.
+            if (isOutputRange!(Range, CharacterType) &&
+                (isOutputRange!(Range, char) || isOutputRange!(Range, wchar) || isOutputRange!(Range, dchar)))
+        {
+            try
+            {
+                auto emitter = new Emitter!(Range, CharacterType)(range, canonical, indent_, textWidth, lineBreak);
+                auto serializer = Serializer(resolver, explicitStart ? Yes.explicitStart : No.explicitStart,
+                                             explicitEnd ? Yes.explicitEnd : No.explicitEnd, YAMLVersion, tags_);
+                serializer.startStream(emitter);
+                foreach(ref document; documents)
+                {
+                    auto data = representData(document, defaultScalarStyle, defaultCollectionStyle);
+                    serializer.serialize(emitter, data);
+                }
+                serializer.endStream(emitter);
+            }
+            catch(YAMLException e)
+            {
+                //Wrap the failure with the output name for better diagnostics.
+                throw new YAMLException("Unable to dump YAML to stream "
+                                        ~ name ~ " : " ~ e.msg, e.file, e.line);
+            }
+        }
+}
+///Write to an output range — here an in-memory string appender; any char output range (e.g. a file writer) works the same way.
+@safe unittest
+{
+    auto node = Node([1, 2, 3, 4, 5]);
+    dumper().dump(new Appender!string(), node);
+}
+///Write multiple YAML documents to one output — extra nodes become extra documents.
+@safe unittest
+{
+    auto node1 = Node([1, 2, 3, 4, 5]);
+    auto node2 = Node("This document contains only one string");
+    dumper().dump(new Appender!string(), node1, node2);
+    //Or with an array:
+    dumper().dump(new Appender!string(), [node1, node2]);
+}
+///Write to memory
+@safe unittest
+{
+    auto stream = new Appender!string();
+    auto node = Node([1, 2, 3, 4, 5]);
+    dumper().dump(stream, node);
+}
+///Use a custom resolver to support custom data types and/or implicit tags
+@safe unittest
+{
+    import std.regex : regex;
+    auto node = Node([1, 2, 3, 4, 5]);
+    auto dumper = dumper();
+    dumper.resolver.addImplicitResolver("!tag", regex("A.*"), "A");
+    dumper.dump(new Appender!string(), node);
+}
+/// Set default scalar style
+@safe unittest
+{
+    auto stream = new Appender!string();
+    auto node = Node("Hello world!");
+    auto dumper = dumper();
+    dumper.defaultScalarStyle = ScalarStyle.singleQuoted;
+    dumper.dump(stream, node);
+}
+/// Set default collection style
+@safe unittest
+{
+    auto stream = new Appender!string();
+    auto node = Node(["Hello", "world!"]);
+    auto dumper = dumper();
+    dumper.defaultCollectionStyle = CollectionStyle.flow;
+    dumper.dump(stream, node);
+}
+// Make sure the styles are actually used
+@safe unittest
+{
+    auto stream = new Appender!string();
+    auto node = Node([Node("Hello world!"), Node(["Hello", "world!"])]);
+    auto dumper = dumper();
+    dumper.defaultScalarStyle = ScalarStyle.singleQuoted;
+    dumper.defaultCollectionStyle = CollectionStyle.flow;
+    dumper.explicitEnd = false;
+    dumper.explicitStart = false;
+    dumper.YAMLVersion = null;
+    dumper.dump(stream, node);
+    assert(stream.data == "[!!str 'Hello world!', [!!str 'Hello', !!str 'world!']]\n");
+}
+// Explicit document start/end markers
+@safe unittest
+{
+    auto stream = new Appender!string();
+    auto node = Node([1, 2, 3, 4, 5]);
+    auto dumper = dumper();
+    dumper.explicitEnd = true;
+    dumper.explicitStart = true;
+    dumper.YAMLVersion = null;
+    dumper.dump(stream, node);
+    //No version directive is emitted (YAMLVersion is null), so the document
+    //start marker comes first.
+    assert(stream.data[0..3] == "---");
+    //The "..." end marker precedes the trailing newline.
+    assert(stream.data[$-4..$-1] == "...");
+}
+// No explicit document start/end markers
+@safe unittest
+{
+    auto stream = new Appender!string();
+    auto node = Node([1, 2, 3, 4, 5]);
+    auto dumper = dumper();
+    dumper.explicitEnd = false;
+    dumper.explicitStart = false;
+    dumper.YAMLVersion = null;
+    dumper.dump(stream, node);
+    //Without explicitStart there must be no "---" marker at the beginning...
+    assert(stream.data[0..3] != "---");
+    //...and without explicitEnd no "..." marker before the trailing newline.
+    assert(stream.data[$-4..$-1] != "...");
+}
+// Windows, macOS line breaks
+@safe unittest
+{
+    auto node = Node(0);
+    {
+        auto stream = new Appender!string();
+        auto dumper = dumper();
+        dumper.explicitEnd = true;
+        dumper.explicitStart = true;
+        dumper.YAMLVersion = null;
+        dumper.lineBreak = LineBreak.windows;
+        dumper.dump(stream, node);
+        assert(stream.data == "--- 0\r\n...\r\n");
+    }
+    {
+        auto stream = new Appender!string();
+        auto dumper = dumper();
+        dumper.explicitEnd = true;
+        dumper.explicitStart = true;
+        dumper.YAMLVersion = null;
+        dumper.lineBreak = LineBreak.macintosh;
+        dumper.dump(stream, node);
+        assert(stream.data == "--- 0\r...\r");
+    }
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/emitter.d b/src/ext_depends/D-YAML/source/dyaml/emitter.d
new file mode 100644
index 0000000..c797eb9
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/emitter.d
@@ -0,0 +1,1689 @@
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/**
+ * YAML emitter.
+ * Code based on PyYAML: http://www.pyyaml.org
+ */
+module dyaml.emitter;
+
+
+import std.algorithm;
+import std.array;
+import std.ascii;
+import std.conv;
+import std.encoding;
+import std.exception;
+import std.format;
+import std.range;
+import std.string;
+import std.system;
+import std.typecons;
+import std.utf;
+
+import dyaml.encoding;
+import dyaml.escapes;
+import dyaml.event;
+import dyaml.exception;
+import dyaml.linebreak;
+import dyaml.queue;
+import dyaml.style;
+import dyaml.tagdirective;
+
+
+package:
+
+//Stores results of analysis of a scalar, determining e.g. what scalar style to use.
+struct ScalarAnalysis
+{
+    //Scalar itself.
+    string scalar;
+
+    //Individual properties determined by scalar analysis; combined as BitFlags below.
+    enum AnalysisFlags
+    {
+        //The scalar is empty.
+        empty = 1<<0,
+        //The scalar contains line breaks (spans multiple lines).
+        multiline = 1<<1,
+        //Plain style may be used in flow context.
+        allowFlowPlain = 1<<2,
+        //Plain style may be used in block context.
+        allowBlockPlain = 1<<3,
+        //Single-quoted style may be used.
+        allowSingleQuoted = 1<<4,
+        //Double-quoted style may be used.
+        allowDoubleQuoted = 1<<5,
+        //Block styles (literal/folded) may be used.
+        allowBlock = 1<<6,
+        //No analysis has been performed yet; the other flags are not valid.
+        isNull = 1<<7
+    }
+
+    ///Analysis results.
+    BitFlags!AnalysisFlags flags;
+}
+
+//Matches YAML line break characters: LF, NEL (U+0085), LS (U+2028), PS (U+2029).
+private alias isNewLine = among!('\n', '\u0085', '\u2028', '\u2029');
+
+//Matches characters that are special/indicator characters when leading a scalar.
+private alias isSpecialChar = among!('#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\\', '\'', '"', '%', '@', '`');
+
+//Matches flow-context indicator characters.
+private alias isFlowIndicator = among!(',', '?', '[', ']', '{', '}');
+
+//Matches NUL, line breaks, space and tab — treated as whitespace/terminators by scalar analysis.
+private alias isSpace = among!('\0', '\n', '\r', '\u0085', '\u2028', '\u2029', ' ', '\t');
+
+//Emits YAML events into a file/stream.
+struct Emitter(Range, CharType) if (isOutputRange!(Range, CharType))
+{
+ private:
+ ///Default tag handle shortcuts and replacements.
+ static TagDirective[] defaultTagDirectives_ =
+ [TagDirective("!", "!"), TagDirective("!!", "tag:yaml.org,2002:")];
+
+ ///Stream to write to.
+ Range stream_;
+
+ /// Type used for upcoming emitter steps
+ alias EmitterFunction = void function(typeof(this)*) @safe;
+
+ ///Stack of states.
+ Appender!(EmitterFunction[]) states_;
+
+ ///Current state.
+ EmitterFunction state_;
+
+ ///Event queue.
+ Queue!Event events_;
+ ///Event we're currently emitting.
+ Event event_;
+
+ ///Stack of previous indentation levels.
+ Appender!(int[]) indents_;
+ ///Current indentation level.
+ int indent_ = -1;
+
+ ///Level of nesting in flow context. If 0, we're in block context.
+ uint flowLevel_ = 0;
+
+ /// Describes context (where we are in the document).
+ enum Context
+ {
+ /// Root node of a document.
+ root,
+ /// Sequence.
+ sequence,
+ /// Mapping.
+ mappingNoSimpleKey,
+ /// Mapping, in a simple key.
+ mappingSimpleKey,
+ }
+ /// Current context.
+ Context context_;
+
+ ///Characteristics of the last emitted character:
+
+ ///Line.
+ uint line_ = 0;
+ ///Column.
+ uint column_ = 0;
+ ///Whitespace character?
+ bool whitespace_ = true;
+ ///indentation space, '-', '?', or ':'?
+ bool indentation_ = true;
+
+ ///Does the document require an explicit document indicator?
+ bool openEnded_;
+
+ ///Formatting details.
+
+ ///Canonical scalar format?
+ bool canonical_;
+ ///Best indentation width.
+ uint bestIndent_ = 2;
+ ///Best text width.
+ uint bestWidth_ = 80;
+ ///Best line break character/s.
+ LineBreak bestLineBreak_;
+
+ ///Tag directive handle - prefix pairs.
+ TagDirective[] tagDirectives_;
+
+ ///Anchor/alias to process.
+ string preparedAnchor_ = null;
+ ///Tag to process.
+ string preparedTag_ = null;
+
+ ///Analysis result of the current scalar.
+ ScalarAnalysis analysis_;
+ ///Style of the current scalar.
+ ScalarStyle style_ = ScalarStyle.invalid;
+
+ public:
+ @disable int opCmp(ref Emitter);
+ @disable bool opEquals(ref Emitter);
+
+ /**
+ * Construct an emitter.
+ *
+ * Params: stream = Output range to write to.
+ * canonical = Write scalars in canonical form?
+ * indent = Indentation width.
+ * lineBreak = Line break character/s.
+ */
+ this(Range stream, const bool canonical, const int indent, const int width,
+ const LineBreak lineBreak) @safe
+ {
+ states_.reserve(32);
+ indents_.reserve(32);
+ stream_ = stream;
+ canonical_ = canonical;
+ nextExpected!"expectStreamStart"();
+
+ if(indent > 1 && indent < 10){bestIndent_ = indent;}
+ if(width > bestIndent_ * 2) {bestWidth_ = width;}
+ bestLineBreak_ = lineBreak;
+
+ analysis_.flags.isNull = true;
+ }
+
+ ///Emit an event.
+ void emit(Event event) @safe
+ {
+ events_.push(event);
+ while(!needMoreEvents())
+ {
+ event_ = events_.pop();
+ callNext();
+ event_.destroy();
+ }
+ }
+
+ private:
+ ///Pop and return the newest state in states_.
+ EmitterFunction popState() @safe
+ in(states_.data.length > 0,
+ "Emitter: Need to pop a state but there are no states left")
+ {
+ const result = states_.data[$-1];
+ states_.shrinkTo(states_.data.length - 1);
+ return result;
+ }
+
+ void pushState(string D)() @safe
+ {
+ states_ ~= mixin("function(typeof(this)* self) { self."~D~"(); }");
+ }
+
+ ///Pop and return the newest indent in indents_.
+ int popIndent() @safe
+ in(indents_.data.length > 0,
+ "Emitter: Need to pop an indent level but there" ~
+ " are no indent levels left")
+ {
+ const result = indents_.data[$-1];
+ indents_.shrinkTo(indents_.data.length - 1);
+ return result;
+ }
+
+ ///Write a string to the file/stream.
+ void writeString(const scope char[] str) @safe
+ {
+ static if(is(CharType == char))
+ {
+ copy(str, stream_);
+ }
+ static if(is(CharType == wchar))
+ {
+ const buffer = to!wstring(str);
+ copy(buffer, stream_);
+ }
+ static if(is(CharType == dchar))
+ {
+ const buffer = to!dstring(str);
+ copy(buffer, stream_);
+ }
+ }
+
+ ///In some cases, we wait for a few next events before emitting.
+ bool needMoreEvents() @safe nothrow
+ {
+ if(events_.length == 0){return true;}
+
+ const event = events_.peek();
+ if(event.id == EventID.documentStart){return needEvents(1);}
+ if(event.id == EventID.sequenceStart){return needEvents(2);}
+ if(event.id == EventID.mappingStart) {return needEvents(3);}
+
+ return false;
+ }
+
+ ///Determines if we need specified number of more events.
+ bool needEvents(in uint count) @safe nothrow
+ {
+ int level;
+
+ foreach(const event; events_.range)
+ {
+ if(event.id.among!(EventID.documentStart, EventID.sequenceStart, EventID.mappingStart)) {++level;}
+ else if(event.id.among!(EventID.documentEnd, EventID.sequenceEnd, EventID.mappingEnd)) {--level;}
+ else if(event.id == EventID.streamStart){level = -1;}
+
+ if(level < 0)
+ {
+ return false;
+ }
+ }
+
+ return events_.length < (count + 1);
+ }
+
+ ///Increase indentation level.
+ void increaseIndent(const Flag!"flow" flow = No.flow, const bool indentless = false) @safe
+ {
+ indents_ ~= indent_;
+ if(indent_ == -1)
+ {
+ indent_ = flow ? bestIndent_ : 0;
+ }
+ else if(!indentless)
+ {
+ indent_ += bestIndent_;
+ }
+ }
+
+ ///Determines if the type of current event is as specified. Throws if no event.
+ bool eventTypeIs(in EventID id) const pure @safe
+ in(!event_.isNull, "Expected an event, but no event is available.")
+ {
+ return event_.id == id;
+ }
+
+
+ //States.
+
+
+ //Stream handlers.
+
+ ///Handle start of a file/stream.
+ void expectStreamStart() @safe
+ in(eventTypeIs(EventID.streamStart),
+ "Expected streamStart, but got " ~ event_.idString)
+ {
+
+ writeStreamStart();
+ nextExpected!"expectDocumentStart!(Yes.first)"();
+ }
+
+ ///Expect nothing, throwing if we still have something.
+ void expectNothing() @safe
+ {
+ assert(0, "Expected nothing, but got " ~ event_.idString);
+ }
+
+ //Document handlers.
+
+ ///Handle start of a document.
+ void expectDocumentStart(Flag!"first" first)() @safe
+ in(eventTypeIs(EventID.documentStart) || eventTypeIs(EventID.streamEnd),
+ "Expected documentStart or streamEnd, but got " ~ event_.idString)
+ {
+
+ if(event_.id == EventID.documentStart)
+ {
+ const YAMLVersion = event_.value;
+ auto tagDirectives = event_.tagDirectives;
+ if(openEnded_ && (YAMLVersion !is null || tagDirectives !is null))
+ {
+ writeIndicator("...", Yes.needWhitespace);
+ writeIndent();
+ }
+
+ if(YAMLVersion !is null)
+ {
+ writeVersionDirective(prepareVersion(YAMLVersion));
+ }
+
+ if(tagDirectives !is null)
+ {
+ tagDirectives_ = tagDirectives;
+ sort!"icmp(a.handle, b.handle) < 0"(tagDirectives_);
+
+ foreach(ref pair; tagDirectives_)
+ {
+ writeTagDirective(prepareTagHandle(pair.handle),
+ prepareTagPrefix(pair.prefix));
+ }
+ }
+
+ bool eq(ref TagDirective a, ref TagDirective b){return a.handle == b.handle;}
+ //Add any default tag directives that have not been overriden.
+ foreach(ref def; defaultTagDirectives_)
+ {
+ if(!std.algorithm.canFind!eq(tagDirectives_, def))
+ {
+ tagDirectives_ ~= def;
+ }
+ }
+
+ const implicit = first && !event_.explicitDocument && !canonical_ &&
+ YAMLVersion is null && tagDirectives is null &&
+ !checkEmptyDocument();
+ if(!implicit)
+ {
+ writeIndent();
+ writeIndicator("---", Yes.needWhitespace);
+ if(canonical_){writeIndent();}
+ }
+ nextExpected!"expectRootNode"();
+ }
+ else if(event_.id == EventID.streamEnd)
+ {
+ if(openEnded_)
+ {
+ writeIndicator("...", Yes.needWhitespace);
+ writeIndent();
+ }
+ writeStreamEnd();
+ nextExpected!"expectNothing"();
+ }
+ }
+
+ ///Handle end of a document.
+ void expectDocumentEnd() @safe
+ in(eventTypeIs(EventID.documentEnd),
+ "Expected DocumentEnd, but got " ~ event_.idString)
+ {
+
+ writeIndent();
+ if(event_.explicitDocument)
+ {
+ writeIndicator("...", Yes.needWhitespace);
+ writeIndent();
+ }
+ nextExpected!"expectDocumentStart!(No.first)"();
+ }
+
+ ///Handle the root node of a document.
+ void expectRootNode() @safe
+ {
+ pushState!"expectDocumentEnd"();
+ expectNode(Context.root);
+ }
+
+ ///Handle a mapping node.
+ //
+ //Params: simpleKey = Are we in a simple key?
+ void expectMappingNode(const bool simpleKey = false) @safe
+ {
+ expectNode(simpleKey ? Context.mappingSimpleKey : Context.mappingNoSimpleKey);
+ }
+
+ ///Handle a sequence node.
+ void expectSequenceNode() @safe
+ {
+ expectNode(Context.sequence);
+ }
+
+ ///Handle a new node. Context specifies where in the document we are.
+ void expectNode(const Context context) @safe
+ {
+ context_ = context;
+
+ const flowCollection = event_.collectionStyle == CollectionStyle.flow;
+
+ switch(event_.id)
+ {
+ case EventID.alias_: expectAlias(); break;
+ case EventID.scalar:
+ processAnchor("&");
+ processTag();
+ expectScalar();
+ break;
+ case EventID.sequenceStart:
+ processAnchor("&");
+ processTag();
+ if(flowLevel_ > 0 || canonical_ || flowCollection || checkEmptySequence())
+ {
+ expectFlowSequence();
+ }
+ else
+ {
+ expectBlockSequence();
+ }
+ break;
+ case EventID.mappingStart:
+ processAnchor("&");
+ processTag();
+ if(flowLevel_ > 0 || canonical_ || flowCollection || checkEmptyMapping())
+ {
+ expectFlowMapping();
+ }
+ else
+ {
+ expectBlockMapping();
+ }
+ break;
+ default:
+ assert(0, "Expected alias_, scalar, sequenceStart or " ~
+ "mappingStart, but got: " ~ event_.idString);
+ }
+ }
+ ///Handle an alias.
+ void expectAlias() @safe
+ in(event_.anchor != "", "Anchor is not specified for alias")
+ {
+ processAnchor("*");
+ nextExpected(popState());
+ }
+
+ ///Handle a scalar.
+ void expectScalar() @safe
+ {
+ increaseIndent(Yes.flow);
+ processScalar();
+ indent_ = popIndent();
+ nextExpected(popState());
+ }
+
+ //Flow sequence handlers.
+
+ ///Handle a flow sequence.
+ void expectFlowSequence() @safe
+ {
+ writeIndicator("[", Yes.needWhitespace, Yes.whitespace);
+ ++flowLevel_;
+ increaseIndent(Yes.flow);
+ nextExpected!"expectFlowSequenceItem!(Yes.first)"();
+ }
+
+ ///Handle a flow sequence item.
+ void expectFlowSequenceItem(Flag!"first" first)() @safe
+ {
+ if(event_.id == EventID.sequenceEnd)
+ {
+ indent_ = popIndent();
+ --flowLevel_;
+ static if(!first) if(canonical_)
+ {
+ writeIndicator(",", No.needWhitespace);
+ writeIndent();
+ }
+ writeIndicator("]", No.needWhitespace);
+ nextExpected(popState());
+ return;
+ }
+ static if(!first){writeIndicator(",", No.needWhitespace);}
+ if(canonical_ || column_ > bestWidth_){writeIndent();}
+ pushState!"expectFlowSequenceItem!(No.first)"();
+ expectSequenceNode();
+ }
+
+ //Flow mapping handlers.
+
+ ///Handle a flow mapping.
+ void expectFlowMapping() @safe
+ {
+ writeIndicator("{", Yes.needWhitespace, Yes.whitespace);
+ ++flowLevel_;
+ increaseIndent(Yes.flow);
+ nextExpected!"expectFlowMappingKey!(Yes.first)"();
+ }
+
+ ///Handle a key in a flow mapping.
+ void expectFlowMappingKey(Flag!"first" first)() @safe
+ {
+ if(event_.id == EventID.mappingEnd)
+ {
+ indent_ = popIndent();
+ --flowLevel_;
+ static if (!first) if(canonical_)
+ {
+ writeIndicator(",", No.needWhitespace);
+ writeIndent();
+ }
+ writeIndicator("}", No.needWhitespace);
+ nextExpected(popState());
+ return;
+ }
+
+ static if(!first){writeIndicator(",", No.needWhitespace);}
+ if(canonical_ || column_ > bestWidth_){writeIndent();}
+ if(!canonical_ && checkSimpleKey())
+ {
+ pushState!"expectFlowMappingSimpleValue"();
+ expectMappingNode(true);
+ return;
+ }
+
+ writeIndicator("?", Yes.needWhitespace);
+ pushState!"expectFlowMappingValue"();
+ expectMappingNode();
+ }
+
+ ///Handle a simple value in a flow mapping.
+ void expectFlowMappingSimpleValue() @safe
+ {
+ writeIndicator(":", No.needWhitespace);
+ pushState!"expectFlowMappingKey!(No.first)"();
+ expectMappingNode();
+ }
+
+ ///Handle a complex value in a flow mapping.
+ void expectFlowMappingValue() @safe
+ {
+ if(canonical_ || column_ > bestWidth_){writeIndent();}
+ writeIndicator(":", Yes.needWhitespace);
+ pushState!"expectFlowMappingKey!(No.first)"();
+ expectMappingNode();
+ }
+
+ //Block sequence handlers.
+
+ ///Handle a block sequence.
+ void expectBlockSequence() @safe
+ {
+ const indentless = (context_ == Context.mappingNoSimpleKey ||
+ context_ == Context.mappingSimpleKey) && !indentation_;
+ increaseIndent(No.flow, indentless);
+ nextExpected!"expectBlockSequenceItem!(Yes.first)"();
+ }
+
+ ///Handle a block sequence item.
+ void expectBlockSequenceItem(Flag!"first" first)() @safe
+ {
+ static if(!first) if(event_.id == EventID.sequenceEnd)
+ {
+ indent_ = popIndent();
+ nextExpected(popState());
+ return;
+ }
+
+ writeIndent();
+ writeIndicator("-", Yes.needWhitespace, No.whitespace, Yes.indentation);
+ pushState!"expectBlockSequenceItem!(No.first)"();
+ expectSequenceNode();
+ }
+
+ //Block mapping handlers.
+
+ ///Handle a block mapping.
+ void expectBlockMapping() @safe
+ {
+ increaseIndent(No.flow);
+ nextExpected!"expectBlockMappingKey!(Yes.first)"();
+ }
+
+ ///Handle a key in a block mapping.
+ void expectBlockMappingKey(Flag!"first" first)() @safe
+ {
+ static if(!first) if(event_.id == EventID.mappingEnd)
+ {
+ indent_ = popIndent();
+ nextExpected(popState());
+ return;
+ }
+
+ writeIndent();
+ if(checkSimpleKey())
+ {
+ pushState!"expectBlockMappingSimpleValue"();
+ expectMappingNode(true);
+ return;
+ }
+
+ writeIndicator("?", Yes.needWhitespace, No.whitespace, Yes.indentation);
+ pushState!"expectBlockMappingValue"();
+ expectMappingNode();
+ }
+
+ ///Handle a simple value in a block mapping.
+ void expectBlockMappingSimpleValue() @safe
+ {
+ writeIndicator(":", No.needWhitespace);
+ pushState!"expectBlockMappingKey!(No.first)"();
+ expectMappingNode();
+ }
+
+ ///Handle a complex value in a block mapping.
+ void expectBlockMappingValue() @safe
+ {
+ writeIndent();
+ writeIndicator(":", Yes.needWhitespace, No.whitespace, Yes.indentation);
+ pushState!"expectBlockMappingKey!(No.first)"();
+ expectMappingNode();
+ }
+
+ //Checkers.
+
+ ///Check if an empty sequence is next.
+ bool checkEmptySequence() const @safe pure nothrow
+ {
+ return event_.id == EventID.sequenceStart && events_.length > 0
+ && events_.peek().id == EventID.sequenceEnd;
+ }
+
+ ///Check if an empty mapping is next.
+ bool checkEmptyMapping() const @safe pure nothrow
+ {
+ return event_.id == EventID.mappingStart && events_.length > 0
+ && events_.peek().id == EventID.mappingEnd;
+ }
+
+ ///Check if an empty document is next.
+ bool checkEmptyDocument() const @safe pure nothrow
+ {
+ if(event_.id != EventID.documentStart || events_.length == 0)
+ {
+ return false;
+ }
+
+ const event = events_.peek();
+ const emptyScalar = event.id == EventID.scalar && (event.anchor is null) &&
+ (event.tag is null) && event.implicit && event.value == "";
+ return emptyScalar;
+ }
+
+ ///Check if a simple key is next.
+ bool checkSimpleKey() @safe
+ {
+ uint length;
+ const id = event_.id;
+ const scalar = id == EventID.scalar;
+ const collectionStart = id == EventID.mappingStart ||
+ id == EventID.sequenceStart;
+
+ if((id == EventID.alias_ || scalar || collectionStart)
+ && (event_.anchor !is null))
+ {
+ if(preparedAnchor_ is null)
+ {
+ preparedAnchor_ = prepareAnchor(event_.anchor);
+ }
+ length += preparedAnchor_.length;
+ }
+
+ if((scalar || collectionStart) && (event_.tag !is null))
+ {
+ if(preparedTag_ is null){preparedTag_ = prepareTag(event_.tag);}
+ length += preparedTag_.length;
+ }
+
+ if(scalar)
+ {
+ if(analysis_.flags.isNull){analysis_ = analyzeScalar(event_.value);}
+ length += analysis_.scalar.length;
+ }
+
+ if(length >= 128){return false;}
+
+ return id == EventID.alias_ ||
+ (scalar && !analysis_.flags.empty && !analysis_.flags.multiline) ||
+ checkEmptySequence() ||
+ checkEmptyMapping();
+ }
+
+ ///Process and write a scalar.
+ void processScalar() @safe
+ {
+ if(analysis_.flags.isNull){analysis_ = analyzeScalar(event_.value);}
+ if(style_ == ScalarStyle.invalid)
+ {
+ style_ = chooseScalarStyle();
+ }
+
+ //if(analysis_.flags.multiline && (context_ != Context.mappingSimpleKey) &&
+ // ([ScalarStyle.invalid, ScalarStyle.plain, ScalarStyle.singleQuoted, ScalarStyle.doubleQuoted)
+ // .canFind(style_))
+ //{
+ // writeIndent();
+ //}
+ auto writer = ScalarWriter!(Range, CharType)(&this, analysis_.scalar,
+ context_ != Context.mappingSimpleKey);
+ with(writer) final switch(style_)
+ {
+ case ScalarStyle.invalid: assert(false);
+ case ScalarStyle.doubleQuoted: writeDoubleQuoted(); break;
+ case ScalarStyle.singleQuoted: writeSingleQuoted(); break;
+ case ScalarStyle.folded: writeFolded(); break;
+ case ScalarStyle.literal: writeLiteral(); break;
+ case ScalarStyle.plain: writePlain(); break;
+ }
+ analysis_.flags.isNull = true;
+ style_ = ScalarStyle.invalid;
+ }
+
+ ///Process and write an anchor/alias.
+ void processAnchor(const string indicator) @safe
+ {
+ if(event_.anchor is null)
+ {
+ preparedAnchor_ = null;
+ return;
+ }
+ if(preparedAnchor_ is null)
+ {
+ preparedAnchor_ = prepareAnchor(event_.anchor);
+ }
+ if(preparedAnchor_ !is null && preparedAnchor_ != "")
+ {
+ writeIndicator(indicator, Yes.needWhitespace);
+ writeString(preparedAnchor_);
+ }
+ preparedAnchor_ = null;
+ }
+
+ ///Process and write a tag.
+ void processTag() @safe
+ {
+ string tag = event_.tag;
+
+ if(event_.id == EventID.scalar)
+ {
+ if(style_ == ScalarStyle.invalid){style_ = chooseScalarStyle();}
+ if((!canonical_ || (tag is null)) &&
+ (style_ == ScalarStyle.plain ? event_.implicit : !event_.implicit && (tag is null)))
+ {
+ preparedTag_ = null;
+ return;
+ }
+ if(event_.implicit && (tag is null))
+ {
+ tag = "!";
+ preparedTag_ = null;
+ }
+ }
+ else if((!canonical_ || (tag is null)) && event_.implicit)
+ {
+ preparedTag_ = null;
+ return;
+ }
+
+ assert(tag != "", "Tag is not specified");
+ if(preparedTag_ is null){preparedTag_ = prepareTag(tag);}
+ if(preparedTag_ !is null && preparedTag_ != "")
+ {
+ writeIndicator(preparedTag_, Yes.needWhitespace);
+ }
+ preparedTag_ = null;
+ }
+
+ ///Determine style to write the current scalar in.
+ ScalarStyle chooseScalarStyle() @safe
+ {
+ if(analysis_.flags.isNull){analysis_ = analyzeScalar(event_.value);}
+
+ const style = event_.scalarStyle;
+ const invalidOrPlain = style == ScalarStyle.invalid || style == ScalarStyle.plain;
+ const block = style == ScalarStyle.literal || style == ScalarStyle.folded;
+ const singleQuoted = style == ScalarStyle.singleQuoted;
+ const doubleQuoted = style == ScalarStyle.doubleQuoted;
+
+ const allowPlain = flowLevel_ > 0 ? analysis_.flags.allowFlowPlain
+ : analysis_.flags.allowBlockPlain;
+ //simple empty or multiline scalars can't be written in plain style
+ const simpleNonPlain = (context_ == Context.mappingSimpleKey) &&
+ (analysis_.flags.empty || analysis_.flags.multiline);
+
+ if(doubleQuoted || canonical_)
+ {
+ return ScalarStyle.doubleQuoted;
+ }
+
+ if(invalidOrPlain && event_.implicit && !simpleNonPlain && allowPlain)
+ {
+ return ScalarStyle.plain;
+ }
+
+ if(block && flowLevel_ == 0 && context_ != Context.mappingSimpleKey &&
+ analysis_.flags.allowBlock)
+ {
+ return style;
+ }
+
+ if((invalidOrPlain || singleQuoted) &&
+ analysis_.flags.allowSingleQuoted &&
+ !(context_ == Context.mappingSimpleKey && analysis_.flags.multiline))
+ {
+ return ScalarStyle.singleQuoted;
+ }
+
+ return ScalarStyle.doubleQuoted;
+ }
+
+ ///Prepare YAML version string for output.
+ static string prepareVersion(const string YAMLVersion) @safe
+ in(YAMLVersion.split(".")[0] == "1",
+ "Unsupported YAML version: " ~ YAMLVersion)
+ {
+ return YAMLVersion;
+ }
+
+ ///Encode an Unicode character for tag directive and write it to writer.
+ static void encodeChar(Writer)(ref Writer writer, in dchar c) @safe
+ {
+ char[4] data;
+ const bytes = encode(data, c);
+ //For each byte add string in format %AB , where AB are hex digits of the byte.
+ foreach(const char b; data[0 .. bytes])
+ {
+ formattedWrite(writer, "%%%02X", cast(ubyte)b);
+ }
+ }
+
+ ///Prepare tag directive handle for output.
+        static string prepareTagHandle(const string handle) @safe
+            in(handle != "", "Tag handle must not be empty")
+            // Only the interior is validated: the first and last characters of
+            // a handle are the '!' markers, which drop(1).dropBack(1) skips.
+            in(handle.drop(1).dropBack(1).all!(c => isAlphaNum(c) || c.among!('-', '_')),
+               "Tag handle contains invalid characters")
+        {
+            return handle;
+        }
+
+        ///Prepare tag directive prefix for output.
+        static string prepareTagPrefix(const string prefix) @safe
+            in(prefix != "", "Tag prefix must not be empty")
+        {
+            auto appender = appender!string();
+            // A leading '!' is copied verbatim; offset shifts byte indices past it.
+            const int offset = prefix[0] == '!';
+            // start/end delimit the current run of bytes that need no escaping.
+            size_t start, end;
+
+            foreach(const size_t i, const dchar c; prefix)
+            {
+                const size_t idx = i + offset;
+                // URI characters permitted in a tag prefix pass through unescaped.
+                if(isAlphaNum(c) || c.among!('-', ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '!', '~', '*', '\\', '\'', '(', ')', '[', ']', '%'))
+                {
+                    end = idx + 1;
+                    continue;
+                }
+
+                // Flush the pending unescaped run, then escape this character.
+                if(start < idx){appender.put(prefix[start .. idx]);}
+                start = end = idx + 1;
+
+                encodeChar(appender, c);
+            }
+
+            // Flush whatever remains after the last escaped character.
+            end = min(end, prefix.length);
+            if(start < end){appender.put(prefix[start .. end]);}
+            return appender.data;
+        }
+
+        ///Prepare tag for output.
+        string prepareTag(in string tag) @safe
+            in(tag != "", "Tag must not be empty")
+        {
+
+            string tagString = tag;
+            // The non-specific tag "!" is emitted verbatim.
+            if(tagString == "!"){return tagString;}
+            string handle;
+            string suffix = tagString;
+
+            //Sort lexicographically by prefix.
+            sort!"icmp(a.prefix, b.prefix) < 0"(tagDirectives_);
+            // Keep the last matching directive; "!" only matches when the tag
+            // has something after it.
+            foreach(ref pair; tagDirectives_)
+            {
+                auto prefix = pair.prefix;
+                if(tagString.startsWith(prefix) &&
+                   (prefix != "!" || prefix.length < tagString.length))
+                {
+                    handle = pair.handle;
+                    suffix = tagString[prefix.length .. $];
+                }
+            }
+
+            auto appender = appender!string();
+            // With no usable handle the tag is written in verbatim "!<...>" form.
+            appender.put(handle !is null && handle != "" ? handle : "!<");
+            size_t start, end;
+            // NOTE(review): end is advanced once per decoded character but is
+            // used to slice suffix by *bytes*; a multi-byte character in the
+            // suffix would misalign the slice — confirm with non-ASCII tags.
+            foreach(const dchar c; suffix)
+            {
+                if(isAlphaNum(c) || c.among!('-', ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\\', '\'', '(', ')', '[', ']') ||
+                   (c == '!' && handle != "!"))
+                {
+                    ++end;
+                    continue;
+                }
+                if(start < end){appender.put(suffix[start .. end]);}
+                start = end = end + 1;
+
+                encodeChar(appender, c);
+            }
+
+            if(start < end){appender.put(suffix[start .. end]);}
+            if(handle is null || handle == ""){appender.put(">");}
+
+            return appender.data;
+        }
+
+        ///Prepare anchor for output.
+        static string prepareAnchor(const string anchor) @safe
+            in(anchor != "", "Anchor must not be empty")
+            in(anchor.all!(c => isAlphaNum(c) || c.among!('-', '_')), "Anchor contains invalid characters")
+        {
+            // Validation happens entirely in the contracts; the anchor is
+            // returned unchanged.
+            return anchor;
+        }
+
+        ///Analyze specified scalar and return the analysis result.
+        static ScalarAnalysis analyzeScalar(string scalar) @safe
+        {
+            ScalarAnalysis analysis;
+            analysis.flags.isNull = false;
+            analysis.scalar = scalar;
+
+            //Empty scalar is a special case.
+            if(scalar is null || scalar == "")
+            {
+                with(ScalarAnalysis.AnalysisFlags)
+                    analysis.flags =
+                        empty |
+                        allowBlockPlain |
+                        allowSingleQuoted |
+                        allowDoubleQuoted;
+                return analysis;
+            }
+
+            //Indicators and special characters (All false by default).
+            bool blockIndicators, flowIndicators, lineBreaks, specialCharacters;
+
+            //Important whitespace combinations (All false by default).
+            bool leadingSpace, leadingBreak, trailingSpace, trailingBreak,
+                 breakSpace, spaceBreak;
+
+            //Check document indicators.
+            if(scalar.startsWith("---", "..."))
+            {
+                blockIndicators = flowIndicators = true;
+            }
+
+            //First character or preceded by a whitespace.
+            bool preceededByWhitespace = true;
+
+            //Last character or followed by a whitespace.
+            bool followedByWhitespace = scalar.length == 1 ||
+                scalar[1].among!(' ', '\t', '\0', '\n', '\r', '\u0085', '\u2028', '\u2029');
+
+            //The previous character is a space/break (false by default).
+            bool previousSpace, previousBreak;
+
+            foreach(const size_t index, const dchar c; scalar)
+            {
+                //Check for indicators.
+                if(index == 0)
+                {
+                    //Leading indicators are special characters.
+                    if(c.isSpecialChar)
+                    {
+                        flowIndicators = blockIndicators = true;
+                    }
+                    if(':' == c || '?' == c)
+                    {
+                        flowIndicators = true;
+                        if(followedByWhitespace){blockIndicators = true;}
+                    }
+                    if(c == '-' && followedByWhitespace)
+                    {
+                        flowIndicators = blockIndicators = true;
+                    }
+                }
+                else
+                {
+                    //Some indicators cannot appear within a scalar as well.
+                    if(c.isFlowIndicator){flowIndicators = true;}
+                    if(c == ':')
+                    {
+                        flowIndicators = true;
+                        if(followedByWhitespace){blockIndicators = true;}
+                    }
+                    if(c == '#' && preceededByWhitespace)
+                    {
+                        flowIndicators = blockIndicators = true;
+                    }
+                }
+
+                //Check for line breaks, special, and unicode characters.
+                if(c.isNewLine){lineBreaks = true;}
+                // Anything outside printable ASCII and the printable Unicode
+                // ranges (excluding the BOM) counts as special and will force
+                // double-quoted style below.
+                if(!(c == '\n' || (c >= '\x20' && c <= '\x7E')) &&
+                   !((c == '\u0085' || (c >= '\xA0' && c <= '\uD7FF') ||
+                     (c >= '\uE000' && c <= '\uFFFD')) && c != '\uFEFF'))
+                {
+                    specialCharacters = true;
+                }
+
+                //Detect important whitespace combinations.
+                if(c == ' ')
+                {
+                    if(index == 0){leadingSpace = true;}
+                    if(index == scalar.length - 1){trailingSpace = true;}
+                    if(previousBreak){breakSpace = true;}
+                    previousSpace = true;
+                    previousBreak = false;
+                }
+                else if(c.isNewLine)
+                {
+                    if(index == 0){leadingBreak = true;}
+                    if(index == scalar.length - 1){trailingBreak = true;}
+                    if(previousSpace){spaceBreak = true;}
+                    previousSpace = false;
+                    previousBreak = true;
+                }
+                else
+                {
+                    previousSpace = previousBreak = false;
+                }
+
+                //Prepare for the next character.
+                preceededByWhitespace = c.isSpace != 0;
+                // NOTE(review): index is a byte offset, so index + 2 only names
+                // the character after next when c is single-byte — confirm
+                // behavior for multi-byte scalars.
+                followedByWhitespace = index + 2 >= scalar.length ||
+                                       scalar[index + 2].isSpace;
+            }
+
+            with(ScalarAnalysis.AnalysisFlags)
+            {
+                //Let's decide what styles are allowed.
+                analysis.flags |= allowFlowPlain | allowBlockPlain | allowSingleQuoted |
+                                  allowDoubleQuoted | allowBlock;
+
+                //Leading and trailing whitespaces are bad for plain scalars.
+                if(leadingSpace || leadingBreak || trailingSpace || trailingBreak)
+                {
+                    analysis.flags &= ~(allowFlowPlain | allowBlockPlain);
+                }
+
+                //We do not permit trailing spaces for block scalars.
+                if(trailingSpace)
+                {
+                    analysis.flags &= ~allowBlock;
+                }
+
+                //Spaces at the beginning of a new line are only acceptable for block
+                //scalars.
+                if(breakSpace)
+                {
+                    analysis.flags &= ~(allowFlowPlain | allowBlockPlain | allowSingleQuoted);
+                }
+
+                //Spaces followed by breaks, as well as special character are only
+                //allowed for double quoted scalars.
+                if(spaceBreak || specialCharacters)
+                {
+                    analysis.flags &= ~(allowFlowPlain | allowBlockPlain | allowSingleQuoted | allowBlock);
+                }
+
+                //Although the plain scalar writer supports breaks, we never emit
+                //multiline plain scalars.
+                if(lineBreaks)
+                {
+                    analysis.flags &= ~(allowFlowPlain | allowBlockPlain);
+                    analysis.flags |= multiline;
+                }
+
+                //Flow indicators are forbidden for flow plain scalars.
+                if(flowIndicators)
+                {
+                    analysis.flags &= ~allowFlowPlain;
+                }
+
+                //Block indicators are forbidden for block plain scalars.
+                if(blockIndicators)
+                {
+                    analysis.flags &= ~allowBlockPlain;
+                }
+            }
+            return analysis;
+        }
+
+        // Spot-checks for analyzeScalar: each case asserts exactly which scalar
+        // styles the analysis permits for a representative input.
+        @safe unittest
+        {
+            with(analyzeScalar("").flags)
+            {
+                // workaround for empty being std.range.primitives.empty here
+                alias empty = ScalarAnalysis.AnalysisFlags.empty;
+                assert(empty && allowBlockPlain && allowSingleQuoted && allowDoubleQuoted);
+            }
+            with(analyzeScalar("a").flags)
+            {
+                assert(allowFlowPlain && allowBlockPlain && allowSingleQuoted && allowDoubleQuoted && allowBlock);
+            }
+            with(analyzeScalar(" ").flags)
+            {
+                assert(allowSingleQuoted && allowDoubleQuoted);
+            }
+            with(analyzeScalar(" a").flags)
+            {
+                assert(allowSingleQuoted && allowDoubleQuoted);
+            }
+            with(analyzeScalar("a ").flags)
+            {
+                assert(allowSingleQuoted && allowDoubleQuoted);
+            }
+            with(analyzeScalar("\na").flags)
+            {
+                assert(allowSingleQuoted && allowDoubleQuoted);
+            }
+            with(analyzeScalar("a\n").flags)
+            {
+                assert(allowSingleQuoted && allowDoubleQuoted);
+            }
+            with(analyzeScalar("\n").flags)
+            {
+                assert(multiline && allowSingleQuoted && allowDoubleQuoted && allowBlock);
+            }
+            with(analyzeScalar(" \n").flags)
+            {
+                assert(multiline && allowDoubleQuoted);
+            }
+            with(analyzeScalar("\n a").flags)
+            {
+                assert(multiline && allowDoubleQuoted && allowBlock);
+            }
+        }
+
+        //Writers.
+
+        ///Start the YAML stream (write the unicode byte order mark).
+        void writeStreamStart() @safe
+        {
+            //Write BOM (except for UTF-8)
+            static if(is(CharType == wchar) || is(CharType == dchar))
+            {
+                stream_.put(cast(CharType)'\uFEFF');
+            }
+        }
+
+        ///End the YAML stream. Nothing needs to be flushed here.
+        void writeStreamEnd() @safe {}
+
+        ///Write an indicator (e.g. ":", "[", ">", etc.).
+        ///
+        ///Params: indicator      = Indicator text to write.
+        ///        needWhitespace = Must the indicator be separated from previous
+        ///                         output by a space?
+        ///        whitespace     = Does output end in whitespace after this?
+        ///        indentation    = May the indentation flag stay set?
+        void writeIndicator(const scope char[] indicator,
+                            const Flag!"needWhitespace" needWhitespace,
+                            const Flag!"whitespace" whitespace = No.whitespace,
+                            const Flag!"indentation" indentation = No.indentation) @safe
+        {
+            // Only prepend a space if the previous output didn't end in one.
+            const bool prefixSpace = !whitespace_ && needWhitespace;
+            whitespace_ = whitespace;
+            indentation_ = indentation_ && indentation;
+            openEnded_ = false;
+            column_ += indicator.length;
+            if(prefixSpace)
+            {
+                ++column_;
+                writeString(" ");
+            }
+            writeString(indicator);
+        }
+
+        ///Write indentation.
+        void writeIndent() @safe
+        {
+            const indent = indent_ == -1 ? 0 : indent_;
+
+            // Break the line unless we're already at a fresh, indented position.
+            if(!indentation_ || column_ > indent || (column_ == indent && !whitespace_))
+            {
+                writeLineBreak();
+            }
+            if(column_ < indent)
+            {
+                whitespace_ = true;
+
+                //Used to avoid allocation of arbitrary length strings.
+                static immutable spaces = "    ";
+                size_t numSpaces = indent - column_;
+                column_ = indent;
+                while(numSpaces >= spaces.length)
+                {
+                    writeString(spaces);
+                    numSpaces -= spaces.length;
+                }
+                writeString(spaces[0 .. numSpaces]);
+            }
+        }
+
+        ///Start new line, writing data (or the configured line break if null).
+        void writeLineBreak(const scope char[] data = null) @safe
+        {
+            whitespace_ = indentation_ = true;
+            ++line_;
+            column_ = 0;
+            writeString(data is null ? lineBreak(bestLineBreak_) : data);
+        }
+
+        ///Write a YAML version directive, e.g. "%YAML 1.1".
+        void writeVersionDirective(const string versionText) @safe
+        {
+            writeString("%YAML ");
+            writeString(versionText);
+            writeLineBreak();
+        }
+
+        ///Write a tag directive, e.g. "%TAG !foo! bar".
+        void writeTagDirective(const string handle, const string prefix) @safe
+        {
+            writeString("%TAG ");
+            writeString(handle);
+            writeString(" ");
+            writeString(prefix);
+            writeLineBreak();
+        }
+        ///Set the next emitter state: the method named by compile-time string D.
+        void nextExpected(string D)() @safe
+        {
+            state_ = mixin("function(typeof(this)* self) { self."~D~"(); }");
+        }
+        ///Set the next emitter state to an explicit state function.
+        void nextExpected(EmitterFunction f) @safe
+        {
+            state_ = f;
+        }
+        ///Invoke the current state function.
+        void callNext() @safe
+        {
+            state_(&this);
+        }
+}
+
+
+private:
+
+///RAII struct used to write out scalar values.
+struct ScalarWriter(Range, CharType)
+{
+ invariant()
+ {
+ assert(emitter_.bestIndent_ > 0 && emitter_.bestIndent_ < 10,
+ "Emitter bestIndent must be 1 to 9 for one-character indent hint");
+ }
+
+ private:
+ @disable int opCmp(ref Emitter!(Range, CharType));
+ @disable bool opEquals(ref Emitter!(Range, CharType));
+
+ ///Used as "null" UTF-32 character.
+ static immutable dcharNone = dchar.max;
+
+ ///Emitter used to emit the scalar.
+ Emitter!(Range, CharType)* emitter_;
+
+ ///UTF-8 encoded text of the scalar to write.
+ string text_;
+
+ ///Can we split the scalar into multiple lines?
+ bool split_;
+ ///Are we currently going over spaces in the text?
+ bool spaces_;
+ ///Are we currently going over line breaks in the text?
+ bool breaks_;
+
+ ///Start and end byte of the text range we're currently working with.
+ size_t startByte_, endByte_;
+ ///End byte of the text range including the currently processed character.
+ size_t nextEndByte_;
+ ///Start and end character of the text range we're currently working with.
+ long startChar_, endChar_;
+
+ public:
+        ///Construct a ScalarWriter using emitter to output text.
+        ///
+        ///Params: emitter = Emitter that receives the output.
+        ///        text    = UTF-8 scalar text to write.
+        ///        split   = May the scalar be split over multiple lines?
+        this(Emitter!(Range, CharType)* emitter, string text, const bool split = true) @safe nothrow
+        {
+            emitter_ = emitter;
+            text_ = text;
+            split_ = split;
+        }
+
+        ///Write text as single quoted scalar.
+        void writeSingleQuoted() @safe
+        {
+            emitter_.writeIndicator("\'", Yes.needWhitespace);
+            spaces_ = breaks_ = false;
+            resetTextPosition();
+
+            do
+            {
+                const dchar c = nextChar();
+                if(spaces_)
+                {
+                    // Fold the line at a space run when too wide, but never at
+                    // the very start or end of the scalar.
+                    if(c != ' ' && tooWide() && split_ &&
+                       startByte_ != 0 && endByte_ != text_.length)
+                    {
+                        writeIndent(Flag!"ResetSpace".no);
+                        updateRangeStart();
+                    }
+                    else if(c != ' ')
+                    {
+                        writeCurrentRange(Flag!"UpdateColumn".yes);
+                    }
+                }
+                else if(breaks_)
+                {
+                    if(!c.isNewLine)
+                    {
+                        writeStartLineBreak();
+                        writeLineBreaks();
+                        emitter_.writeIndent();
+                    }
+                }
+                else if((c == dcharNone || c == '\'' || c == ' ' || c.isNewLine)
+                        && startChar_ < endChar_)
+                {
+                    writeCurrentRange(Flag!"UpdateColumn".yes);
+                }
+                // A single quote is escaped by doubling it.
+                if(c == '\'')
+                {
+                    emitter_.column_ += 2;
+                    emitter_.writeString("\'\'");
+                    startByte_ = endByte_ + 1;
+                    startChar_ = endChar_ + 1;
+                }
+                updateBreaks(c, Flag!"UpdateSpaces".yes);
+            }while(endByte_ < text_.length);
+
+            emitter_.writeIndicator("\'", No.needWhitespace);
+        }
+
+        ///Write text as double quoted scalar.
+        void writeDoubleQuoted() @safe
+        {
+            resetTextPosition();
+            emitter_.writeIndicator("\"", Yes.needWhitespace);
+            do
+            {
+                const dchar c = nextChar();
+                //handle special characters
+                if(c == dcharNone || c.among!('\"', '\\', '\u0085', '\u2028', '\u2029', '\uFEFF') ||
+                   !((c >= '\x20' && c <= '\x7E') ||
+                     ((c >= '\xA0' && c <= '\uD7FF') || (c >= '\uE000' && c <= '\uFFFD'))))
+                {
+                    // Flush the plain run preceding the special character.
+                    if(startChar_ < endChar_)
+                    {
+                        writeCurrentRange(Flag!"UpdateColumn".yes);
+                    }
+                    if(c != dcharNone)
+                    {
+                        auto appender = appender!string();
+                        // Use a named escape if one exists, hex escape otherwise.
+                        if(const dchar es = toEscape(c))
+                        {
+                            appender.put('\\');
+                            appender.put(es);
+                        }
+                        else
+                        {
+                            //Write an escaped Unicode character.
+                            const format = c <= 255   ? "\\x%02X":
+                                           c <= 65535 ? "\\u%04X": "\\U%08X";
+                            formattedWrite(appender, format, cast(uint)c);
+                        }
+
+                        emitter_.column_ += appender.data.length;
+                        emitter_.writeString(appender.data);
+                        startChar_ = endChar_ + 1;
+                        startByte_ = nextEndByte_;
+                    }
+                }
+                // Split an over-wide line with a trailing backslash.
+                if((endByte_ > 0 && endByte_ < text_.length - strideBack(text_, text_.length))
+                   && (c == ' ' || startChar_ >= endChar_)
+                   && (emitter_.column_ + endChar_ - startChar_ > emitter_.bestWidth_)
+                   && split_)
+                {
+                    //text_[2:1] is ok in Python but not in D, so we have to use min()
+                    emitter_.writeString(text_[min(startByte_, endByte_) .. endByte_]);
+                    emitter_.writeString("\\");
+                    emitter_.column_ += startChar_ - endChar_ + 1;
+                    startChar_ = max(startChar_, endChar_);
+                    startByte_ = max(startByte_, endByte_);
+
+                    writeIndent(Flag!"ResetSpace".yes);
+                    // A leading space on the continuation must itself be escaped.
+                    if(charAtStart() == ' ')
+                    {
+                        emitter_.writeString("\\");
+                        ++emitter_.column_;
+                    }
+                }
+            }while(endByte_ < text_.length);
+            emitter_.writeIndicator("\"", No.needWhitespace);
+        }
+
+        ///Write text as folded block scalar (">").
+        void writeFolded() @safe
+        {
+            initBlock('>');
+            bool leadingSpace = true;
+            spaces_ = false;
+            breaks_ = true;
+            resetTextPosition();
+
+            do
+            {
+                const dchar c = nextChar();
+                if(breaks_)
+                {
+                    if(!c.isNewLine)
+                    {
+                        // An extra break separates folded paragraphs, except
+                        // around more-indented (space-led) lines.
+                        if(!leadingSpace && c != dcharNone && c != ' ')
+                        {
+                            writeStartLineBreak();
+                        }
+                        leadingSpace = (c == ' ');
+                        writeLineBreaks();
+                        if(c != dcharNone){emitter_.writeIndent();}
+                    }
+                }
+                else if(spaces_)
+                {
+                    if(c != ' ' && tooWide())
+                    {
+                        writeIndent(Flag!"ResetSpace".no);
+                        updateRangeStart();
+                    }
+                    else if(c != ' ')
+                    {
+                        writeCurrentRange(Flag!"UpdateColumn".yes);
+                    }
+                }
+                else if(c == dcharNone || c.isNewLine || c == ' ')
+                {
+                    writeCurrentRange(Flag!"UpdateColumn".yes);
+                    if(c == dcharNone){emitter_.writeLineBreak();}
+                }
+                updateBreaks(c, Flag!"UpdateSpaces".yes);
+            }while(endByte_ < text_.length);
+        }
+
+        ///Write text as literal block scalar ("|"); content is kept verbatim.
+        void writeLiteral() @safe
+        {
+            initBlock('|');
+            breaks_ = true;
+            resetTextPosition();
+
+            do
+            {
+                const dchar c = nextChar();
+                if(breaks_)
+                {
+                    if(!c.isNewLine)
+                    {
+                        writeLineBreaks();
+                        if(c != dcharNone){emitter_.writeIndent();}
+                    }
+                }
+                else if(c == dcharNone || c.isNewLine)
+                {
+                    writeCurrentRange(Flag!"UpdateColumn".no);
+                    if(c == dcharNone){emitter_.writeLineBreak();}
+                }
+                updateBreaks(c, Flag!"UpdateSpaces".no);
+            }while(endByte_ < text_.length);
+        }
+
+        ///Write text as plain (unquoted) scalar.
+        void writePlain() @safe
+        {
+            // A plain scalar at document root leaves the document open-ended.
+            if(emitter_.context_ == Emitter!(Range, CharType).Context.root){emitter_.openEnded_ = true;}
+            if(text_ == ""){return;}
+            if(!emitter_.whitespace_)
+            {
+                ++emitter_.column_;
+                emitter_.writeString(" ");
+            }
+            emitter_.whitespace_ = emitter_.indentation_ = false;
+            spaces_ = breaks_ = false;
+            resetTextPosition();
+
+            do
+            {
+                const dchar c = nextChar();
+                if(spaces_)
+                {
+                    // Fold an over-wide line at a space run when allowed.
+                    if(c != ' ' && tooWide() && split_)
+                    {
+                        writeIndent(Flag!"ResetSpace".yes);
+                        updateRangeStart();
+                    }
+                    else if(c != ' ')
+                    {
+                        writeCurrentRange(Flag!"UpdateColumn".yes);
+                    }
+                }
+                else if(breaks_)
+                {
+                    if(!c.isNewLine)
+                    {
+                        writeStartLineBreak();
+                        writeLineBreaks();
+                        writeIndent(Flag!"ResetSpace".yes);
+                    }
+                }
+                else if(c == dcharNone || c.isNewLine || c == ' ')
+                {
+                    writeCurrentRange(Flag!"UpdateColumn".yes);
+                }
+                updateBreaks(c, Flag!"UpdateSpaces".yes);
+            }while(endByte_ < text_.length);
+        }
+
+    private:
+        ///Get next character and move end of the text range to it.
+        ///Returns dcharNone once the end of text is reached.
+        @property dchar nextChar() pure @safe
+        {
+            ++endChar_;
+            endByte_ = nextEndByte_;
+            if(endByte_ >= text_.length){return dcharNone;}
+            const c = text_[nextEndByte_];
+            //c is ascii, no need to decode.
+            if(c < 0x80)
+            {
+                ++nextEndByte_;
+                return c;
+            }
+            return decode(text_, nextEndByte_);
+        }
+
+        ///Get character at start of the text range.
+        @property dchar charAtStart() const pure @safe
+        {
+            size_t idx = startByte_;
+            return decode(text_, idx);
+        }
+
+        ///Is the current line too wide?
+        @property bool tooWide() const pure @safe nothrow
+        {
+            return startChar_ + 1 == endChar_ &&
+                   emitter_.column_ > emitter_.bestWidth_;
+        }
+
+        ///Determine hints (indicators) for block scalar.
+        ///
+        ///Params:  hints      = Destination for the hint characters.
+        ///         bestIndent = Indent to use in the indentation hint.
+        ///Returns: Number of hint characters written (0 to 2).
+        size_t determineBlockHints(char[] hints, uint bestIndent) const pure @safe
+        {
+            size_t hintsIdx;
+            if(text_.length == 0)
+                return hintsIdx;
+
+            // Decodes the character ending at `end`, moving `end` back onto
+            // that character's first byte.
+            dchar lastChar(const string str, ref size_t end)
+            {
+                size_t idx = end = end - strideBack(str, end);
+                return decode(text_, idx);
+            }
+
+            size_t end = text_.length;
+            const last = lastChar(text_, end);
+            const secondLast = end > 0 ? lastChar(text_, end) : 0;
+
+            // Leading whitespace requires an explicit indentation hint.
+            if(text_[0].isNewLine || text_[0] == ' ')
+            {
+                hints[hintsIdx++] = cast(char)('0' + bestIndent);
+            }
+            // Chomping hint: '-' strips a missing final break, '+' keeps
+            // trailing breaks.
+            if(!last.isNewLine)
+            {
+                hints[hintsIdx++] = '-';
+            }
+            else if(std.utf.count(text_) == 1 || secondLast.isNewLine)
+            {
+                hints[hintsIdx++] = '+';
+            }
+            return hintsIdx;
+        }
+
+        ///Initialize for block scalar writing with specified indicator.
+        ///
+        ///Writes the block scalar header: the indicator ('|' or '>') plus the
+        ///indentation/chomping hints from determineBlockHints, then a line break.
+        void initBlock(const char indicator) @safe
+        {
+            char[4] hints;
+            hints[0] = indicator;
+            const hintsLength = 1 + determineBlockHints(hints[1 .. $], emitter_.bestIndent_);
+            emitter_.writeIndicator(hints[0 .. hintsLength], Yes.needWhitespace);
+            // A '+' (keep) chomping hint means trailing line breaks belong to
+            // the scalar, leaving the document open-ended.
+            // Fixed: the old check read hints[$ - 1] — the last slot of the
+            // fixed-size array, which is unwritten padding whenever fewer than
+            // three hint characters were produced — so '+' was missed unless it
+            // happened to land in slot 3. Only hints[0 .. hintsLength] is valid.
+            if(hintsLength > 1 && hints[hintsLength - 1] == '+')
+            {
+                emitter_.openEnded_ = true;
+            }
+            emitter_.writeLineBreak();
+        }
+
+        ///Write out the current text range.
+        void writeCurrentRange(const Flag!"UpdateColumn" updateColumn) @safe
+        {
+            emitter_.writeString(text_[startByte_ .. endByte_]);
+            if(updateColumn){emitter_.column_ += endChar_ - startChar_;}
+            updateRangeStart();
+        }
+
+        ///Write line breaks in the text range.
+        void writeLineBreaks() @safe
+        {
+            foreach(const dchar br; text_[startByte_ .. endByte_])
+            {
+                if(br == '\n'){emitter_.writeLineBreak();}
+                else
+                {
+                    // Non-'\n' breaks are written out explicitly, encoded back
+                    // to UTF-8.
+                    char[4] brString;
+                    const bytes = encode(brString, br);
+                    emitter_.writeLineBreak(brString[0 .. bytes]);
+                }
+            }
+            updateRangeStart();
+        }
+
+        ///Write line break if start of the text range is a newline.
+        void writeStartLineBreak() @safe
+        {
+            if(charAtStart == '\n'){emitter_.writeLineBreak();}
+        }
+
+        ///Write indentation, optionally resetting whitespace/indentation flags.
+        void writeIndent(const Flag!"ResetSpace" resetSpace) @safe
+        {
+            emitter_.writeIndent();
+            if(resetSpace)
+            {
+                emitter_.whitespace_ = emitter_.indentation_ = false;
+            }
+        }
+
+        ///Move start of text range to its end.
+        void updateRangeStart() pure @safe nothrow
+        {
+            startByte_ = endByte_;
+            startChar_ = endChar_;
+        }
+
+        ///Update the line breaks_ flag, optionally updating the spaces_ flag.
+        void updateBreaks(in dchar c, const Flag!"UpdateSpaces" updateSpaces) pure @safe
+        {
+            // The sentinel "no character" value leaves both flags untouched.
+            if(c == dcharNone){return;}
+            breaks_ = (c.isNewLine != 0);
+            if(updateSpaces){spaces_ = c == ' ';}
+        }
+
+        ///Move to the beginning of text.
+        void resetTextPosition() pure @safe nothrow
+        {
+            startByte_ = endByte_ = nextEndByte_ = 0;
+            // -1 so the first nextChar() call yields character index 0.
+            startChar_ = endChar_ = -1;
+        }
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/encoding.d b/src/ext_depends/D-YAML/source/dyaml/encoding.d
new file mode 100644
index 0000000..50c10b9
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/encoding.d
@@ -0,0 +1,11 @@
+// Copyright Ferdinand Majerech 2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.encoding;
+
+
+import tinyendian;
+
+alias Encoding = tinyendian.UTFEncoding;
diff --git a/src/ext_depends/D-YAML/source/dyaml/escapes.d b/src/ext_depends/D-YAML/source/dyaml/escapes.d
new file mode 100644
index 0000000..32080a2
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/escapes.d
@@ -0,0 +1,92 @@
+
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.escapes;
+
+package:
+
+import std.meta : AliasSeq;
+alias escapes = AliasSeq!('0', 'a', 'b', 't', '\t', 'n', 'v', 'f', 'r', 'e', ' ',
+ '\"', '\\', 'N', '_', 'L', 'P');
+
+/// YAML hex codes specifying the length of the hex number.
+alias escapeHexCodeList = AliasSeq!('x', 'u', 'U');
+
+/// Convert a YAML escape to a dchar.
+///
+/// Asserts if the argument is not one of the characters in `escapes`.
+dchar fromEscape(dchar escape) @safe pure nothrow @nogc
+{
+    switch(escape)
+    {
+        case '0':  return '\0';
+        case 'a':  return '\x07';
+        case 'b':  return '\x08';
+        case 't':  return '\x09';
+        // A literal TAB after the backslash also denotes a tab.
+        case '\t': return '\x09';
+        case 'n':  return '\x0A';
+        case 'v':  return '\x0B';
+        case 'f':  return '\x0C';
+        case 'r':  return '\x0D';
+        case 'e':  return '\x1B';
+        case ' ':  return '\x20';
+        case '\"': return '\"';
+        case '\\': return '\\';
+        case 'N':  return '\x85'; //'\u0085';
+        case '_':  return '\xA0';
+        case 'L':  return '\u2028';
+        case 'P':  return '\u2029';
+        default:   assert(false, "No such YAML escape");
+    }
+}
+
+/**
+ * Convert a dchar to a YAML escape.
+ *
+ * Params:
+ *      value = The possibly escapable character.
+ *
+ * Returns:
+ *      If the character passed as parameter can be escaped, returns the matching
+ *      escape, otherwise returns a null character.
+ */
+dchar toEscape(dchar value) @safe pure nothrow @nogc
+{
+    switch(value)
+    {
+        case '\0':     return '0';
+        case '\x07':   return 'a';
+        case '\x08':   return 'b';
+        case '\x09':   return 't';
+        case '\x0A':   return 'n';
+        case '\x0B':   return 'v';
+        case '\x0C':   return 'f';
+        case '\x0D':   return 'r';
+        case '\x1B':   return 'e';
+        case '\"':     return '\"';
+        case '\\':     return '\\';
+        case '\xA0':   return '_';
+        case '\x85':   return 'N';
+        case '\u2028': return 'L';
+        case '\u2029': return 'P';
+        // Not escapable: the null character doubles as the "no escape" signal.
+        default:       return 0;
+    }
+}
+
+/// Get the length of a hexadecimal number determined by its hex code.
+///
+/// Need a function as associative arrays don't work with @nogc.
+/// (And this may be even faster with a function.)
+uint escapeHexLength(dchar hexCode) @safe pure nothrow @nogc
+{
+    switch(hexCode)
+    {
+        case 'x': return 2;
+        case 'u': return 4;
+        case 'U': return 8;
+        default:  assert(false, "No such YAML hex code");
+    }
+}
+
+
diff --git a/src/ext_depends/D-YAML/source/dyaml/event.d b/src/ext_depends/D-YAML/source/dyaml/event.d
new file mode 100644
index 0000000..f4a747f
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/event.d
@@ -0,0 +1,243 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/**
+ * YAML events.
+ * Code based on PyYAML: http://www.pyyaml.org
+ */
+module dyaml.event;
+
+import std.array;
+import std.conv;
+
+import dyaml.exception;
+import dyaml.reader;
+import dyaml.tagdirective;
+import dyaml.style;
+
+
+package:
+///Event types.
+enum EventID : ubyte
+{
+    invalid = 0,     /// Invalid (uninitialized) event.
+    streamStart,     /// Stream start
+    streamEnd,       /// Stream end
+    documentStart,   /// Document start
+    documentEnd,     /// Document end
+    alias_,          /// Alias
+    scalar,          /// Scalar
+    sequenceStart,   /// Sequence start
+    sequenceEnd,     /// Sequence end
+    mappingStart,    /// Mapping start
+    mappingEnd       /// Mapping end
+}
+
+/**
+ * YAML event produced by parser.
+ *
+ * 48 bytes on 64bit.
+ */
+struct Event
+{
+    @disable int opCmp(ref Event);
+
+    ///Value of the event, if any.
+    string value;
+    ///Start position of the event in file/stream.
+    Mark startMark;
+    ///End position of the event in file/stream.
+    Mark endMark;
+    // anchor/tag share storage with tagDirectives: only DocumentStart events
+    // carry tag directives, and they never carry an anchor or tag, so the
+    // union is safe as long as access is gated on the event id (see the
+    // asserting accessors below).
+    union
+    {
+        struct
+        {
+            ///Anchor of the event, if any.
+            string _anchor;
+            ///Tag of the event, if any.
+            string _tag;
+        }
+        ///Tag directives, if this is a DocumentStart.
+        //TagDirectives tagDirectives;
+        TagDirective[] _tagDirectives;
+    }
+    ///Event type.
+    EventID id = EventID.invalid;
+    ///Style of scalar event, if this is a scalar event.
+    ScalarStyle scalarStyle = ScalarStyle.invalid;
+    // implicit/explicitDocument also overlap: an event is either a node event
+    // (implicit tag flag) or a document event (explicit flag), never both.
+    union
+    {
+        ///Should the tag be implicitly resolved?
+        bool implicit;
+        /**
+         * Is this document event explicit?
+         *
+         * Used if this is a DocumentStart or DocumentEnd.
+         */
+        bool explicitDocument;
+    }
+    ///Collection style, if this is a SequenceStart or MappingStart.
+    CollectionStyle collectionStyle = CollectionStyle.invalid;
+
+    ///Is this a null (uninitialized) event?
+    @property bool isNull() const pure @safe nothrow {return id == EventID.invalid;}
+
+    ///Get string representation of the token ID.
+    @property string idString() const @safe {return to!string(id);}
+
+    ///Access the anchor; @trusted because of the union (guarded by the assert).
+    auto ref anchor() inout @trusted pure {
+        assert(id != EventID.documentStart, "DocumentStart events cannot have anchors.");
+        return _anchor;
+    }
+
+    ///Access the tag; @trusted because of the union (guarded by the assert).
+    auto ref tag() inout @trusted pure {
+        assert(id != EventID.documentStart, "DocumentStart events cannot have tags.");
+        return _tag;
+    }
+
+    ///Access the tag directives; only valid for DocumentStart events.
+    auto ref tagDirectives() inout @trusted pure {
+        assert(id == EventID.documentStart, "Only DocumentStart events have tag directives.");
+        return _tagDirectives;
+    }
+}
+
+/**
+ * Construct a simple event.
+ *
+ * Params:  start  = Start position of the event in the file/stream.
+ *          end    = End position of the event in the file/stream.
+ *          anchor = Anchor, if this is an alias event.
+ */
+Event event(EventID id)(const Mark start, const Mark end, const string anchor = null)
+    @safe
+    in(!(id == EventID.alias_ && anchor == ""), "Missing anchor for alias event")
+{
+    Event result;
+    result.startMark = start;
+    result.endMark = end;
+    result.anchor = anchor;
+    result.id = id;
+    return result;
+}
+
+/**
+ * Construct a collection (mapping or sequence) start event.
+ *
+ * Params:  start    = Start position of the event in the file/stream.
+ *          end      = End position of the event in the file/stream.
+ *          anchor   = Anchor of the sequence, if any.
+ *          tag      = Tag of the sequence, if specified.
+ *          implicit = Should the tag be implicitly resolved?
+ *          style    = Style to use when outputting document.
+ */
+Event collectionStartEvent(EventID id)
+    (const Mark start, const Mark end, const string anchor, const string tag,
+     const bool implicit, const CollectionStyle style) pure @safe nothrow
+{
+    static assert(id == EventID.sequenceStart || id == EventID.sequenceEnd ||
+                  id == EventID.mappingStart || id == EventID.mappingEnd);
+    Event result;
+    result.startMark = start;
+    result.endMark = end;
+    result.anchor = anchor;
+    result.tag = tag;
+    result.id = id;
+    result.implicit = implicit;
+    result.collectionStyle = style;
+    return result;
+}
+
+/**
+ * Construct a stream start event.
+ *
+ * Params:  start = Start position of the event in the file/stream.
+ *          end   = End position of the event in the file/stream.
+ */
+Event streamStartEvent(const Mark start, const Mark end)
+    pure @safe nothrow
+{
+    Event result;
+    result.startMark = start;
+    result.endMark = end;
+    result.id = EventID.streamStart;
+    return result;
+}
+
+///Aliases for simple events.
+alias streamEndEvent = event!(EventID.streamEnd);
+alias aliasEvent = event!(EventID.alias_);
+alias sequenceEndEvent = event!(EventID.sequenceEnd);
+alias mappingEndEvent = event!(EventID.mappingEnd);
+
+///Aliases for collection start events.
+alias sequenceStartEvent = collectionStartEvent!(EventID.sequenceStart);
+alias mappingStartEvent = collectionStartEvent!(EventID.mappingStart);
+
+/**
+ * Construct a document start event.
+ *
+ * Params:  start         = Start position of the event in the file/stream.
+ *          end           = End position of the event in the file/stream.
+ *          explicit      = Is this an explicit document start?
+ *          YAMLVersion   = YAML version string of the document.
+ *          tagDirectives = Tag directives of the document.
+ */
+Event documentStartEvent(const Mark start, const Mark end, const bool explicit, string YAMLVersion,
+                         TagDirective[] tagDirectives) pure @safe nothrow
+{
+    Event result;
+    result.value = YAMLVersion;
+    result.startMark = start;
+    result.endMark = end;
+    // id must be set before tagDirectives: the accessor asserts on the id.
+    result.id = EventID.documentStart;
+    result.explicitDocument = explicit;
+    result.tagDirectives = tagDirectives;
+    return result;
+}
+
+/**
+ * Construct a document end event.
+ *
+ * Params:  start    = Start position of the event in the file/stream.
+ *          end      = End position of the event in the file/stream.
+ *          explicit = Is this an explicit document end?
+ */
+Event documentEndEvent(const Mark start, const Mark end, const bool explicit) pure @safe nothrow
+{
+    Event result;
+    result.startMark = start;
+    result.endMark = end;
+    result.id = EventID.documentEnd;
+    result.explicitDocument = explicit;
+    return result;
+}
+
+/// Construct a scalar event.
+///
+/// Params:  start    = Start position of the event in the file/stream.
+///          end      = End position of the event in the file/stream.
+///          anchor   = Anchor of the scalar, if any.
+///          tag      = Tag of the scalar, if specified.
+///          implicit = Should the tag be implicitly resolved?
+///          value    = String value of the scalar.
+///          style    = Scalar style.
+Event scalarEvent(const Mark start, const Mark end, const string anchor, const string tag,
+                  const bool implicit, const string value,
+                  const ScalarStyle style = ScalarStyle.invalid) @safe pure nothrow @nogc
+{
+    Event result;
+    result.value = value;
+    result.startMark = start;
+    result.endMark = end;
+
+    result.anchor = anchor;
+    result.tag = tag;
+
+    result.id = EventID.scalar;
+    result.scalarStyle = style;
+    result.implicit = implicit;
+    return result;
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/exception.d b/src/ext_depends/D-YAML/source/dyaml/exception.d
new file mode 100644
index 0000000..2f13a44
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/exception.d
@@ -0,0 +1,159 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+///Exceptions thrown by D:YAML and _exception related code.
+module dyaml.exception;
+
+
+import std.algorithm;
+import std.array;
+import std.string;
+import std.conv;
+
+
+/// Base class for all exceptions thrown by D:YAML.
+class YAMLException : Exception
+{
+    /// Construct a YAMLException with specified message and position where it was thrown.
+    public this(string msg, string file = __FILE__, size_t line = __LINE__)
+        @safe pure nothrow
+    {
+        super(msg, file, line);
+    }
+}
+
+/// Position in a YAML stream, used for error messages.
+struct Mark
+{
+    package:
+        /// File name.
+        string name_;
+        /// Line number (0-based internally, stored as ushort).
+        ushort line_;
+        /// Column number (0-based internally, stored as ushort).
+        ushort column_;
+
+    public:
+        /// Construct a Mark with specified line and column in the file.
+        ///
+        /// The line is clamped to ushort.max; the column is truncated.
+        this(string name, const uint line, const uint column) @safe pure nothrow @nogc
+        {
+            name_   = name;
+            line_   = cast(ushort)min(ushort.max, line);
+            // This *will* overflow on extremely wide files but saves CPU time
+            // (mark ctor takes ~5% of time)
+            column_ = cast(ushort)column;
+        }
+
+        /// Get a file name.
+        @property string name() @safe pure nothrow @nogc const
+        {
+            return name_;
+        }
+
+        /// Get a line number.
+        @property ushort line() @safe pure nothrow @nogc const
+        {
+            return line_;
+        }
+
+        /// Get a column number.
+        @property ushort column() @safe pure nothrow @nogc const
+        {
+            return column_;
+        }
+
+        /// Get a string representation of the mark.
+        string toString() @safe pure nothrow const
+        {
+            // Line/column numbers start at zero internally, make them start at 1.
+            static string clamped(ushort v) @safe pure nothrow
+            {
+                // A saturated value may stand for any higher number.
+                return text(v + 1, v == ushort.max ? " or higher" : "");
+            }
+            return "file " ~ name_ ~ ",line " ~ clamped(line_) ~ ",column " ~ clamped(column_);
+        }
+}
+
+package:
+// A struct storing parameters to the MarkedYAMLException constructor.
+struct MarkedYAMLExceptionData
+{
+    // Context of the error.
+    string context;
+    // Position of the context in a YAML buffer.
+    Mark contextMark;
+    // The error itself.
+    string problem;
+    // Position if the error.
+    Mark problemMark;
+}
+
+// Base class of YAML exceptions with marked positions of the problem.
+abstract class MarkedYAMLException : YAMLException
+{
+    // Construct a MarkedYAMLException with specified context and problem.
+    this(string context, const Mark contextMark, string problem, const Mark problemMark,
+         string file = __FILE__, size_t line = __LINE__) @safe pure nothrow
+    {
+        // The context mark is omitted when it matches the problem mark.
+        const msg = context ~ '\n' ~
+                    (contextMark != problemMark ? contextMark.toString() ~ '\n' : "") ~
+                    problem ~ '\n' ~ problemMark.toString() ~ '\n';
+        super(msg, file, line);
+    }
+
+    // Construct a MarkedYAMLException with specified problem.
+    this(string problem, const Mark problemMark,
+         string file = __FILE__, size_t line = __LINE__)
+        @safe pure nothrow
+    {
+        super(problem ~ '\n' ~ problemMark.toString(), file, line);
+    }
+
+    /// Construct a MarkedYAMLException from a struct storing constructor parameters.
+    this(ref const(MarkedYAMLExceptionData) data) @safe pure nothrow
+    {
+        with(data) this(context, contextMark, problem, problemMark);
+    }
+}
+
+// Constructors of YAML exceptions are mostly the same, so we use a mixin.
+//
+// See_Also: YAMLException
+template ExceptionCtors()
+{
+    public this(string msg, string file = __FILE__, size_t line = __LINE__)
+        @safe pure nothrow
+    {
+        super(msg, file, line);
+    }
+}
+
+// Constructors of marked YAML exceptions are mostly the same, so we use a mixin.
+//
+// See_Also: MarkedYAMLException
+template MarkedExceptionCtors()
+{
+    public:
+        this(string context, const Mark contextMark, string problem,
+             const Mark problemMark, string file = __FILE__, size_t line = __LINE__)
+            @safe pure nothrow
+        {
+            super(context, contextMark, problem, problemMark,
+                  file, line);
+        }
+
+        this(string problem, const Mark problemMark,
+             string file = __FILE__, size_t line = __LINE__)
+            @safe pure nothrow
+        {
+            super(problem, problemMark, file, line);
+        }
+
+        this(ref const(MarkedYAMLExceptionData) data) @safe pure nothrow
+        {
+            super(data);
+        }
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/linebreak.d b/src/ext_depends/D-YAML/source/dyaml/linebreak.d
new file mode 100644
index 0000000..1f0f661
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/linebreak.d
@@ -0,0 +1,32 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.linebreak;
+
+
+///Enumerates platform specific line breaks.
+enum LineBreak
+{
+    /// Unix line break ("\n").
+    unix,
+    /// Windows line break ("\r\n").
+    windows,
+    /// Macintosh (classic Mac OS) line break ("\r").
+    macintosh
+}
+
+package:
+
+//Get line break string for specified line break.
+string lineBreak(in LineBreak b) pure @safe nothrow
+{
+    // Map each enum member to its literal character sequence; final switch
+    // guarantees at compile time that every member is covered.
+    with(LineBreak) final switch(b)
+    {
+        case unix:      return "\n";
+        case windows:   return "\r\n";
+        case macintosh: return "\r";
+    }
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/loader.d b/src/ext_depends/D-YAML/source/dyaml/loader.d
new file mode 100644
index 0000000..7e7096c
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/loader.d
@@ -0,0 +1,394 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/// Class used to load YAML documents.
+module dyaml.loader;
+
+
+import std.exception;
+import std.file;
+import std.stdio : File;
+import std.string;
+
+import dyaml.composer;
+import dyaml.constructor;
+import dyaml.event;
+import dyaml.exception;
+import dyaml.node;
+import dyaml.parser;
+import dyaml.reader;
+import dyaml.resolver;
+import dyaml.scanner;
+import dyaml.token;
+
+
+/** Loads YAML documents from files or char[].
+ *
+ * User specified Constructor and/or Resolver can be used to support new
+ * tags / data types.
+ */
+struct Loader
+{
+    private:
+        // Processes character data to YAML tokens.
+        Scanner scanner_;
+        // Processes tokens to YAML events.
+        Parser parser_;
+        // Composes YAML events into nodes. Constructed lazily, on the first
+        // use of the range interface (see popFront).
+        Composer composer_;
+        // Resolves tags (data types).
+        Resolver resolver_;
+        // Name of the input file or stream, used in error messages.
+        string name_ = "<unknown>";
+        // Are we done loading?
+        bool done_;
+        // Last node read from stream
+        Node currentNode;
+        // Has the range interface been initialized yet?
+        bool rangeInitialized;
+
+    public:
+        @disable this();
+        @disable int opCmp(ref Loader);
+        @disable bool opEquals(ref Loader);
+
+        /** Construct a Loader to load YAML from a file.
+         *
+         * Params: filename = Name of the file to load from.
+         *         file     = Already-opened file to load from.
+         *
+         * Throws: YAMLException if the file could not be opened or read.
+         */
+        static Loader fromFile(string filename) @trusted
+        {
+            try
+            {
+                auto loader = Loader(std.file.read(filename), filename);
+                return loader;
+            }
+            catch(FileException e)
+            {
+                throw new YAMLException("Unable to open file %s for YAML loading: %s"
+                                        .format(filename, e.msg), e.file, e.line);
+            }
+        }
+        /// ditto
+        static Loader fromFile(File file) @system
+        {
+            // Read the whole file into one buffer; the loader keeps
+            // references into this buffer.
+            auto loader = Loader(file.byChunk(4096).join, file.name);
+            return loader;
+        }
+
+        /** Construct a Loader to load YAML from a string.
+         *
+         * Params: data = String to load YAML from. The char[] version $(B will)
+         *                overwrite its input during parsing as D:YAML reuses memory.
+         *
+         * Returns: Loader loading YAML from given string.
+         *
+         * Throws:
+         *
+         * YAMLException if data could not be read (e.g. a decoding error)
+         */
+        static Loader fromString(char[] data) @safe
+        {
+            return Loader(cast(ubyte[])data);
+        }
+        /// Ditto
+        static Loader fromString(string data) @safe
+        {
+            // Duplicate so parsing may safely overwrite the buffer.
+            return fromString(data.dup);
+        }
+        /// Load a char[].
+        @safe unittest
+        {
+            assert(Loader.fromString("42".dup).load().as!int == 42);
+        }
+        /// Load a string.
+        @safe unittest
+        {
+            assert(Loader.fromString("42").load().as!int == 42);
+        }
+
+        /** Construct a Loader to load YAML from a buffer.
+         *
+         * Params: yamlData = Buffer with YAML data to load. This may be e.g. a file
+         *                    loaded to memory or a string with YAML data. Note that
+         *                    buffer $(B will) be overwritten, as D:YAML minimizes
+         *                    memory allocations by reusing the input _buffer.
+         *                    $(B Must not be deleted or modified by the user as long
+         *                    as nodes loaded by this Loader are in use!) - Nodes may
+         *                    refer to data in this buffer.
+         *
+         * Note that D:YAML looks for byte-order-marks YAML files encoded in
+         * UTF-16/UTF-32 (and sometimes UTF-8) use to specify the encoding and
+         * endianness, so it should be enough to load an entire file to a buffer and
+         * pass it to D:YAML, regardless of Unicode encoding.
+         *
+         * Throws: YAMLException if yamlData contains data illegal in YAML.
+         */
+        static Loader fromBuffer(ubyte[] yamlData) @safe
+        {
+            return Loader(yamlData);
+        }
+        /// Ditto
+        static Loader fromBuffer(void[] yamlData) @system
+        {
+            return Loader(yamlData);
+        }
+        /// Ditto
+        private this(void[] yamlData, string name = "<unknown>") @system
+        {
+            this(cast(ubyte[])yamlData, name);
+        }
+        /// Ditto
+        private this(ubyte[] yamlData, string name = "<unknown>") @safe
+        {
+            resolver_ = Resolver.withDefaultResolvers;
+            name_ = name;
+            try
+            {
+                auto reader_ = new Reader(yamlData, name);
+                scanner_ = Scanner(reader_);
+                parser_ = new Parser(scanner_);
+            }
+            catch(YAMLException e)
+            {
+                throw new YAMLException("Unable to open %s for YAML loading: %s"
+                                        .format(name_, e.msg), e.file, e.line);
+            }
+        }
+
+
+        /// Set stream _name. Used in debugging messages.
+        void name(string name) pure @safe nothrow @nogc
+        {
+            name_ = name;
+        }
+
+        /// Specify custom Resolver to use.
+        auto ref resolver() pure @safe nothrow @nogc
+        {
+            return resolver_;
+        }
+
+        /** Load single YAML document.
+         *
+         * If none or more than one YAML document is found, this throws a YAMLException.
+         *
+         * This consumes the range interface, so it can only be called once.
+         *
+         * Returns: Root node of the document.
+         *
+         * Throws: YAMLException if there wasn't exactly one document
+         *         or on a YAML parsing error.
+         */
+        Node load() @safe
+        {
+            enforce!YAMLException(!empty, "Zero documents in stream");
+            auto output = front;
+            popFront();
+            enforce!YAMLException(empty, "More than one document in stream");
+            return output;
+        }
+
+        /** Implements the empty range primitive.
+         *
+         * If there's no more documents left in the stream, this will be true.
+         *
+         * Returns: `true` if no more documents left, `false` otherwise.
+         */
+        bool empty() @safe
+        {
+            // currentNode and done_ are both invalid until popFront is called once
+            if (!rangeInitialized)
+            {
+                popFront();
+            }
+            return done_;
+        }
+        /** Implements the popFront range primitive.
+         *
+         * Reads the next document from the stream, if possible.
+         */
+        void popFront() @safe
+        {
+            // The composer is initialized lazily here, rather than in the
+            // constructor, so that resolver customizations made after
+            // construction (e.g. addImplicitResolver) are still picked up.
+            //
+            // NOTE: this used to be a `static` local variable. In D a static
+            // local is a single thread-local variable shared by *all* Loader
+            // instances, so iterating two Loaders in an interleaved manner
+            // corrupted both. Storing the composer as a member fixes that.
+            if (!rangeInitialized)
+            {
+                composer_ = Composer(parser_, resolver_);
+                rangeInitialized = true;
+            }
+            assert(!done_, "Loader.popFront called on empty range");
+            if (composer_.checkNode())
+            {
+                currentNode = composer_.getNode();
+            }
+            else
+            {
+                done_ = true;
+            }
+        }
+        /** Implements the front range primitive.
+         *
+         * Returns: the current document as a Node.
+         */
+        Node front() @safe
+        {
+            // currentNode and done_ are both invalid until popFront is called once
+            if (!rangeInitialized)
+            {
+                popFront();
+            }
+            return currentNode;
+        }
+
+        // Scan all tokens, throwing them away. Used for benchmarking.
+        void scanBench() @safe
+        {
+            try
+            {
+                while(!scanner_.empty)
+                {
+                    scanner_.popFront();
+                }
+            }
+            catch(YAMLException e)
+            {
+                throw new YAMLException("Unable to scan YAML from stream " ~
+                                        name_ ~ " : " ~ e.msg, e.file, e.line);
+            }
+        }
+
+
+        // Parse and return all events. Used for debugging.
+        auto parse() @safe
+        {
+            return parser_;
+        }
+}
+/// Load single YAML document from a file:
+@safe unittest
+{
+ write("example.yaml", "Hello world!");
+ auto rootNode = Loader.fromFile("example.yaml").load();
+ assert(rootNode == "Hello world!");
+}
+/// Load single YAML document from an already-opened file:
+@system unittest
+{
+ // Open a temporary file
+ auto file = File.tmpfile;
+ // Write valid YAML
+ file.write("Hello world!");
+ // Return to the beginning
+ file.seek(0);
+ // Load document
+ auto rootNode = Loader.fromFile(file).load();
+ assert(rootNode == "Hello world!");
+}
+/// Load all YAML documents from a file:
+@safe unittest
+{
+ import std.array : array;
+ import std.file : write;
+ write("example.yaml",
+ "---\n"~
+ "Hello world!\n"~
+ "...\n"~
+ "---\n"~
+ "Hello world 2!\n"~
+ "...\n"
+ );
+ auto nodes = Loader.fromFile("example.yaml").array;
+ assert(nodes.length == 2);
+}
+/// Iterate over YAML documents in a file, lazily loading them:
+@safe unittest
+{
+ import std.file : write;
+ write("example.yaml",
+ "---\n"~
+ "Hello world!\n"~
+ "...\n"~
+ "---\n"~
+ "Hello world 2!\n"~
+ "...\n"
+ );
+ auto loader = Loader.fromFile("example.yaml");
+
+ foreach(ref node; loader)
+ {
+ //Do something
+ }
+}
+/// Load YAML from a string:
+@safe unittest
+{
+ string yaml_input = ("red: '#ff0000'\n" ~
+ "green: '#00ff00'\n" ~
+ "blue: '#0000ff'");
+
+ auto colors = Loader.fromString(yaml_input).load();
+
+ foreach(string color, string value; colors)
+ {
+ // Do something with the color and its value...
+ }
+}
+
+/// Load a file into a buffer in memory and then load YAML from that buffer:
+@safe unittest
+{
+ import std.file : read, write;
+ import std.stdio : writeln;
+ // Create a yaml document
+ write("example.yaml",
+ "---\n"~
+ "Hello world!\n"~
+ "...\n"~
+ "---\n"~
+ "Hello world 2!\n"~
+ "...\n"
+ );
+ try
+ {
+ string buffer = readText("example.yaml");
+ auto yamlNode = Loader.fromString(buffer);
+
+ // Read data from yamlNode here...
+ }
+ catch(FileException e)
+ {
+ writeln("Failed to read file 'example.yaml'");
+ }
+}
+/// Use a custom resolver to support custom data types and/or implicit tags:
+@safe unittest
+{
+ import std.file : write;
+ // Create a yaml document
+ write("example.yaml",
+ "---\n"~
+ "Hello world!\n"~
+ "...\n"
+ );
+
+ auto loader = Loader.fromFile("example.yaml");
+
+ // Add resolver expressions here...
+ // loader.resolver.addImplicitResolver(...);
+
+ auto rootNode = loader.load();
+}
+
+//Issue #258 - https://github.com/dlang-community/D-YAML/issues/258
+@safe unittest
+{
+ auto yaml = "{\n\"root\": {\n\t\"key\": \"value\"\n }\n}";
+ auto doc = Loader.fromString(yaml).load();
+ assert(doc.isValid);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/node.d b/src/ext_depends/D-YAML/source/dyaml/node.d
new file mode 100644
index 0000000..24a62a4
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/node.d
@@ -0,0 +1,2488 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/// Node of a YAML document. Used to read YAML data once it's loaded,
+/// and to prepare data to emit.
+module dyaml.node;
+
+
+import std.algorithm;
+import std.array;
+import std.conv;
+import std.datetime;
+import std.exception;
+import std.math;
+import std.meta : AliasSeq;
+import std.range;
+import std.string;
+import std.traits;
+import std.typecons;
+import std.variant;
+
+import dyaml.event;
+import dyaml.exception;
+import dyaml.style;
+
+/// Exception thrown at node related errors.
+class NodeException : YAMLException
+{
+    package:
+        // Construct a NodeException.
+        //
+        // Params: msg   = Error message.
+        //         start = Start position of the node the error relates to;
+        //                 appended to the message for diagnostics.
+        this(string msg, Mark start, string file = __FILE__, size_t line = __LINE__)
+            @safe
+        {
+            super(msg ~ "\nNode at: " ~ start.toString(), file, line);
+        }
+}
+
+// Node kinds.
+enum NodeID : ubyte
+{
+    /// Node holds a single scalar value.
+    scalar,
+    /// Node holds a sequence of nodes.
+    sequence,
+    /// Node holds key-value pairs of nodes.
+    mapping,
+    /// Node is uninitialized (holds no value).
+    invalid
+}
+
+/// Null YAML type. Used in nodes with _null values.
+struct YAMLNull
+{
+    /// Used for string conversion; matches YAML's canonical "null".
+    string toString() const pure @safe nothrow {return "null";}
+}
+
+// Merge YAML type, used to support "tag:yaml.org,2002:merge".
+// Marker type only; it carries no data.
+package struct YAMLMerge{}
+
+// Key-value pair of YAML nodes, used in mappings.
+private struct Pair
+{
+    public:
+        /// Key node.
+        Node key;
+        /// Value node.
+        Node value;
+
+        /// Construct a Pair from two values, wrapping each in a Node
+        /// unless it already is one.
+        this(K, V)(K key, V value)
+        {
+            static if(is(Unqual!K == Node))
+            {
+                this.key = key;
+            }
+            else
+            {
+                this.key = Node(key);
+            }
+            static if(is(Unqual!V == Node))
+            {
+                this.value = value;
+            }
+            else
+            {
+                this.value = Node(value);
+            }
+        }
+
+        /// Equality test with another Pair: true only if both the keys and
+        /// the values compare equal.
+        bool opEquals(const ref Pair rhs) const @safe
+        {
+            return key == rhs.key && value == rhs.value;
+        }
+
+        // Three-way comparison: order by key first, break ties by value.
+        int opCmp(ref const(Pair) rhs) const @safe
+        {
+            if(const keyCmp = key.opCmp(rhs.key))
+            {
+                return keyCmp;
+            }
+            return value.opCmp(rhs.value);
+        }
+}
+
+/// Resolved type of a node's scalar/collection value.
+enum NodeType
+{
+    /// YAML null.
+    null_,
+    /// Mapping-merge marker ("tag:yaml.org,2002:merge").
+    merge,
+    /// Boolean value.
+    boolean,
+    /// Integer (stored as long).
+    integer,
+    /// Floating point (stored as real).
+    decimal,
+    /// Binary data (stored as ubyte[]).
+    binary,
+    /// Timestamp (stored as SysTime).
+    timestamp,
+    /// String value.
+    string,
+    /// Mapping (key-value pairs).
+    mapping,
+    /// Sequence of nodes.
+    sequence,
+    /// Uninitialized node.
+    invalid
+}
+
+/** YAML node.
+ *
+ * This is a pseudo-dynamic type that can store any YAML value, including a
+ * sequence or mapping of nodes. You can get data from a Node directly or
+ * iterate over it if it's a collection.
+ */
+struct Node
+{
+ public:
+ alias Pair = .Pair;
+
+ package:
+ // YAML value type.
+ alias Value = Algebraic!(YAMLNull, YAMLMerge, bool, long, real, ubyte[], SysTime, string,
+ Node.Pair[], Node[]);
+
+ // Can Value hold this type naturally?
+ enum allowed(T) = isIntegral!T ||
+ isFloatingPoint!T ||
+ isSomeString!T ||
+ is(Unqual!T == bool) ||
+ Value.allowed!T;
+
+ // Stored value.
+ Value value_;
+ // Start position of the node.
+ Mark startMark_;
+
+ // Tag of the node.
+ string tag_;
+ // Node scalar style. Used to remember style this node was loaded with.
+ ScalarStyle scalarStyle = ScalarStyle.invalid;
+ // Node collection style. Used to remember style this node was loaded with.
+ CollectionStyle collectionStyle = CollectionStyle.invalid;
+
+ public:
+ /** Construct a Node from a value.
+ *
+ * Any type except for Node can be stored in a Node, but default YAML
+ * types (integers, floats, strings, timestamps, etc.) will be stored
+ * more efficiently. To create a node representing a null value,
+ * construct it from YAMLNull.
+ *
+ * If value is a node, its value will be copied directly. The tag and
+ * other information attached to the original node will be discarded.
+ *
+ * If value is an array of nodes or pairs, it is stored directly.
+ * Otherwise, every value in the array is converted to a node, and
+ * those nodes are stored.
+ *
+ * Note that to emit any non-default types you store
+ * in a node, you need a Representer to represent them in YAML -
+ * otherwise emitting will fail.
+ *
+ * Params: value = Value to store in the node.
+ * tag = Overrides tag of the node when emitted, regardless
+ * of tag determined by Representer. Representer uses
+ * this to determine YAML data type when a D data type
+ * maps to multiple different YAML data types. Tag must
+ * be in full form, e.g. "tag:yaml.org,2002:int", not
+ * a shortcut, like "!!int".
+ */
+        this(T)(T value, const string tag = null) @safe
+            if (allowed!T || isArray!T || isAssociativeArray!T || is(Unqual!T == Node) || castableToNode!T)
+        {
+            tag_ = tag;
+
+            // Unlike with assignment, we're just copying the value.
+            static if (is(Unqual!T == Node))
+            {
+                setValue(value.value_);
+            }
+            else static if(isSomeString!T)
+            {
+                // All string types are normalized to string for storage.
+                setValue(value.to!string);
+            }
+            else static if(is(Unqual!T == bool))
+            {
+                setValue(cast(bool)value);
+            }
+            else static if(isIntegral!T)
+            {
+                // All integral types are widened to long for storage.
+                setValue(cast(long)value);
+            }
+            else static if(isFloatingPoint!T)
+            {
+                // All floating point types are widened to real for storage.
+                setValue(cast(real)value);
+            }
+            else static if (isArray!T)
+            {
+                alias ElementT = Unqual!(ElementType!T);
+                // Construction from raw node or pair array: stored directly.
+                static if(is(ElementT == Node) || is(ElementT == Node.Pair))
+                {
+                    setValue(value);
+                }
+                // Need to handle byte buffers separately.
+                else static if(is(ElementT == byte) || is(ElementT == ubyte))
+                {
+                    setValue(cast(ubyte[]) value);
+                }
+                else
+                {
+                    // Any other array: convert each element to a Node.
+                    Node[] nodes;
+                    foreach(ref v; value)
+                    {
+                        nodes ~= Node(v);
+                    }
+                    setValue(nodes);
+                }
+            }
+            else static if (isAssociativeArray!T)
+            {
+                // Associative array: convert each entry to a key-value Pair.
+                // Note: iteration order of an AA is unspecified.
+                Node.Pair[] pairs;
+                foreach(k, ref v; value)
+                {
+                    pairs ~= Pair(k, v);
+                }
+                setValue(pairs);
+            }
+            // User defined type.
+            else
+            {
+                setValue(value);
+            }
+        }
+ /// Construct a scalar node
+ @safe unittest
+ {
+ // Integer
+ {
+ auto node = Node(5);
+ }
+ // String
+ {
+ auto node = Node("Hello world!");
+ }
+ // Floating point
+ {
+ auto node = Node(5.0f);
+ }
+ // Boolean
+ {
+ auto node = Node(true);
+ }
+ // Time
+ {
+ auto node = Node(SysTime(DateTime(2005, 6, 15, 20, 0, 0), UTC()));
+ }
+ // Integer, dumped as a string
+ {
+ auto node = Node(5, "tag:yaml.org,2002:str");
+ }
+ }
+ /// Construct a sequence node
+ @safe unittest
+ {
+ // Will be emitted as a sequence (default for arrays)
+ {
+ auto seq = Node([1, 2, 3, 4, 5]);
+ }
+ // Will be emitted as a set (overridden tag)
+ {
+ auto set = Node([1, 2, 3, 4, 5], "tag:yaml.org,2002:set");
+ }
+ // Can also store arrays of arrays
+ {
+ auto node = Node([[1,2], [3,4]]);
+ }
+ }
+ /// Construct a mapping node
+ @safe unittest
+ {
+ // Will be emitted as an unordered mapping (default for mappings)
+ auto map = Node([1 : "a", 2 : "b"]);
+ // Will be emitted as an ordered map (overridden tag)
+ auto omap = Node([1 : "a", 2 : "b"], "tag:yaml.org,2002:omap");
+ // Will be emitted as pairs (overridden tag)
+ auto pairs = Node([1 : "a", 2 : "b"], "tag:yaml.org,2002:pairs");
+ }
+ @safe unittest
+ {
+ {
+ auto node = Node(42);
+ assert(node.nodeID == NodeID.scalar);
+ assert(node.as!int == 42 && node.as!float == 42.0f && node.as!string == "42");
+ }
+
+ {
+ auto node = Node("string");
+ assert(node.as!string == "string");
+ }
+ }
+ @safe unittest
+ {
+ with(Node([1, 2, 3]))
+ {
+ assert(nodeID == NodeID.sequence);
+ assert(length == 3);
+ assert(opIndex(2).as!int == 3);
+ }
+
+ }
+ @safe unittest
+ {
+ int[string] aa;
+ aa["1"] = 1;
+ aa["2"] = 2;
+ with(Node(aa))
+ {
+ assert(nodeID == NodeID.mapping);
+ assert(length == 2);
+ assert(opIndex("2").as!int == 2);
+ }
+ }
+ @safe unittest
+ {
+ auto node = Node(Node(4, "tag:yaml.org,2002:str"));
+ assert(node == 4);
+ assert(node.tag_ == "");
+ }
+
+ /** Construct a node from arrays of _keys and _values.
+ *
+ * Constructs a mapping node with key-value pairs from
+ * _keys and _values, keeping their order. Useful when order
+ * is important (ordered maps, pairs).
+ *
+ *
+ * keys and values must have equal length.
+ *
+ *
+ * If _keys and/or _values are nodes, they are stored directly/
+ * Otherwise they are converted to nodes and then stored.
+ *
+ * Params: keys = Keys of the mapping, from first to last pair.
+ * values = Values of the mapping, from first to last pair.
+ * tag = Overrides tag of the node when emitted, regardless
+ * of tag determined by Representer. Representer uses
+ * this to determine YAML data type when a D data type
+ * maps to multiple different YAML data types.
+ * This is used to differentiate between YAML unordered
+ * mappings ("!!map"), ordered mappings ("!!omap"), and
+ * pairs ("!!pairs") which are all internally
+ * represented as an array of node pairs. Tag must be
+ * in full form, e.g. "tag:yaml.org,2002:omap", not a
+ * shortcut, like "!!omap".
+ *
+ */
+        this(K, V)(K[] keys, V[] values, const string tag = null)
+            if(!(isSomeString!(K[]) || isSomeString!(V[])))
+        in(keys.length == values.length,
+           "Lengths of keys and values arrays to construct " ~
+           "a YAML node from don't match")
+        {
+            tag_ = tag;
+
+            // Pair keys[i] with values[i], preserving the given order.
+            Node.Pair[] pairs;
+            foreach(i; 0 .. keys.length){pairs ~= Pair(keys[i], values[i]);}
+            setValue(pairs);
+        }
+ ///
+ @safe unittest
+ {
+ // Will be emitted as an unordered mapping (default for mappings)
+ auto map = Node([1, 2], ["a", "b"]);
+ // Will be emitted as an ordered map (overridden tag)
+ auto omap = Node([1, 2], ["a", "b"], "tag:yaml.org,2002:omap");
+ // Will be emitted as pairs (overriden tag)
+ auto pairs = Node([1, 2], ["a", "b"], "tag:yaml.org,2002:pairs");
+ }
+ @safe unittest
+ {
+ with(Node(["1", "2"], [1, 2]))
+ {
+ assert(nodeID == NodeID.mapping);
+ assert(length == 2);
+ assert(opIndex("2").as!int == 2);
+ }
+
+ }
+
+ /// Is this node valid (initialized)?
+ @property bool isValid() const @safe pure nothrow
+ {
+ return value_.hasValue;
+ }
+
+ /// Return tag of the node.
+ @property string tag() const @safe nothrow
+ {
+ return tag_;
+ }
+
+ /// Return the start position of the node.
+ @property Mark startMark() const @safe pure nothrow
+ {
+ return startMark_;
+ }
+
+ /** Equality test.
+ *
+ * If T is Node, recursively compares all subnodes.
+ * This might be quite expensive if testing entire documents.
+ *
+ * If T is not Node, gets a value of type T from the node and tests
+ * equality with that.
+ *
+ * To test equality with a null YAML value, use YAMLNull.
+ *
+ * Params: rhs = Variable to test equality with.
+ *
+ * Returns: true if equal, false otherwise.
+ */
+        // Node-to-node equality: defined as comparing equal under opCmp,
+        // which recursively compares subnodes.
+        bool opEquals(const Node rhs) const @safe
+        {
+            return opCmp(rhs) == 0;
+        }
+        // Equality with a non-Node value: convert this node to T (without
+        // implicit to-string conversion) and compare. A failed conversion
+        // means "not equal" rather than an error.
+        bool opEquals(T)(const auto ref T rhs) const
+        {
+            try
+            {
+                auto stored = get!(T, No.stringConversion);
+                // NaNs aren't normally equal to each other, but we'll pretend they are.
+                static if(isFloatingPoint!T)
+                {
+                    return rhs == stored || (isNaN(rhs) && isNaN(stored));
+                }
+                else
+                {
+                    return rhs == stored;
+                }
+            }
+            catch(NodeException e)
+            {
+                return false;
+            }
+        }
+ ///
+ @safe unittest
+ {
+ auto node = Node(42);
+
+ assert(node == 42);
+ assert(node != "42");
+ assert(node != "43");
+
+ auto node2 = Node(YAMLNull());
+ assert(node2 == YAMLNull());
+
+ const node3 = Node(42);
+ assert(node3 == 42);
+ }
+
+ /// Shortcut for get().
+ alias as = get;
+
+ /** Get the value of the node as specified type.
+ *
+ * If the specifed type does not match type in the node,
+ * conversion is attempted. The stringConversion template
+ * parameter can be used to disable conversion from non-string
+ * types to strings.
+ *
+ * Numeric values are range checked, throwing if out of range of
+ * requested type.
+ *
+ * Timestamps are stored as std.datetime.SysTime.
+ * Binary values are decoded and stored as ubyte[].
+ *
+ * To get a null value, use get!YAMLNull . This is to
+ * prevent getting null values for types such as strings or classes.
+ *
+ * $(BR)$(B Mapping default values:)
+ *
+ * $(PBR
+ * The '=' key can be used to denote the default value of a mapping.
+ * This can be used when a node is scalar in early versions of a program,
+ * but is replaced by a mapping later. Even if the node is a mapping, the
+ * get method can be used as if it was a scalar if it has a default value.
+ * This way, new YAML files where the node is a mapping can still be read
+ * by old versions of the program, which expect the node to be a scalar.
+ * )
+ *
+ * Returns: Value of the node as specified type.
+ *
+ * Throws: NodeException if unable to convert to specified type, or if
+ * the value is out of range of requested type.
+ */
+        inout(T) get(T, Flag!"stringConversion" stringConversion = Yes.stringConversion)() inout
+            if (allowed!(Unqual!T) || hasNodeConstructor!(Unqual!T))
+        {
+            // Fast path: stored value already has exactly the requested type.
+            if(isType!(Unqual!T)){return getValue!T;}
+
+            static if(!allowed!(Unqual!T))
+            {
+                // User-defined type: construct it from this node via its
+                // Node constructor, passing the tag too when it accepts one.
+                static if (hasSimpleNodeConstructor!T)
+                {
+                    alias params = AliasSeq!(this);
+                }
+                else static if (hasExpandedNodeConstructor!T)
+                {
+                    alias params = AliasSeq!(this, tag_);
+                }
+                else
+                {
+                    static assert(0, "Unknown Node constructor?");
+                }
+
+                static if (is(T == class))
+                {
+                    return new inout T(params);
+                }
+                else static if (is(T == struct))
+                {
+                    return T(params);
+                }
+                else
+                {
+                    static assert(0, "Unhandled user type");
+                }
+            } else {
+
+                // If we're getting from a mapping and we're not getting Node.Pair[],
+                // we're getting the default value (the '=' key).
+                if(nodeID == NodeID.mapping){return this["="].get!( T, stringConversion);}
+
+                static if(isSomeString!T)
+                {
+                    static if(!stringConversion)
+                    {
+                        // Strict mode: only an actual string value may be read.
+                        enforce(type == NodeType.string, new NodeException(
+                            "Node stores unexpected type: " ~ text(type) ~
+                            ". Expected: " ~ typeid(T).toString(), startMark_));
+                        return to!T(getValue!string);
+                    }
+                    else
+                    {
+                        // Try to convert to string.
+                        try
+                        {
+                            return coerceValue!T();
+                        }
+                        catch(VariantException e)
+                        {
+                            throw new NodeException("Unable to convert node value to string", startMark_);
+                        }
+                    }
+                }
+                else static if(isFloatingPoint!T)
+                {
+                    // Integers convert to floating point; anything else throws.
+                    final switch (type)
+                    {
+                        case NodeType.integer:
+                            return to!T(getValue!long);
+                        case NodeType.decimal:
+                            return to!T(getValue!real);
+                        case NodeType.binary:
+                        case NodeType.string:
+                        case NodeType.boolean:
+                        case NodeType.null_:
+                        case NodeType.merge:
+                        case NodeType.invalid:
+                        case NodeType.timestamp:
+                        case NodeType.mapping:
+                        case NodeType.sequence:
+                            throw new NodeException("Node stores unexpected type: " ~ text(type) ~
+                                ". Expected: " ~ typeid(T).toString, startMark_);
+                    }
+                }
+                else static if(isIntegral!T)
+                {
+                    // Stored as long; range-check before narrowing to T.
+                    enforce(type == NodeType.integer, new NodeException("Node stores unexpected type: " ~ text(type) ~
+                        ". Expected: " ~ typeid(T).toString, startMark_));
+                    immutable temp = getValue!long;
+                    enforce(temp >= T.min && temp <= T.max,
+                        new NodeException("Integer value of type " ~ typeid(T).toString() ~
+                            " out of range. Value: " ~ to!string(temp), startMark_));
+                    return temp.to!T;
+                }
+                else throw new NodeException("Node stores unexpected type: " ~ text(type) ~
+                    ". Expected: " ~ typeid(T).toString, startMark_);
+            }
+        }
+ /// Automatic type conversion
+ @safe unittest
+ {
+ auto node = Node(42);
+
+ assert(node.get!int == 42);
+ assert(node.get!string == "42");
+ assert(node.get!double == 42.0);
+ }
+ /// Scalar node to struct and vice versa
+ @safe unittest
+ {
+ import dyaml.dumper : dumper;
+ import dyaml.loader : Loader;
+ static struct MyStruct
+ {
+ int x, y, z;
+
+ this(int x, int y, int z) @safe
+ {
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+
+ this(Node node) @safe
+ {
+ auto parts = node.as!string().split(":");
+ x = parts[0].to!int;
+ y = parts[1].to!int;
+ z = parts[2].to!int;
+ }
+
+ Node opCast(T: Node)() @safe
+ {
+ //Using custom scalar format, x:y:z.
+ auto scalar = format("%s:%s:%s", x, y, z);
+ //Representing as a scalar, with custom tag to specify this data type.
+ return Node(scalar, "!mystruct.tag");
+ }
+ }
+
+ auto appender = new Appender!string;
+
+ // Dump struct to yaml document
+ dumper().dump(appender, Node(MyStruct(1,2,3)));
+
+ // Read yaml document back as a MyStruct
+ auto loader = Loader.fromString(appender.data);
+ Node node = loader.load();
+ assert(node.as!MyStruct == MyStruct(1,2,3));
+ }
+ /// Sequence node to struct and vice versa
+ @safe unittest
+ {
+ import dyaml.dumper : dumper;
+ import dyaml.loader : Loader;
+ static struct MyStruct
+ {
+ int x, y, z;
+
+ this(int x, int y, int z) @safe
+ {
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+
+ this(Node node) @safe
+ {
+ x = node[0].as!int;
+ y = node[1].as!int;
+ z = node[2].as!int;
+ }
+
+ Node opCast(T: Node)()
+ {
+ return Node([x, y, z], "!mystruct.tag");
+ }
+ }
+
+ auto appender = new Appender!string;
+
+ // Dump struct to yaml document
+ dumper().dump(appender, Node(MyStruct(1,2,3)));
+
+ // Read yaml document back as a MyStruct
+ auto loader = Loader.fromString(appender.data);
+ Node node = loader.load();
+ assert(node.as!MyStruct == MyStruct(1,2,3));
+ }
+ /// Mapping node to struct and vice versa
+ @safe unittest
+ {
+ import dyaml.dumper : dumper;
+ import dyaml.loader : Loader;
+ static struct MyStruct
+ {
+ int x, y, z;
+
+ Node opCast(T: Node)()
+ {
+ auto pairs = [Node.Pair("x", x),
+ Node.Pair("y", y),
+ Node.Pair("z", z)];
+ return Node(pairs, "!mystruct.tag");
+ }
+
+ this(int x, int y, int z)
+ {
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+
+ this(Node node) @safe
+ {
+ x = node["x"].as!int;
+ y = node["y"].as!int;
+ z = node["z"].as!int;
+ }
+ }
+
+ auto appender = new Appender!string;
+
+ // Dump struct to yaml document
+ dumper().dump(appender, Node(MyStruct(1,2,3)));
+
+ // Read yaml document back as a MyStruct
+ auto loader = Loader.fromString(appender.data);
+ Node node = loader.load();
+ assert(node.as!MyStruct == MyStruct(1,2,3));
+ }
+ /// Classes can be used too
+ @system unittest {
+ import dyaml.dumper : dumper;
+ import dyaml.loader : Loader;
+
+ static class MyClass
+ {
+ int x, y, z;
+
+ this(int x, int y, int z)
+ {
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+
+ this(Node node) @safe inout
+ {
+ auto parts = node.as!string().split(":");
+ x = parts[0].to!int;
+ y = parts[1].to!int;
+ z = parts[2].to!int;
+ }
+
+ ///Useful for Node.as!string.
+ override string toString()
+ {
+ return format("MyClass(%s, %s, %s)", x, y, z);
+ }
+
+ Node opCast(T: Node)() @safe
+ {
+ //Using custom scalar format, x:y:z.
+ auto scalar = format("%s:%s:%s", x, y, z);
+ //Representing as a scalar, with custom tag to specify this data type.
+ return Node(scalar, "!myclass.tag");
+ }
+ override bool opEquals(Object o)
+ {
+ if (auto other = cast(MyClass)o)
+ {
+ return (other.x == x) && (other.y == y) && (other.z == z);
+ }
+ return false;
+ }
+ }
+ auto appender = new Appender!string;
+
+ // Dump class to yaml document
+ dumper().dump(appender, Node(new MyClass(1,2,3)));
+
+ // Read yaml document back as a MyClass
+ auto loader = Loader.fromString(appender.data);
+ Node node = loader.load();
+ assert(node.as!MyClass == new MyClass(1,2,3));
+ }
+ // Make sure custom tags and styles are kept.
+ @safe unittest
+ {
+ static struct MyStruct
+ {
+ Node opCast(T: Node)()
+ {
+ auto node = Node("hi", "!mystruct.tag");
+ node.setStyle(ScalarStyle.doubleQuoted);
+ return node;
+ }
+ }
+
+ auto node = Node(MyStruct.init);
+ assert(node.tag == "!mystruct.tag");
+ assert(node.scalarStyle == ScalarStyle.doubleQuoted);
+ }
+ // ditto, but for collection style
+ @safe unittest
+ {
+ static struct MyStruct
+ {
+ Node opCast(T: Node)()
+ {
+ auto node = Node(["hi"], "!mystruct.tag");
+ node.setStyle(CollectionStyle.flow);
+ return node;
+ }
+ }
+
+ auto node = Node(MyStruct.init);
+ assert(node.tag == "!mystruct.tag");
+ assert(node.collectionStyle == CollectionStyle.flow);
+ }
+ @safe unittest
+ {
+ assertThrown!NodeException(Node("42").get!int);
+ assertThrown!NodeException(Node("42").get!double);
+ assertThrown!NodeException(Node(long.max).get!ushort);
+ Node(YAMLNull()).get!YAMLNull;
+ }
+ @safe unittest
+ {
+ const node = Node(42);
+ assert(node.get!int == 42);
+ assert(node.get!string == "42");
+ assert(node.get!double == 42.0);
+
+ immutable node2 = Node(42);
+ assert(node2.get!int == 42);
+ assert(node2.get!(const int) == 42);
+ assert(node2.get!(immutable int) == 42);
+ assert(node2.get!string == "42");
+ assert(node2.get!(const string) == "42");
+ assert(node2.get!(immutable string) == "42");
+ assert(node2.get!double == 42.0);
+ assert(node2.get!(const double) == 42.0);
+ assert(node2.get!(immutable double) == 42.0);
+ }
+
+ /** If this is a collection, return its _length.
+ *
+ * Otherwise, throw NodeException.
+ *
+ * Returns: Number of elements in a sequence or key-value pairs in a mapping.
+ *
+ * Throws: NodeException if this is not a sequence nor a mapping.
+ */
+        @property size_t length() const @safe
+        {
+            // Dispatch on the node kind; final switch guarantees that every
+            // NodeID member is handled.
+            with(NodeID) final switch(nodeID)
+            {
+                case sequence:
+                    return getValue!(Node[]).length;
+                case mapping:
+                    return getValue!(Pair[]).length;
+                case scalar:
+                case invalid:
+                    throw new NodeException(
+                        "Trying to get length of a " ~ nodeTypeString ~ " node",
+                        startMark_);
+            }
+        }
+ @safe unittest
+ {
+ auto node = Node([1,2,3]);
+ assert(node.length == 3);
+ const cNode = Node([1,2,3]);
+ assert(cNode.length == 3);
+ immutable iNode = Node([1,2,3]);
+ assert(iNode.length == 3);
+ }
+
+ /** Get the element at specified index.
+  *
+  * If the node is a sequence, index must be integral.
+  *
+  * If the node is a mapping, return the value corresponding to the first
+  * key equal to index. containsKey() can be used to determine if a mapping
+  * has a specific key.
+  *
+  * To get element at a null index, use YAMLNull for index.
+  *
+  * Params: index = Index to use.
+  *
+  * Returns: Value corresponding to the index.
+  *
+  * Throws: NodeException if the index could not be found,
+  *         non-integral index is used with a sequence or the node is
+  *         not a collection.
+  */
+ ref inout(Node) opIndex(T)(T index) inout @safe
+ {
+     final switch (nodeID)
+     {
+         case NodeID.sequence:
+             // Throws NodeException when the index is out of range.
+             checkSequenceIndex(index);
+             static if(isIntegral!T)
+             {
+                 return getValue!(Node[])[index];
+             }
+             else
+             {
+                 // checkSequenceIndex already rejects non-integral indexes at
+                 // runtime; this branch is unreachable for valid calls.
+                 assert(false, "Only integers may index sequence nodes");
+             }
+         case NodeID.mapping:
+             // findPair returns a negative value when no key matches index.
+             auto idx = findPair(index);
+             if(idx >= 0)
+             {
+                 return getValue!(Pair[])[idx].value;
+             }
+
+             // Only embed the index in the message when it is a string already.
+             string msg = "Mapping index not found" ~ (isSomeString!T ? ": " ~ to!string(index) : "");
+             throw new NodeException(msg, startMark_);
+         case NodeID.scalar:
+         case NodeID.invalid:
+             throw new NodeException("Trying to index a " ~ nodeTypeString ~ " node", startMark_);
+     }
+ }
+ ///
+ @safe unittest
+ {
+ Node narray = Node([11, 12, 13, 14]);
+ Node nmap = Node(["11", "12", "13", "14"], [11, 12, 13, 14]);
+
+ assert(narray[0].as!int == 11);
+ assert(null !is collectException(narray[42]));
+ assert(nmap["11"].as!int == 11);
+ assert(nmap["14"].as!int == 14);
+ }
+ @safe unittest
+ {
+ Node narray = Node([11, 12, 13, 14]);
+ Node nmap = Node(["11", "12", "13", "14"], [11, 12, 13, 14]);
+
+ assert(narray[0].as!int == 11);
+ assert(null !is collectException(narray[42]));
+ assert(nmap["11"].as!int == 11);
+ assert(nmap["14"].as!int == 14);
+ assert(null !is collectException(nmap["42"]));
+
+ narray.add(YAMLNull());
+ nmap.add(YAMLNull(), "Nothing");
+ assert(narray[4].as!YAMLNull == YAMLNull());
+ assert(nmap[YAMLNull()].as!string == "Nothing");
+
+ assertThrown!NodeException(nmap[11]);
+ assertThrown!NodeException(nmap[14]);
+ }
+
+ /** Determine if a collection contains specified value.
+  *
+  * If the node is a sequence, check if it contains the specified value.
+  * If it's a mapping, check if it has a value that matches specified value.
+  *
+  * Params: rhs = Item to look for. Use YAMLNull to check for a null value.
+  *
+  * Returns: true if rhs was found, false otherwise.
+  *
+  * Throws: NodeException if the node is not a collection.
+  */
+ bool contains(T)(T rhs) const
+ {
+     // No.key: in mappings this searches the pair *values*, not the keys.
+     return contains_!(T, No.key, "contains")(rhs);
+ }
+ @safe unittest
+ {
+ auto mNode = Node(["1", "2", "3"]);
+ assert(mNode.contains("2"));
+ const cNode = Node(["1", "2", "3"]);
+ assert(cNode.contains("2"));
+ immutable iNode = Node(["1", "2", "3"]);
+ assert(iNode.contains("2"));
+ }
+
+
+ /** Determine if a mapping contains specified key.
+  *
+  * Params: rhs = Key to look for. Use YAMLNull to check for a null key.
+  *
+  * Returns: true if rhs was found, false otherwise.
+  *
+  * Throws: NodeException if the node is not a mapping.
+  */
+ bool containsKey(T)(T rhs) const
+ {
+     // Yes.key: search the mapping's keys; sequences are rejected.
+     return contains_!(T, Yes.key, "containsKey")(rhs);
+ }
+
+ // Unittest for contains() and containsKey().
+ @safe unittest
+ {
+ auto seq = Node([1, 2, 3, 4, 5]);
+ assert(seq.contains(3));
+ assert(seq.contains(5));
+ assert(!seq.contains("5"));
+ assert(!seq.contains(6));
+ assert(!seq.contains(float.nan));
+ assertThrown!NodeException(seq.containsKey(5));
+
+ auto seq2 = Node(["1", "2"]);
+ assert(seq2.contains("1"));
+ assert(!seq2.contains(1));
+
+ auto map = Node(["1", "2", "3", "4"], [1, 2, 3, 4]);
+ assert(map.contains(1));
+ assert(!map.contains("1"));
+ assert(!map.contains(5));
+ assert(!map.contains(float.nan));
+ assert(map.containsKey("1"));
+ assert(map.containsKey("4"));
+ assert(!map.containsKey(1));
+ assert(!map.containsKey("5"));
+
+ assert(!seq.contains(YAMLNull()));
+ assert(!map.contains(YAMLNull()));
+ assert(!map.containsKey(YAMLNull()));
+ seq.add(YAMLNull());
+ map.add("Nothing", YAMLNull());
+ assert(seq.contains(YAMLNull()));
+ assert(map.contains(YAMLNull()));
+ assert(!map.containsKey(YAMLNull()));
+ map.add(YAMLNull(), "Nothing");
+ assert(map.containsKey(YAMLNull()));
+
+ auto map2 = Node([1, 2, 3, 4], [1, 2, 3, 4]);
+ assert(!map2.contains("1"));
+ assert(map2.contains(1));
+ assert(!map2.containsKey("1"));
+ assert(map2.containsKey(1));
+
+ // scalar
+ assertThrown!NodeException(Node(1).contains(4));
+ assertThrown!NodeException(Node(1).containsKey(4));
+
+ auto mapNan = Node([1.0, 2, double.nan], [1, double.nan, 5]);
+
+ assert(mapNan.contains(double.nan));
+ assert(mapNan.containsKey(double.nan));
+ }
+
+ /// Assignment (shallow copy) by value.
+ ///
+ /// Copies the stored value, source position, tag and both style settings
+ /// from rhs. Collections are shared, not deep-copied.
+ void opAssign()(auto ref Node rhs)
+ {
+     // NOTE(review): presumably setValue cannot throw when re-storing an
+     // existing node's value_, hence assumeWontThrow — confirm.
+     assumeWontThrow(setValue(rhs.value_));
+     startMark_ = rhs.startMark_;
+     tag_ = rhs.tag_;
+     scalarStyle = rhs.scalarStyle;
+     collectionStyle = rhs.collectionStyle;
+ }
+ // Unittest for opAssign().
+ @safe unittest
+ {
+ auto seq = Node([1, 2, 3, 4, 5]);
+ auto assigned = seq;
+ assert(seq == assigned,
+ "Node.opAssign() doesn't produce an equivalent copy");
+ }
+
+ /** Set element at specified index in a collection.
+  *
+  * This method can only be called on collection nodes.
+  *
+  * If the node is a sequence, index must be integral.
+  *
+  * If the node is a mapping, sets the _value corresponding to the first
+  * key matching index (including conversion, so e.g. "42" matches 42).
+  *
+  * If the node is a mapping and no key matches index, a new key-value
+  * pair is added to the mapping. In sequences the index must be in
+  * range. This ensures behavior similar to D arrays and associative
+  * arrays.
+  *
+  * To set element at a null index, use YAMLNull for index.
+  *
+  * Params:
+  *          value = Value to assign.
+  *          index = Index of the value to set.
+  *
+  * Throws: NodeException if the node is not a collection, index is out
+  *         of range or if a non-integral index is used on a sequence node.
+  */
+ void opIndexAssign(K, V)(V value, K index)
+ {
+     final switch (nodeID)
+     {
+         case NodeID.sequence:
+             // Throws NodeException when the index is out of range.
+             checkSequenceIndex(index);
+             static if(isIntegral!K || is(Unqual!K == bool))
+             {
+                 auto nodes = getValue!(Node[]);
+                 // Wrap non-Node values in a Node before storing.
+                 static if(is(Unqual!V == Node)){nodes[index] = value;}
+                 else                           {nodes[index] = Node(value);}
+                 setValue(nodes);
+                 return;
+             }
+             assert(false, "Only integers may index sequence nodes");
+         case NodeID.mapping:
+             const idx = findPair(index);
+             // No matching key: append a new pair instead of failing.
+             if(idx < 0){add(index, value);}
+             else
+             {
+                 auto pairs = as!(Node.Pair[])();
+                 static if(is(Unqual!V == Node)){pairs[idx].value = value;}
+                 else                           {pairs[idx].value = Node(value);}
+                 setValue(pairs);
+             }
+             return;
+         case NodeID.scalar:
+         case NodeID.invalid:
+             throw new NodeException("Trying to index a " ~ nodeTypeString ~ " node", startMark_);
+     }
+ }
+ @safe unittest
+ {
+ with(Node([1, 2, 3, 4, 3]))
+ {
+ opIndexAssign(42, 3);
+ assert(length == 5);
+ assert(opIndex(3).as!int == 42);
+
+ opIndexAssign(YAMLNull(), 0);
+ assert(opIndex(0) == YAMLNull());
+ }
+ with(Node(["1", "2", "3"], [4, 5, 6]))
+ {
+ opIndexAssign(42, "3");
+ opIndexAssign(123, 456);
+ assert(length == 4);
+ assert(opIndex("3").as!int == 42);
+ assert(opIndex(456).as!int == 123);
+
+ opIndexAssign(43, 3);
+ //3 and "3" should be different
+ assert(length == 5);
+ assert(opIndex("3").as!int == 42);
+ assert(opIndex(3).as!int == 43);
+
+ opIndexAssign(YAMLNull(), "2");
+ assert(opIndex("2") == YAMLNull());
+ }
+ }
+
+ /** Return a range object iterating over a sequence, getting each
+ * element as T.
+ *
+ * If T is Node, simply iterate over the nodes in the sequence.
+ * Otherwise, convert each node to T during iteration.
+ *
+ * Throws: NodeException if the node is not a sequence or an element
+ * could not be converted to specified type.
+ */
+ template sequence(T = Node)
+ {
+ struct Range(N)
+ {
+ N subnodes;
+ size_t position;
+
+ this(N nodes)
+ {
+ subnodes = nodes;
+ position = 0;
+ }
+
+ /* Input range functionality. */
+ bool empty() const @property { return position >= subnodes.length; }
+
+ void popFront()
+ {
+ enforce(!empty, "Attempted to popFront an empty sequence");
+ position++;
+ }
+
+ T front() const @property
+ {
+ enforce(!empty, "Attempted to take the front of an empty sequence");
+ static if (is(Unqual!T == Node))
+ return subnodes[position];
+ else
+ return subnodes[position].as!T;
+ }
+
+ /* Forward range functionality. */
+ Range save() { return this; }
+
+ /* Bidirectional range functionality. */
+ void popBack()
+ {
+ enforce(!empty, "Attempted to popBack an empty sequence");
+ subnodes = subnodes[0 .. $ - 1];
+ }
+
+ T back()
+ {
+ enforce(!empty, "Attempted to take the back of an empty sequence");
+ static if (is(Unqual!T == Node))
+ return subnodes[$ - 1];
+ else
+ return subnodes[$ - 1].as!T;
+ }
+
+ /* Random-access range functionality. */
+ size_t length() const @property { return subnodes.length; }
+ T opIndex(size_t index)
+ {
+ static if (is(Unqual!T == Node))
+ return subnodes[index];
+ else
+ return subnodes[index].as!T;
+ }
+
+ static assert(isInputRange!Range);
+ static assert(isForwardRange!Range);
+ static assert(isBidirectionalRange!Range);
+ static assert(isRandomAccessRange!Range);
+ }
+ auto sequence()
+ {
+ enforce(nodeID == NodeID.sequence,
+ new NodeException("Trying to 'sequence'-iterate over a " ~ nodeTypeString ~ " node",
+ startMark_));
+ return Range!(Node[])(get!(Node[]));
+ }
+ auto sequence() const
+ {
+ enforce(nodeID == NodeID.sequence,
+ new NodeException("Trying to 'sequence'-iterate over a " ~ nodeTypeString ~ " node",
+ startMark_));
+ return Range!(const(Node)[])(get!(Node[]));
+ }
+ }
+ @safe unittest
+ {
+ Node n1 = Node([1, 2, 3, 4]);
+ int[int] array;
+ Node n2 = Node(array);
+ const n3 = Node([1, 2, 3, 4]);
+
+ auto r = n1.sequence!int.map!(x => x * 10);
+ assert(r.equal([10, 20, 30, 40]));
+
+ assertThrown(n2.sequence);
+
+ auto r2 = n3.sequence!int.map!(x => x * 10);
+ assert(r2.equal([10, 20, 30, 40]));
+ }
+
+ /** Return a range object iterating over mapping's pairs.
+ *
+ * Throws: NodeException if the node is not a mapping.
+ *
+ */
+ template mapping()
+ {
+ struct Range(T)
+ {
+ T pairs;
+ size_t position;
+
+ this(T pairs) @safe
+ {
+ this.pairs = pairs;
+ position = 0;
+ }
+
+ /* Input range functionality. */
+ bool empty() @safe { return position >= pairs.length; }
+
+ void popFront() @safe
+ {
+ enforce(!empty, "Attempted to popFront an empty mapping");
+ position++;
+ }
+
+ auto front() @safe
+ {
+ enforce(!empty, "Attempted to take the front of an empty mapping");
+ return pairs[position];
+ }
+
+ /* Forward range functionality. */
+ Range save() @safe { return this; }
+
+ /* Bidirectional range functionality. */
+ void popBack() @safe
+ {
+ enforce(!empty, "Attempted to popBack an empty mapping");
+ pairs = pairs[0 .. $ - 1];
+ }
+
+ auto back() @safe
+ {
+ enforce(!empty, "Attempted to take the back of an empty mapping");
+ return pairs[$ - 1];
+ }
+
+ /* Random-access range functionality. */
+ size_t length() const @property @safe { return pairs.length; }
+ auto opIndex(size_t index) @safe { return pairs[index]; }
+
+ static assert(isInputRange!Range);
+ static assert(isForwardRange!Range);
+ static assert(isBidirectionalRange!Range);
+ static assert(isRandomAccessRange!Range);
+ }
+
+ auto mapping()
+ {
+ enforce(nodeID == NodeID.mapping,
+ new NodeException("Trying to 'mapping'-iterate over a "
+ ~ nodeTypeString ~ " node", startMark_));
+ return Range!(Node.Pair[])(get!(Node.Pair[]));
+ }
+ auto mapping() const
+ {
+ enforce(nodeID == NodeID.mapping,
+ new NodeException("Trying to 'mapping'-iterate over a "
+ ~ nodeTypeString ~ " node", startMark_));
+ return Range!(const(Node.Pair)[])(get!(Node.Pair[]));
+ }
+ }
+ @safe unittest
+ {
+ int[int] array;
+ Node n = Node(array);
+ n[1] = "foo";
+ n[2] = "bar";
+ n[3] = "baz";
+
+ string[int] test;
+ foreach (pair; n.mapping)
+ test[pair.key.as!int] = pair.value.as!string;
+
+ assert(test[1] == "foo");
+ assert(test[2] == "bar");
+ assert(test[3] == "baz");
+
+ int[int] constArray = [1: 2, 3: 4];
+ const x = Node(constArray);
+ foreach (pair; x.mapping)
+ assert(pair.value == constArray[pair.key.as!int]);
+ }
+
+ /** Return a range object iterating over mapping's keys.
+  *
+  * If K is Node, simply iterate over the keys in the mapping.
+  * Otherwise, convert each key to K during iteration.
+  *
+  * Throws: NodeException if the node is not a mapping or an element
+  *         could not be converted to specified type.
+  */
+ auto mappingKeys(K = Node)() const
+ {
+     enforce(nodeID == NodeID.mapping,
+         new NodeException("Trying to 'mappingKeys'-iterate over a "
+             ~ nodeTypeString ~ " node", startMark_));
+     static if (is(Unqual!K == Node))
+         return mapping.map!(pair => pair.key);
+     else
+         // Conversion failures surface lazily, when the range is consumed.
+         return mapping.map!(pair => pair.key.as!K);
+ }
+ @safe unittest
+ {
+ int[int] array;
+ Node m1 = Node(array);
+ m1["foo"] = 2;
+ m1["bar"] = 3;
+
+ assert(m1.mappingKeys.equal(["foo", "bar"]) || m1.mappingKeys.equal(["bar", "foo"]));
+
+ const cm1 = Node(["foo": 2, "bar": 3]);
+
+ assert(cm1.mappingKeys.equal(["foo", "bar"]) || cm1.mappingKeys.equal(["bar", "foo"]));
+ }
+
+ /** Return a range object iterating over mapping's values.
+  *
+  * If V is Node, simply iterate over the values in the mapping.
+  * Otherwise, convert each value to V during iteration.
+  *
+  * Throws: NodeException if the node is not a mapping or an element
+  *         could not be converted to specified type.
+  */
+ auto mappingValues(V = Node)() const
+ {
+     enforce(nodeID == NodeID.mapping,
+         new NodeException("Trying to 'mappingValues'-iterate over a "
+             ~ nodeTypeString ~ " node", startMark_));
+     static if (is(Unqual!V == Node))
+         return mapping.map!(pair => pair.value);
+     else
+         // Conversion failures surface lazily, when the range is consumed.
+         return mapping.map!(pair => pair.value.as!V);
+ }
+ @safe unittest
+ {
+ int[int] array;
+ Node m1 = Node(array);
+ m1["foo"] = 2;
+ m1["bar"] = 3;
+
+ assert(m1.mappingValues.equal([2, 3]) || m1.mappingValues.equal([3, 2]));
+
+ const cm1 = Node(["foo": 2, "bar": 3]);
+
+ assert(cm1.mappingValues.equal([2, 3]) || cm1.mappingValues.equal([3, 2]));
+ }
+
+
+ /** Foreach over a sequence, getting each element as T.
+ *
+ * If T is Node, simply iterate over the nodes in the sequence.
+ * Otherwise, convert each node to T during iteration.
+ *
+ * Throws: NodeException if the node is not a sequence or an
+ * element could not be converted to specified type.
+ */
+ int opApply(D)(D dg) if (isDelegate!D && (Parameters!D.length == 1))
+ {
+ enforce(nodeID == NodeID.sequence,
+ new NodeException("Trying to sequence-foreach over a " ~ nodeTypeString ~ " node",
+ startMark_));
+
+ int result;
+ foreach(ref node; get!(Node[]))
+ {
+ static if(is(Unqual!(Parameters!D[0]) == Node))
+ {
+ result = dg(node);
+ }
+ else
+ {
+ Parameters!D[0] temp = node.as!(Parameters!D[0]);
+ result = dg(temp);
+ }
+ if(result){break;}
+ }
+ return result;
+ }
+ /// ditto
+ int opApply(D)(D dg) const if (isDelegate!D && (Parameters!D.length == 1))
+ {
+ enforce(nodeID == NodeID.sequence,
+ new NodeException("Trying to sequence-foreach over a " ~ nodeTypeString ~ " node",
+ startMark_));
+
+ int result;
+ foreach(ref node; get!(Node[]))
+ {
+ static if(is(Unqual!(Parameters!D[0]) == Node))
+ {
+ result = dg(node);
+ }
+ else
+ {
+ Parameters!D[0] temp = node.as!(Parameters!D[0]);
+ result = dg(temp);
+ }
+ if(result){break;}
+ }
+ return result;
+ }
+ @safe unittest
+ {
+ Node n1 = Node(11);
+ Node n2 = Node(12);
+ Node n3 = Node(13);
+ Node n4 = Node(14);
+ Node narray = Node([n1, n2, n3, n4]);
+ const cNArray = narray;
+
+ int[] array, array2, array3;
+ foreach(int value; narray)
+ {
+ array ~= value;
+ }
+ foreach(Node node; narray)
+ {
+ array2 ~= node.as!int;
+ }
+ foreach (const Node node; cNArray)
+ {
+ array3 ~= node.as!int;
+ }
+ assert(array == [11, 12, 13, 14]);
+ assert(array2 == [11, 12, 13, 14]);
+ assert(array3 == [11, 12, 13, 14]);
+ }
+ @safe unittest
+ {
+ string[] testStrs = ["1", "2", "3"];
+ auto node1 = Node(testStrs);
+ int i = 0;
+ foreach (string elem; node1)
+ {
+ assert(elem == testStrs[i]);
+ i++;
+ }
+ const node2 = Node(testStrs);
+ i = 0;
+ foreach (string elem; node2)
+ {
+ assert(elem == testStrs[i]);
+ i++;
+ }
+ immutable node3 = Node(testStrs);
+ i = 0;
+ foreach (string elem; node3)
+ {
+ assert(elem == testStrs[i]);
+ i++;
+ }
+ }
+ @safe unittest
+ {
+ auto node = Node(["a":1, "b":2, "c":3]);
+ const cNode = node;
+ assertThrown({foreach (Node n; node) {}}());
+ assertThrown({foreach (const Node n; cNode) {}}());
+ }
+
+ /** Foreach over a mapping, getting each key/value as K/V.
+ *
+ * If the K and/or V is Node, simply iterate over the nodes in the mapping.
+ * Otherwise, convert each key/value to T during iteration.
+ *
+ * Throws: NodeException if the node is not a mapping or an
+ * element could not be converted to specified type.
+ */
+ int opApply(DG)(DG dg) if (isDelegate!DG && (Parameters!DG.length == 2))
+ {
+ alias K = Parameters!DG[0];
+ alias V = Parameters!DG[1];
+ enforce(nodeID == NodeID.mapping,
+ new NodeException("Trying to mapping-foreach over a " ~ nodeTypeString ~ " node",
+ startMark_));
+
+ int result;
+ foreach(ref pair; get!(Node.Pair[]))
+ {
+ static if(is(Unqual!K == Node) && is(Unqual!V == Node))
+ {
+ result = dg(pair.key, pair.value);
+ }
+ else static if(is(Unqual!K == Node))
+ {
+ V tempValue = pair.value.as!V;
+ result = dg(pair.key, tempValue);
+ }
+ else static if(is(Unqual!V == Node))
+ {
+ K tempKey = pair.key.as!K;
+ result = dg(tempKey, pair.value);
+ }
+ else
+ {
+ K tempKey = pair.key.as!K;
+ V tempValue = pair.value.as!V;
+ result = dg(tempKey, tempValue);
+ }
+
+ if(result){break;}
+ }
+ return result;
+ }
+ /// ditto
+ int opApply(DG)(DG dg) const if (isDelegate!DG && (Parameters!DG.length == 2))
+ {
+ alias K = Parameters!DG[0];
+ alias V = Parameters!DG[1];
+ enforce(nodeID == NodeID.mapping,
+ new NodeException("Trying to mapping-foreach over a " ~ nodeTypeString ~ " node",
+ startMark_));
+
+ int result;
+ foreach(ref pair; get!(Node.Pair[]))
+ {
+ static if(is(Unqual!K == Node) && is(Unqual!V == Node))
+ {
+ result = dg(pair.key, pair.value);
+ }
+ else static if(is(Unqual!K == Node))
+ {
+ V tempValue = pair.value.as!V;
+ result = dg(pair.key, tempValue);
+ }
+ else static if(is(Unqual!V == Node))
+ {
+ K tempKey = pair.key.as!K;
+ result = dg(tempKey, pair.value);
+ }
+ else
+ {
+ K tempKey = pair.key.as!K;
+ V tempValue = pair.value.as!V;
+ result = dg(tempKey, tempValue);
+ }
+
+ if(result){break;}
+ }
+ return result;
+ }
+ @safe unittest
+ {
+ Node n1 = Node(cast(long)11);
+ Node n2 = Node(cast(long)12);
+ Node n3 = Node(cast(long)13);
+ Node n4 = Node(cast(long)14);
+
+ Node k1 = Node("11");
+ Node k2 = Node("12");
+ Node k3 = Node("13");
+ Node k4 = Node("14");
+
+ Node nmap1 = Node([Pair(k1, n1),
+ Pair(k2, n2),
+ Pair(k3, n3),
+ Pair(k4, n4)]);
+
+ int[string] expected = ["11" : 11,
+ "12" : 12,
+ "13" : 13,
+ "14" : 14];
+ int[string] array;
+ foreach(string key, int value; nmap1)
+ {
+ array[key] = value;
+ }
+ assert(array == expected);
+
+ Node nmap2 = Node([Pair(k1, Node(cast(long)5)),
+ Pair(k2, Node(true)),
+ Pair(k3, Node(cast(real)1.0)),
+ Pair(k4, Node("yarly"))]);
+
+ foreach(string key, Node value; nmap2)
+ {
+ switch(key)
+ {
+ case "11": assert(value.as!int == 5 ); break;
+ case "12": assert(value.as!bool == true ); break;
+ case "13": assert(value.as!float == 1.0 ); break;
+ case "14": assert(value.as!string == "yarly"); break;
+ default: assert(false);
+ }
+ }
+ const nmap3 = nmap2;
+
+ foreach(const Node key, const Node value; nmap3)
+ {
+ switch(key.as!string)
+ {
+ case "11": assert(value.as!int == 5 ); break;
+ case "12": assert(value.as!bool == true ); break;
+ case "13": assert(value.as!float == 1.0 ); break;
+ case "14": assert(value.as!string == "yarly"); break;
+ default: assert(false);
+ }
+ }
+ }
+ @safe unittest
+ {
+ string[int] testStrs = [0: "1", 1: "2", 2: "3"];
+ auto node1 = Node(testStrs);
+ foreach (const int i, string elem; node1)
+ {
+ assert(elem == testStrs[i]);
+ }
+ const node2 = Node(testStrs);
+ foreach (const int i, string elem; node2)
+ {
+ assert(elem == testStrs[i]);
+ }
+ immutable node3 = Node(testStrs);
+ foreach (const int i, string elem; node3)
+ {
+ assert(elem == testStrs[i]);
+ }
+ }
+ @safe unittest
+ {
+ auto node = Node(["a", "b", "c"]);
+ const cNode = node;
+ assertThrown({foreach (Node a, Node b; node) {}}());
+ assertThrown({foreach (const Node a, const Node b; cNode) {}}());
+ }
+
+ /** Add an element to a sequence.
+  *
+  * This method can only be called on sequence nodes.
+  *
+  * If value is a node, it is copied to the sequence directly. Otherwise
+  * value is converted to a node and then stored in the sequence.
+  *
+  * $(P When emitting, all values in the sequence will be emitted. When
+  * using the !!set tag, the user needs to ensure that all elements in
+  * the sequence are unique, otherwise $(B invalid) YAML code will be
+  * emitted.)
+  *
+  * Params: value = Value to _add to the sequence.
+  */
+ void add(T)(T value)
+ {
+     // An invalid (uninitialized) node becomes an empty sequence first,
+     // so add() works on a default-constructed Node.
+     if (!isValid)
+     {
+         setValue(Node[].init);
+     }
+     enforce(nodeID == NodeID.sequence,
+         new NodeException("Trying to add an element to a " ~ nodeTypeString ~ " node", startMark_));
+
+     auto nodes = get!(Node[])();
+     static if(is(Unqual!T == Node)){nodes ~= value;}
+     else                           {nodes ~= Node(value);}
+     setValue(nodes);
+ }
+ @safe unittest
+ {
+ with(Node([1, 2, 3, 4]))
+ {
+ add(5.0f);
+ assert(opIndex(4).as!float == 5.0f);
+ }
+ with(Node())
+ {
+ add(5.0f);
+ assert(opIndex(0).as!float == 5.0f);
+ }
+ with(Node(5.0f))
+ {
+ assertThrown!NodeException(add(5.0f));
+ }
+ with(Node([5.0f : true]))
+ {
+ assertThrown!NodeException(add(5.0f));
+ }
+ }
+
+ /** Add a key-value pair to a mapping.
+  *
+  * This method can only be called on mapping nodes.
+  *
+  * If key and/or value is a node, it is copied to the mapping directly.
+  * Otherwise it is converted to a node and then stored in the mapping.
+  *
+  * $(P It is possible for the same key to be present more than once in a
+  * mapping. When emitting, all key-value pairs will be emitted.
+  * This is useful with the "!!pairs" tag, but will result in
+  * $(B invalid) YAML with "!!map" and "!!omap" tags.)
+  *
+  * Params: key = Key to _add.
+  *         value = Value to _add.
+  */
+ void add(K, V)(K key, V value)
+ {
+     // An invalid (uninitialized) node becomes an empty mapping first,
+     // so add() works on a default-constructed Node.
+     if (!isValid)
+     {
+         setValue(Node.Pair[].init);
+     }
+     enforce(nodeID == NodeID.mapping,
+         new NodeException("Trying to add a key-value pair to a " ~
+             nodeTypeString ~ " node",
+             startMark_));
+
+     // Always appends; no check for an existing equal key is made here.
+     auto pairs = get!(Node.Pair[])();
+     pairs ~= Pair(key, value);
+     setValue(pairs);
+ }
+ @safe unittest
+ {
+ with(Node([1, 2], [3, 4]))
+ {
+ add(5, "6");
+ assert(opIndex(5).as!string == "6");
+ }
+ with(Node())
+ {
+ add(5, "6");
+ assert(opIndex(5).as!string == "6");
+ }
+ with(Node(5.0f))
+ {
+ assertThrown!NodeException(add(5, "6"));
+ }
+ with(Node([5.0f]))
+ {
+ assertThrown!NodeException(add(5, "6"));
+ }
+ }
+
+ /** Determine whether a key is in a mapping, and access its value.
+  *
+  * This method can only be called on mapping nodes.
+  *
+  * Params: key = Key to search for.
+  *
+  * Returns: A pointer to the value (as a Node) corresponding to key,
+  *          or null if not found.
+  *
+  * Note: Any modification to the node can invalidate the returned
+  *       pointer.
+  *
+  * See_Also: contains
+  */
+ inout(Node*) opBinaryRight(string op, K)(K key) inout
+     if (op == "in")
+ {
+     enforce(nodeID == NodeID.mapping, new NodeException("Trying to use 'in' on a " ~
+         nodeTypeString ~ " node", startMark_));
+
+     // findPair returns a negative value when no key matches.
+     auto idx = findPair(key);
+     if(idx < 0)
+     {
+         return null;
+     }
+     else
+     {
+         // Points into the pairs array; invalidated by later mutation.
+         return &(get!(Node.Pair[])[idx].value);
+     }
+ }
+ @safe unittest
+ {
+ auto mapping = Node(["foo", "baz"], ["bar", "qux"]);
+ assert("bad" !in mapping && ("bad" in mapping) is null);
+ Node* foo = "foo" in mapping;
+ assert(foo !is null);
+ assert(*foo == Node("bar"));
+ assert(foo.get!string == "bar");
+ *foo = Node("newfoo");
+ assert(mapping["foo"] == Node("newfoo"));
+ }
+ @safe unittest
+ {
+ auto mNode = Node(["a": 2]);
+ assert("a" in mNode);
+ const cNode = Node(["a": 2]);
+ assert("a" in cNode);
+ immutable iNode = Node(["a": 2]);
+ assert("a" in iNode);
+ }
+
+ /** Remove first (if any) occurrence of a value in a collection.
+  *
+  * This method can only be called on collection nodes.
+  *
+  * If the node is a sequence, the first node matching value is removed.
+  * If the node is a mapping, the first key-value pair where _value
+  * matches specified value is removed.
+  *
+  * Params: rhs = Value to _remove.
+  *
+  * Throws: NodeException if the node is not a collection.
+  */
+ void remove(T)(T rhs)
+ {
+     // No.key: in mappings this matches against pair *values*, not keys.
+     remove_!(T, No.key, "remove")(rhs);
+ }
+ @safe unittest
+ {
+ with(Node([1, 2, 3, 4, 3]))
+ {
+ remove(3);
+ assert(length == 4);
+ assert(opIndex(2).as!int == 4);
+ assert(opIndex(3).as!int == 3);
+
+ add(YAMLNull());
+ assert(length == 5);
+ remove(YAMLNull());
+ assert(length == 4);
+ }
+ with(Node(["1", "2", "3"], [4, 5, 6]))
+ {
+ remove(4);
+ assert(length == 2);
+ add("nullkey", YAMLNull());
+ assert(length == 3);
+ remove(YAMLNull());
+ assert(length == 2);
+ }
+ }
+
+ /** Remove element at the specified index of a collection.
+  *
+  * This method can only be called on collection nodes.
+  *
+  * If the node is a sequence, index must be integral.
+  *
+  * If the node is a mapping, remove the first key-value pair where
+  * key matches index.
+  *
+  * If the node is a mapping and no key matches index, nothing is removed
+  * and no exception is thrown. This ensures behavior similar to D arrays
+  * and associative arrays.
+  *
+  * Params: index = Index to remove at.
+  *
+  * Throws: NodeException if the node is not a collection, index is out
+  *         of range or if a non-integral index is used on a sequence node.
+  */
+ void removeAt(T)(T index)
+ {
+     // Yes.key: in mappings this matches against pair *keys*.
+     remove_!(T, Yes.key, "removeAt")(index);
+ }
+ @safe unittest
+ {
+ with(Node([1, 2, 3, 4, 3]))
+ {
+ removeAt(3);
+ assertThrown!NodeException(removeAt("3"));
+ assert(length == 4);
+ assert(opIndex(3).as!int == 3);
+ }
+ with(Node(["1", "2", "3"], [4, 5, 6]))
+ {
+ // no integer 2 key, so don't remove anything
+ removeAt(2);
+ assert(length == 3);
+ removeAt("2");
+ assert(length == 2);
+ add(YAMLNull(), "nullval");
+ assert(length == 3);
+ removeAt(YAMLNull());
+ assert(length == 2);
+ }
+ }
+
+ /// Compare with another _node.
+ ///
+ /// Ordering is established in stages: tag, validity, node type, and
+ /// finally the value itself. Returns a negative value, zero, or a
+ /// positive value as this node sorts before, equal to, or after rhs.
+ int opCmp(const ref Node rhs) const @safe
+ {
+     // Compare tags - if equal or both null, we need to compare further.
+     // A null tag sorts before any non-null tag.
+     const tagCmp = (tag_ is null) ? (rhs.tag_ is null) ? 0 : -1
+                    : (rhs.tag_ is null) ? 1 : std.algorithm.comparison.cmp(tag_, rhs.tag_);
+     if(tagCmp != 0){return tagCmp;}
+
+     // Three-way comparison helper for any ordered pair of values.
+     static int cmp(T1, T2)(T1 a, T2 b)
+     {
+         return a > b ? 1 :
+                a < b ? -1 :
+                        0;
+     }
+
+     // Compare validity: if both valid, we have to compare further.
+     // An invalid node sorts before a valid one; two invalid nodes are equal.
+     const v1 = isValid;
+     const v2 = rhs.isValid;
+     if(!v1){return v2 ? -1 : 0;}
+     if(!v2){return 1;}
+
+     // Different node types order by the NodeType enum value.
+     const typeCmp = cmp(type, rhs.type);
+     if(typeCmp != 0){return typeCmp;}
+
+     // Lexicographic comparison of two sequences or two mappings:
+     // shorter sorts first; equal lengths compare element by element.
+     static int compareCollections(T)(const ref Node lhs, const ref Node rhs)
+     {
+         const c1 = lhs.getValue!T;
+         const c2 = rhs.getValue!T;
+         // Identical array slices: trivially equal.
+         if(c1 is c2){return 0;}
+         if(c1.length != c2.length)
+         {
+             return cmp(c1.length, c2.length);
+         }
+         // Equal lengths, compare items.
+         foreach(i; 0 .. c1.length)
+         {
+             const itemCmp = c1[i].opCmp(c2[i]);
+             if(itemCmp != 0){return itemCmp;}
+         }
+         return 0;
+     }
+
+     final switch(type)
+     {
+         case NodeType.string:
+             return std.algorithm.cmp(getValue!string,
+                 rhs.getValue!string);
+         case NodeType.integer:
+             return cmp(getValue!long, rhs.getValue!long);
+         case NodeType.boolean:
+             // false sorts before true.
+             const b1 = getValue!bool;
+             const b2 = rhs.getValue!bool;
+             return b1 ? b2 ? 0 : 1
+                       : b2 ? -1 : 0;
+         case NodeType.binary:
+             const b1 = getValue!(ubyte[]);
+             const b2 = rhs.getValue!(ubyte[]);
+             return std.algorithm.cmp(b1, b2);
+         case NodeType.null_:
+             return 0;
+         case NodeType.decimal:
+             // NaN sorts before any number; two NaNs compare equal.
+             const r1 = getValue!real;
+             const r2 = rhs.getValue!real;
+             if(isNaN(r1))
+             {
+                 return isNaN(r2) ? 0 : -1;
+             }
+             if(isNaN(r2))
+             {
+                 return 1;
+             }
+             // Fuzzy equality.
+             // NOTE(review): real.epsilon is the *relative* machine epsilon
+             // (~1e-19); used as an absolute tolerance it only differs from
+             // exact comparison at tiny magnitudes — confirm intended tolerance.
+             if(r1 <= r2 + real.epsilon && r1 >= r2 - real.epsilon)
+             {
+                 return 0;
+             }
+             return cmp(r1, r2);
+         case NodeType.timestamp:
+             const t1 = getValue!SysTime;
+             const t2 = rhs.getValue!SysTime;
+             return cmp(t1, t2);
+         case NodeType.mapping:
+             return compareCollections!(Pair[])(this, rhs);
+         case NodeType.sequence:
+             return compareCollections!(Node[])(this, rhs);
+         case NodeType.merge:
+             assert(false, "Cannot compare merge nodes");
+         case NodeType.invalid:
+             assert(false, "Cannot compare invalid nodes");
+     }
+ }
+
+ // Ensure opCmp is symmetric for collections
+ @safe unittest
+ {
+ auto node1 = Node(
+ [
+ Node("New York Yankees", "tag:yaml.org,2002:str"),
+ Node("Atlanta Braves", "tag:yaml.org,2002:str")
+ ], "tag:yaml.org,2002:seq"
+ );
+ auto node2 = Node(
+ [
+ Node("Detroit Tigers", "tag:yaml.org,2002:str"),
+ Node("Chicago cubs", "tag:yaml.org,2002:str")
+ ], "tag:yaml.org,2002:seq"
+ );
+ assert(node1 > node2);
+ assert(node2 < node1);
+ }
+
+ // Compute hash of the node.
+ //
+ // The tag participates in the hash, so equal values with different tags
+ // hash differently (exercised by the unittest below).
+ hash_t toHash() nothrow const @trusted
+ {
+     const valueHash = value_.toHash();
+
+     // Untagged nodes hash on the value alone; tagged nodes mix the tag in.
+     return tag_ is null ? valueHash : tag_.hashOf(valueHash);
+ }
+ @safe unittest
+ {
+ assert(Node(42).toHash() != Node(41).toHash());
+ assert(Node(42).toHash() != Node(42, "some-tag").toHash());
+ }
+
+ /// Get type of the node value.
+ ///
+ /// Dispatches on the typeid stored in value_. The checks are mutually
+ /// exclusive (each typeid is distinct), so their order does not affect
+ /// the result; a node holding no value at all reports NodeType.invalid.
+ @property NodeType type() const @safe nothrow
+ {
+     if (value_.type is typeid(bool))
+     {
+         return NodeType.boolean;
+     }
+     else if (value_.type is typeid(long))
+     {
+         return NodeType.integer;
+     }
+     else if (value_.type is typeid(Node[]))
+     {
+         return NodeType.sequence;
+     }
+     else if (value_.type is typeid(ubyte[]))
+     {
+         return NodeType.binary;
+     }
+     else if (value_.type is typeid(string))
+     {
+         return NodeType.string;
+     }
+     else if (value_.type is typeid(Node.Pair[]))
+     {
+         return NodeType.mapping;
+     }
+     else if (value_.type is typeid(SysTime))
+     {
+         return NodeType.timestamp;
+     }
+     else if (value_.type is typeid(YAMLNull))
+     {
+         return NodeType.null_;
+     }
+     else if (value_.type is typeid(YAMLMerge))
+     {
+         return NodeType.merge;
+     }
+     else if (value_.type is typeid(real))
+     {
+         return NodeType.decimal;
+     }
+     else if (!value_.hasValue)
+     {
+         return NodeType.invalid;
+     }
+     // Any other payload type is a bug in Node construction.
+     else assert(0, text(value_.type));
+ }
+
+ /// Get the kind of node this is.
+ ///
+ /// Collapses the fine-grained NodeType into the three YAML node kinds
+ /// (sequence, mapping, scalar) plus invalid for an unset node.
+ @property NodeID nodeID() const @safe nothrow
+ {
+     switch (type)
+     {
+         case NodeType.sequence:
+             return NodeID.sequence;
+         case NodeType.mapping:
+             return NodeID.mapping;
+         case NodeType.invalid:
+             return NodeID.invalid;
+         default:
+             // Every remaining NodeType (boolean, integer, binary, string,
+             // timestamp, null_, merge, decimal) is a scalar.
+             return NodeID.scalar;
+     }
+ }
+ package:
+
+ // Get a string representation of the node tree. Used for debugging.
+ //
+ // Params: level = Level of the node in the tree.
+ //
+ // Returns: String representing the node tree.
+ @property string debugString(uint level = 0) const @safe
+ {
+ string indent;
+ foreach(i; 0 .. level){indent ~= " ";}
+
+ final switch (nodeID)
+ {
+ case NodeID.invalid:
+ return indent ~ "invalid";
+ case NodeID.sequence:
+ string result = indent ~ "sequence:\n";
+ foreach(ref node; get!(Node[]))
+ {
+ result ~= node.debugString(level + 1);
+ }
+ return result;
+ case NodeID.mapping:
+ string result = indent ~ "mapping:\n";
+ foreach(ref pair; get!(Node.Pair[]))
+ {
+ result ~= indent ~ " pair\n";
+ result ~= pair.key.debugString(level + 2);
+ result ~= pair.value.debugString(level + 2);
+ }
+ return result;
+ case NodeID.scalar:
+ return indent ~ "scalar(" ~
+ (convertsTo!string ? get!string : text(type)) ~ ")\n";
+ }
+ }
+
+
+ public:
+ /// Human-readable name of this node's kind, used in error messages.
+ @property string nodeTypeString() const @safe nothrow
+ {
+     // One name per NodeID category.
+     switch (nodeID)
+     {
+         case NodeID.mapping:  return "mapping";
+         case NodeID.sequence: return "sequence";
+         case NodeID.scalar:   return "scalar";
+         default:              return "invalid";
+     }
+ }
+
+ // Determine if the value can be converted to specified type.
+ //
+ // True when the stored value already has type T, or when a conversion
+ // to T is defined for the stored node type (see get()/as!).
+ @property bool convertsTo(T)() const
+ {
+     // Exact type match always converts.
+     if(isType!T){return true;}
+
+     // Every type allowed in Value should be convertible to string.
+     static if(isSomeString!T) {return true;}
+     // Floating-point targets accept both integer and decimal nodes.
+     else static if(isFloatingPoint!T){return type.among!(NodeType.integer, NodeType.decimal);}
+     else static if(isIntegral!T) {return type == NodeType.integer;}
+     else static if(is(Unqual!T==bool)){return type == NodeType.boolean;}
+     else {return false;}
+ }
+ /**
+  * Sets the style of this node when dumped.
+  *
+  * Params: style = Any valid style.
+  *
+  * Throws: NodeException if a collection style is set on a valid
+  *         non-collection node.
+  */
+ void setStyle(CollectionStyle style) @safe
+ {
+     // Invalid (unset) nodes are allowed so a style can be chosen up front.
+     enforce(!isValid || (nodeID.among(NodeID.mapping, NodeID.sequence)), new NodeException(
+         "Cannot set collection style for non-collection nodes", startMark_));
+     collectionStyle = style;
+ }
+ /// Ditto
+ void setStyle(ScalarStyle style) @safe
+ {
+     // Invalid (unset) nodes are allowed so a style can be chosen up front.
+     enforce(!isValid || (nodeID == NodeID.scalar), new NodeException(
+         "Cannot set scalar style for non-scalar nodes", startMark_));
+     scalarStyle = style;
+ }
+ ///
+ @safe unittest
+ {
+ import dyaml.dumper;
+ auto stream = new Appender!string();
+ auto node = Node([1, 2, 3, 4, 5]);
+ node.setStyle(CollectionStyle.block);
+
+ auto dumper = dumper();
+ dumper.dump(stream, node);
+ }
+ ///
+ @safe unittest
+ {
+ import dyaml.dumper;
+ auto stream = new Appender!string();
+ auto node = Node(4);
+ node.setStyle(ScalarStyle.literal);
+
+ auto dumper = dumper();
+ dumper.dump(stream, node);
+ }
+ @safe unittest
+ {
+ assertThrown!NodeException(Node(4).setStyle(CollectionStyle.block));
+ assertThrown!NodeException(Node([4]).setStyle(ScalarStyle.literal));
+ }
+ @safe unittest
+ {
+ import dyaml.dumper;
+ {
+ auto stream = new Appender!string();
+ auto node = Node([1, 2, 3, 4, 5]);
+ node.setStyle(CollectionStyle.block);
+ auto dumper = dumper();
+ dumper.explicitEnd = false;
+ dumper.explicitStart = false;
+ dumper.YAMLVersion = null;
+ dumper.dump(stream, node);
+
+ //Block style should start with a hyphen.
+ assert(stream.data[0] == '-');
+ }
+ {
+ auto stream = new Appender!string();
+ auto node = Node([1, 2, 3, 4, 5]);
+ node.setStyle(CollectionStyle.flow);
+ auto dumper = dumper();
+ dumper.explicitEnd = false;
+ dumper.explicitStart = false;
+ dumper.YAMLVersion = null;
+ dumper.dump(stream, node);
+
+ //Flow style should start with a bracket.
+ assert(stream.data[0] == '[');
+ }
+ {
+ auto stream = new Appender!string();
+ auto node = Node(1);
+ node.setStyle(ScalarStyle.singleQuoted);
+ auto dumper = dumper();
+ dumper.explicitEnd = false;
+ dumper.explicitStart = false;
+ dumper.YAMLVersion = null;
+ dumper.dump(stream, node);
+
+ assert(stream.data == "!!int '1'\n");
+ }
+ {
+ auto stream = new Appender!string();
+ auto node = Node(1);
+ node.setStyle(ScalarStyle.doubleQuoted);
+ auto dumper = dumper();
+ dumper.explicitEnd = false;
+ dumper.explicitStart = false;
+ dumper.YAMLVersion = null;
+ dumper.dump(stream, node);
+
+ assert(stream.data == "!!int \"1\"\n");
+ }
+ }
+
+ private:
+ // Determine if the value stored by the node is of specified type.
+ //
+ // This only works for default YAML types, not for user defined types.
+ @property bool isType(T)() const
+ {
+ return value_.type is typeid(Unqual!T);
+ }
+
+ // Implementation of contains() and containsKey().
+ bool contains_(T, Flag!"key" key, string func)(T rhs) const
+ {
+ final switch (nodeID)
+ {
+ case NodeID.mapping:
+ return findPair!(T, key)(rhs) >= 0;
+ case NodeID.sequence:
+ static if(!key)
+ {
+ foreach(ref node; getValue!(Node[]))
+ {
+ if(node == rhs){return true;}
+ }
+ return false;
+ }
+ else
+ {
+ throw new NodeException("Trying to use " ~ func ~ "() on a " ~ nodeTypeString ~ " node",
+ startMark_);
+ }
+ case NodeID.scalar:
+ case NodeID.invalid:
+ throw new NodeException("Trying to use " ~ func ~ "() on a " ~ nodeTypeString ~ " node",
+ startMark_);
+ }
+
+ }
+
+ // Implementation of remove() and removeAt()
+ void remove_(T, Flag!"key" key, string func)(T rhs)
+ {
+ static void removeElem(E, I)(ref Node node, I index)
+ {
+ auto elems = node.getValue!(E[]);
+ moveAll(elems[cast(size_t)index + 1 .. $], elems[cast(size_t)index .. $ - 1]);
+ elems.length = elems.length - 1;
+ node.setValue(elems);
+ }
+
+ final switch (nodeID)
+ {
+ case NodeID.mapping:
+ const index = findPair!(T, key)(rhs);
+ if(index >= 0){removeElem!Pair(this, index);}
+ break;
+ case NodeID.sequence:
+ static long getIndex(ref Node node, ref T rhs)
+ {
+ foreach(idx, ref elem; node.get!(Node[]))
+ {
+ if(elem.convertsTo!T && elem.as!(T, No.stringConversion) == rhs)
+ {
+ return idx;
+ }
+ }
+ return -1;
+ }
+
+ const index = select!key(rhs, getIndex(this, rhs));
+
+ // This throws if the index is not integral.
+ checkSequenceIndex(index);
+
+ static if(isIntegral!(typeof(index))){removeElem!Node(this, index); break; }
+ else {assert(false, "Non-integral sequence index");}
+ case NodeID.scalar:
+ case NodeID.invalid:
+ throw new NodeException("Trying to " ~ func ~ "() from a " ~ nodeTypeString ~ " node",
+ startMark_);
+ }
+ }
+
+ // Get index of pair with key (or value, if key is false) matching index.
+ // Cannot be inferred @safe due to https://issues.dlang.org/show_bug.cgi?id=16528
+ sizediff_t findPair(T, Flag!"key" key = Yes.key)(const ref T index) const @safe
+ {
+ const pairs = getValue!(Pair[])();
+ const(Node)* node;
+ foreach(idx, ref const(Pair) pair; pairs)
+ {
+ static if(key){node = &pair.key;}
+ else {node = &pair.value;}
+
+
+ const bool typeMatch = (isFloatingPoint!T && (node.type.among!(NodeType.integer, NodeType.decimal))) ||
+ (isIntegral!T && node.type == NodeType.integer) ||
+ (is(Unqual!T==bool) && node.type == NodeType.boolean) ||
+ (isSomeString!T && node.type == NodeType.string) ||
+ (node.isType!T);
+ if(typeMatch && *node == index)
+ {
+ return idx;
+ }
+ }
+ return -1;
+ }
+
+ // Check if index is integral and in range.
+ void checkSequenceIndex(T)(T index) const
+ {
+ assert(nodeID == NodeID.sequence,
+ "checkSequenceIndex() called on a " ~ nodeTypeString ~ " node");
+
+ static if(!isIntegral!T)
+ {
+ throw new NodeException("Indexing a sequence with a non-integral type.", startMark_);
+ }
+ else
+ {
+ enforce(index >= 0 && index < getValue!(Node[]).length,
+ new NodeException("Sequence index out of range: " ~ to!string(index),
+ startMark_));
+ }
+ }
+ // Safe wrapper for getting a value out of the variant.
+ inout(T) getValue(T)() @trusted inout
+ {
+ return value_.get!T;
+ }
+ // Safe wrapper for coercing a value out of the variant.
+ inout(T) coerceValue(T)() @trusted inout
+ {
+ return (cast(Value)value_).coerce!T;
+ }
+ // Safe wrapper for setting a value for the variant.
+ void setValue(T)(T value) @trusted
+ {
+ static if (allowed!T)
+ {
+ value_ = value;
+ }
+ else
+ {
+ auto tmpNode = cast(Node)value;
+ tag_ = tmpNode.tag;
+ scalarStyle = tmpNode.scalarStyle;
+ collectionStyle = tmpNode.collectionStyle;
+ value_ = tmpNode.value_;
+ }
+ }
+}
+
+package:
+// Merge pairs into an array of pairs based on merge rules in the YAML spec.
+//
+// Any new pair will only be added if there is not already a pair
+// with the same key.
+//
+// Params: pairs = Appender managing the array of pairs to merge into.
+// toMerge = Pairs to merge.
+void merge(ref Appender!(Node.Pair[]) pairs, Node.Pair[] toMerge) @safe
+{
+ bool eq(ref Node.Pair a, ref Node.Pair b){return a.key == b.key;}
+
+ foreach(ref pair; toMerge) if(!canFind!eq(pairs.data, pair))
+ {
+ pairs.put(pair);
+ }
+}
+
+enum hasNodeConstructor(T) = hasSimpleNodeConstructor!T || hasExpandedNodeConstructor!T;
+template hasSimpleNodeConstructor(T)
+{
+ static if (is(T == struct))
+ {
+ enum hasSimpleNodeConstructor = is(typeof(T(Node.init)));
+ }
+ else static if (is(T == class))
+ {
+ enum hasSimpleNodeConstructor = is(typeof(new inout T(Node.init)));
+ }
+ else enum hasSimpleNodeConstructor = false;
+}
+template hasExpandedNodeConstructor(T)
+{
+ static if (is(T == struct))
+ {
+ enum hasExpandedNodeConstructor = is(typeof(T(Node.init, "")));
+ }
+ else static if (is(T == class))
+ {
+ enum hasExpandedNodeConstructor = is(typeof(new inout T(Node.init, "")));
+ }
+ else enum hasExpandedNodeConstructor = false;
+}
+enum castableToNode(T) = (is(T == struct) || is(T == class)) && is(typeof(T.opCast!Node()) : Node);
diff --git a/src/ext_depends/D-YAML/source/dyaml/package.d b/src/ext_depends/D-YAML/source/dyaml/package.d
new file mode 100644
index 0000000..e61b716
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/package.d
@@ -0,0 +1,15 @@
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml;
+
+public import dyaml.dumper;
+public import dyaml.encoding;
+public import dyaml.exception;
+public import dyaml.linebreak;
+public import dyaml.loader;
+public import dyaml.resolver;
+public import dyaml.style;
+public import dyaml.node;
diff --git a/src/ext_depends/D-YAML/source/dyaml/parser.d b/src/ext_depends/D-YAML/source/dyaml/parser.d
new file mode 100644
index 0000000..7e0b78a
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/parser.d
@@ -0,0 +1,958 @@
+
+// Copyright Ferdinand Majerech 2011-2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/**
+ * YAML parser.
+ * Code based on PyYAML: http://www.pyyaml.org
+ */
+module dyaml.parser;
+
+
+import std.algorithm;
+import std.array;
+import std.conv;
+import std.exception;
+import std.typecons;
+
+import dyaml.event;
+import dyaml.exception;
+import dyaml.scanner;
+import dyaml.style;
+import dyaml.token;
+import dyaml.tagdirective;
+
+
+package:
+/**
+ * The following YAML grammar is LL(1) and is parsed by a recursive descent
+ * parser.
+ *
+ * stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ * implicit_document ::= block_node DOCUMENT-END*
+ * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+ * block_node_or_indentless_sequence ::=
+ * ALIAS
+ * | properties (block_content | indentless_block_sequence)?
+ * | block_content
+ * | indentless_block_sequence
+ * block_node ::= ALIAS
+ * | properties block_content?
+ * | block_content
+ * flow_node ::= ALIAS
+ * | properties flow_content?
+ * | flow_content
+ * properties ::= TAG ANCHOR? | ANCHOR TAG?
+ * block_content ::= block_collection | flow_collection | SCALAR
+ * flow_content ::= flow_collection | SCALAR
+ * block_collection ::= block_sequence | block_mapping
+ * flow_collection ::= flow_sequence | flow_mapping
+ * block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+ * indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+ * block_mapping ::= BLOCK-MAPPING_START
+ * ((KEY block_node_or_indentless_sequence?)?
+ * (VALUE block_node_or_indentless_sequence?)?)*
+ * BLOCK-END
+ * flow_sequence ::= FLOW-SEQUENCE-START
+ * (flow_sequence_entry FLOW-ENTRY)*
+ * flow_sequence_entry?
+ * FLOW-SEQUENCE-END
+ * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ * flow_mapping ::= FLOW-MAPPING-START
+ * (flow_mapping_entry FLOW-ENTRY)*
+ * flow_mapping_entry?
+ * FLOW-MAPPING-END
+ * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ *
+ * FIRST sets:
+ *
+ * stream: { STREAM-START }
+ * explicit_document: { DIRECTIVE DOCUMENT-START }
+ * implicit_document: FIRST(block_node)
+ * block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+ * flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+ * block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+ * flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+ * block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+ * flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+ * block_sequence: { BLOCK-SEQUENCE-START }
+ * block_mapping: { BLOCK-MAPPING-START }
+ * block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+ * indentless_sequence: { ENTRY }
+ * flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+ * flow_sequence: { FLOW-SEQUENCE-START }
+ * flow_mapping: { FLOW-MAPPING-START }
+ * flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+ * flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+ */
+
+
+/**
+ * Marked exception thrown at parser errors.
+ *
+ * See_Also: MarkedYAMLException
+ */
+class ParserException : MarkedYAMLException
+{
+ mixin MarkedExceptionCtors;
+}
+
+/// Generates events from tokens provided by a Scanner.
+///
+/// While Parser receives tokens with non-const character slices, the events it
+/// produces are immutable strings, which are usually the same slices, cast to string.
+/// Parser is the last layer of D:YAML that may possibly do any modifications to these
+/// slices.
+final class Parser
+{
+ private:
+ ///Default tag handle shortcuts and replacements.
+ static TagDirective[] defaultTagDirectives_ =
+ [TagDirective("!", "!"), TagDirective("!!", "tag:yaml.org,2002:")];
+
+ ///Scanner providing YAML tokens.
+ Scanner scanner_;
+
+ ///Event produced by the most recent state.
+ Event currentEvent_;
+
+ ///YAML version string.
+ string YAMLVersion_ = null;
+ ///Tag handle shortcuts and replacements.
+ TagDirective[] tagDirectives_;
+
+ ///Stack of states.
+ Appender!(Event delegate() @safe[]) states_;
+ ///Stack of marks used to keep track of extents of e.g. YAML collections.
+ Appender!(Mark[]) marks_;
+
+ ///Current state.
+ Event delegate() @safe state_;
+
+ public:
+ ///Construct a Parser using specified Scanner.
+ this(Scanner scanner) @safe
+ {
+ state_ = &parseStreamStart;
+ scanner_ = scanner;
+ states_.reserve(32);
+ marks_.reserve(32);
+ }
+
+ /**
+ * Check if any events are left. May have side effects in some cases.
+ */
+ bool empty() @safe
+ {
+ ensureState();
+ return currentEvent_.isNull;
+ }
+
+ /**
+ * Return the current event.
+ *
+ * Must not be called if there are no events left.
+ */
+ Event front() @safe
+ {
+ ensureState();
+ assert(!currentEvent_.isNull, "No event left to peek");
+ return currentEvent_;
+ }
+
+ /**
+ * Skip to the next event.
+ *
+ * Must not be called if there are no events left.
+ */
+ void popFront() @safe
+ {
+ currentEvent_.id = EventID.invalid;
+ ensureState();
+ }
+
+ private:
+ /// If current event is invalid, load the next valid one if possible.
+ void ensureState() @safe
+ {
+ if(currentEvent_.isNull && state_ !is null)
+ {
+ currentEvent_ = state_();
+ }
+ }
+ ///Pop and return the newest state in states_.
+ Event delegate() @safe popState() @safe
+ {
+ enforce(states_.data.length > 0,
+ new YAMLException("Parser: Need to pop state but no states left to pop"));
+ const result = states_.data.back;
+ states_.shrinkTo(states_.data.length - 1);
+ return result;
+ }
+
+ ///Pop and return the newest mark in marks_.
+ Mark popMark() @safe
+ {
+ enforce(marks_.data.length > 0,
+ new YAMLException("Parser: Need to pop mark but no marks left to pop"));
+ const result = marks_.data.back;
+ marks_.shrinkTo(marks_.data.length - 1);
+ return result;
+ }
+
+ /// Push a state on the stack
+ void pushState(Event delegate() @safe state) @safe
+ {
+ states_ ~= state;
+ }
+ /// Push a mark on the stack
+ void pushMark(Mark mark) @safe
+ {
+ marks_ ~= mark;
+ }
+
+ /**
+ * stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ * implicit_document ::= block_node DOCUMENT-END*
+ * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+ */
+
+ ///Parse stream start.
+ Event parseStreamStart() @safe
+ {
+ const token = scanner_.front;
+ scanner_.popFront();
+ state_ = &parseImplicitDocumentStart;
+ return streamStartEvent(token.startMark, token.endMark);
+ }
+
+ /// Parse implicit document start, unless explicit detected: if so, parse explicit.
+ Event parseImplicitDocumentStart() @safe
+ {
+ // Parse an implicit document.
+ if(!scanner_.front.id.among!(TokenID.directive, TokenID.documentStart,
+ TokenID.streamEnd))
+ {
+ tagDirectives_ = defaultTagDirectives_;
+ const token = scanner_.front;
+
+ pushState(&parseDocumentEnd);
+ state_ = &parseBlockNode;
+
+ return documentStartEvent(token.startMark, token.endMark, false, null, null);
+ }
+ return parseDocumentStart();
+ }
+
+ ///Parse explicit document start.
+ Event parseDocumentStart() @safe
+ {
+ //Parse any extra document end indicators.
+ while(scanner_.front.id == TokenID.documentEnd)
+ {
+ scanner_.popFront();
+ }
+
+ //Parse an explicit document.
+ if(scanner_.front.id != TokenID.streamEnd)
+ {
+ const startMark = scanner_.front.startMark;
+
+ auto tagDirectives = processDirectives();
+ enforce(scanner_.front.id == TokenID.documentStart,
+ new ParserException("Expected document start but found " ~
+ scanner_.front.idString,
+ scanner_.front.startMark));
+
+ const endMark = scanner_.front.endMark;
+ scanner_.popFront();
+ pushState(&parseDocumentEnd);
+ state_ = &parseDocumentContent;
+ return documentStartEvent(startMark, endMark, true, YAMLVersion_, tagDirectives);
+ }
+ else
+ {
+ //Parse the end of the stream.
+ const token = scanner_.front;
+ scanner_.popFront();
+ assert(states_.data.length == 0);
+ assert(marks_.data.length == 0);
+ state_ = null;
+ return streamEndEvent(token.startMark, token.endMark);
+ }
+ }
+
+ ///Parse document end (explicit or implicit).
+ Event parseDocumentEnd() @safe
+ {
+ Mark startMark = scanner_.front.startMark;
+ const bool explicit = scanner_.front.id == TokenID.documentEnd;
+ Mark endMark = startMark;
+ if (explicit)
+ {
+ endMark = scanner_.front.endMark;
+ scanner_.popFront();
+ }
+
+ state_ = &parseDocumentStart;
+
+ return documentEndEvent(startMark, endMark, explicit);
+ }
+
+ ///Parse document content.
+ Event parseDocumentContent() @safe
+ {
+ if(scanner_.front.id.among!(TokenID.directive, TokenID.documentStart,
+ TokenID.documentEnd, TokenID.streamEnd))
+ {
+ state_ = popState();
+ return processEmptyScalar(scanner_.front.startMark);
+ }
+ return parseBlockNode();
+ }
+
+ /// Process directives at the beginning of a document.
+ TagDirective[] processDirectives() @safe
+ {
+ // Destroy version and tag handles from previous document.
+ YAMLVersion_ = null;
+ tagDirectives_.length = 0;
+
+ // Process directives.
+ while(scanner_.front.id == TokenID.directive)
+ {
+ const token = scanner_.front;
+ scanner_.popFront();
+ string value = token.value.idup;
+ if(token.directive == DirectiveType.yaml)
+ {
+ enforce(YAMLVersion_ is null,
+ new ParserException("Duplicate YAML directive", token.startMark));
+ const minor = value.split(".")[0];
+ enforce(minor == "1",
+ new ParserException("Incompatible document (version 1.x is required)",
+ token.startMark));
+ YAMLVersion_ = value;
+ }
+ else if(token.directive == DirectiveType.tag)
+ {
+ auto handle = value[0 .. token.valueDivider];
+
+ foreach(ref pair; tagDirectives_)
+ {
+ // handle
+ const h = pair.handle;
+ enforce(h != handle, new ParserException("Duplicate tag handle: " ~ handle,
+ token.startMark));
+ }
+ tagDirectives_ ~=
+ TagDirective(handle, value[token.valueDivider .. $]);
+ }
+ // Any other directive type is ignored (only YAML and TAG are in YAML
+ // 1.1/1.2, any other directives are "reserved")
+ }
+
+ TagDirective[] value = tagDirectives_;
+
+ //Add any default tag handles that haven't been overridden.
+ foreach(ref defaultPair; defaultTagDirectives_)
+ {
+ bool found;
+ foreach(ref pair; tagDirectives_) if(defaultPair.handle == pair.handle)
+ {
+ found = true;
+ break;
+ }
+ if(!found) {tagDirectives_ ~= defaultPair; }
+ }
+
+ return value;
+ }
+
+ /**
+ * block_node_or_indentless_sequence ::= ALIAS
+ * | properties (block_content | indentless_block_sequence)?
+ * | block_content
+ * | indentless_block_sequence
+ * block_node ::= ALIAS
+ * | properties block_content?
+ * | block_content
+ * flow_node ::= ALIAS
+ * | properties flow_content?
+ * | flow_content
+ * properties ::= TAG ANCHOR? | ANCHOR TAG?
+ * block_content ::= block_collection | flow_collection | SCALAR
+ * flow_content ::= flow_collection | SCALAR
+ * block_collection ::= block_sequence | block_mapping
+ * flow_collection ::= flow_sequence | flow_mapping
+ */
+
+ ///Parse a node.
+ Event parseNode(const Flag!"block" block,
+ const Flag!"indentlessSequence" indentlessSequence = No.indentlessSequence)
+ @trusted
+ {
+ if(scanner_.front.id == TokenID.alias_)
+ {
+ const token = scanner_.front;
+ scanner_.popFront();
+ state_ = popState();
+ return aliasEvent(token.startMark, token.endMark,
+ cast(string)token.value);
+ }
+
+ string anchor;
+ string tag;
+ Mark startMark, endMark, tagMark;
+ bool invalidMarks = true;
+ // The index in the tag string where tag handle ends and tag suffix starts.
+ uint tagHandleEnd;
+
+ //Get anchor/tag if detected. Return false otherwise.
+ bool get(const TokenID id, const Flag!"first" first, ref string target) @safe
+ {
+ if(scanner_.front.id != id){return false;}
+ invalidMarks = false;
+ const token = scanner_.front;
+ scanner_.popFront();
+ if(first){startMark = token.startMark;}
+ if(id == TokenID.tag)
+ {
+ tagMark = token.startMark;
+ tagHandleEnd = token.valueDivider;
+ }
+ endMark = token.endMark;
+ target = token.value.idup;
+ return true;
+ }
+
+ //Anchor and/or tag can be in any order.
+ if(get(TokenID.anchor, Yes.first, anchor)){get(TokenID.tag, No.first, tag);}
+ else if(get(TokenID.tag, Yes.first, tag)) {get(TokenID.anchor, No.first, anchor);}
+
+ if(tag !is null){tag = processTag(tag, tagHandleEnd, startMark, tagMark);}
+
+ if(invalidMarks)
+ {
+ startMark = endMark = scanner_.front.startMark;
+ }
+
+ bool implicit = (tag is null || tag == "!");
+
+ if(indentlessSequence && scanner_.front.id == TokenID.blockEntry)
+ {
+ state_ = &parseIndentlessSequenceEntry;
+ return sequenceStartEvent
+ (startMark, scanner_.front.endMark, anchor,
+ tag, implicit, CollectionStyle.block);
+ }
+
+ if(scanner_.front.id == TokenID.scalar)
+ {
+ auto token = scanner_.front;
+ scanner_.popFront();
+ auto value = token.style == ScalarStyle.doubleQuoted
+ ? handleDoubleQuotedScalarEscapes(token.value)
+ : cast(string)token.value;
+
+ implicit = (token.style == ScalarStyle.plain && tag is null) || tag == "!";
+ state_ = popState();
+ return scalarEvent(startMark, token.endMark, anchor, tag,
+ implicit, value, token.style);
+ }
+
+ if(scanner_.front.id == TokenID.flowSequenceStart)
+ {
+ endMark = scanner_.front.endMark;
+ state_ = &parseFlowSequenceEntry!(Yes.first);
+ return sequenceStartEvent(startMark, endMark, anchor, tag,
+ implicit, CollectionStyle.flow);
+ }
+
+ if(scanner_.front.id == TokenID.flowMappingStart)
+ {
+ endMark = scanner_.front.endMark;
+ state_ = &parseFlowMappingKey!(Yes.first);
+ return mappingStartEvent(startMark, endMark, anchor, tag,
+ implicit, CollectionStyle.flow);
+ }
+
+ if(block && scanner_.front.id == TokenID.blockSequenceStart)
+ {
+ endMark = scanner_.front.endMark;
+ state_ = &parseBlockSequenceEntry!(Yes.first);
+ return sequenceStartEvent(startMark, endMark, anchor, tag,
+ implicit, CollectionStyle.block);
+ }
+
+ if(block && scanner_.front.id == TokenID.blockMappingStart)
+ {
+ endMark = scanner_.front.endMark;
+ state_ = &parseBlockMappingKey!(Yes.first);
+ return mappingStartEvent(startMark, endMark, anchor, tag,
+ implicit, CollectionStyle.block);
+ }
+
+ if(anchor !is null || tag !is null)
+ {
+ state_ = popState();
+
+ //PyYAML uses a tuple(implicit, false) for the second last arg here,
+ //but the second bool is never used after that - so we don't use it.
+
+ //Empty scalars are allowed even if a tag or an anchor is specified.
+ return scalarEvent(startMark, endMark, anchor, tag,
+ implicit , "");
+ }
+
+ const token = scanner_.front;
+ throw new ParserException("While parsing a " ~ (block ? "block" : "flow") ~ " node",
+ startMark, "expected node content, but found: "
+ ~ token.idString, token.startMark);
+ }
+
+ /// Handle escape sequences in a double quoted scalar.
+ ///
+ /// Moved here from scanner as it can't always be done in-place with slices.
+ string handleDoubleQuotedScalarEscapes(const(char)[] tokenValue) const @safe
+ {
+ string notInPlace;
+ bool inEscape;
+ auto appender = appender!(string)();
+ for(const(char)[] oldValue = tokenValue; !oldValue.empty();)
+ {
+ const dchar c = oldValue.front();
+ oldValue.popFront();
+
+ if(!inEscape)
+ {
+ if(c != '\\')
+ {
+ if(notInPlace is null) { appender.put(c); }
+ else { notInPlace ~= c; }
+ continue;
+ }
+ // Escape sequence starts with a '\'
+ inEscape = true;
+ continue;
+ }
+
+ import dyaml.escapes;
+ scope(exit) { inEscape = false; }
+
+ // 'Normal' escape sequence.
+ if(c.among!(escapes))
+ {
+ if(notInPlace is null)
+ {
+                    // \L and \P can't be handled in place as they expand into
+                    // many-byte unicode chars
+ if(c != 'L' && c != 'P')
+ {
+ appender.put(dyaml.escapes.fromEscape(c));
+ continue;
+ }
+ // Need to duplicate as we won't fit into
+ // token.value - which is what appender uses
+ notInPlace = appender.data.dup;
+ notInPlace ~= dyaml.escapes.fromEscape(c);
+ continue;
+ }
+ notInPlace ~= dyaml.escapes.fromEscape(c);
+ continue;
+ }
+
+ // Unicode char written in hexadecimal in an escape sequence.
+ if(c.among!(escapeHexCodeList))
+ {
+ // Scanner has already checked that the hex string is valid.
+
+ const hexLength = dyaml.escapes.escapeHexLength(c);
+ // Any hex digits are 1-byte so this works.
+ const(char)[] hex = oldValue[0 .. hexLength];
+ oldValue = oldValue[hexLength .. $];
+ import std.ascii : isHexDigit;
+ assert(!hex.canFind!(d => !d.isHexDigit),
+ "Scanner must ensure the hex string is valid");
+
+ const decoded = cast(dchar)parse!int(hex, 16u);
+ if(notInPlace is null) { appender.put(decoded); }
+ else { notInPlace ~= decoded; }
+ continue;
+ }
+
+ assert(false, "Scanner must handle unsupported escapes");
+ }
+
+ return notInPlace is null ? appender.data : notInPlace;
+ }
+
+ /**
+ * Process a tag string retrieved from a tag token.
+ *
+ * Params: tag = Tag before processing.
+ * handleEnd = Index in tag where tag handle ends and tag suffix
+ * starts.
+ * startMark = Position of the node the tag belongs to.
+ * tagMark = Position of the tag.
+ */
+ string processTag(const string tag, const uint handleEnd,
+ const Mark startMark, const Mark tagMark)
+ const @safe
+ {
+ const handle = tag[0 .. handleEnd];
+ const suffix = tag[handleEnd .. $];
+
+ if(handle.length > 0)
+ {
+ string replacement;
+ foreach(ref pair; tagDirectives_)
+ {
+ if(pair.handle == handle)
+ {
+ replacement = pair.prefix;
+ break;
+ }
+ }
+ //handle must be in tagDirectives_
+ enforce(replacement !is null,
+ new ParserException("While parsing a node", startMark,
+ "found undefined tag handle: " ~ handle, tagMark));
+ return replacement ~ suffix;
+ }
+ return suffix;
+ }
+
+ ///Wrappers to parse nodes.
+ Event parseBlockNode() @safe {return parseNode(Yes.block);}
+ Event parseFlowNode() @safe {return parseNode(No.block);}
+ Event parseBlockNodeOrIndentlessSequence() @safe {return parseNode(Yes.block, Yes.indentlessSequence);}
+
+ ///block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ ///Parse an entry of a block sequence. If first is true, this is the first entry.
+ Event parseBlockSequenceEntry(Flag!"first" first)() @safe
+ {
+ static if(first)
+ {
+ pushMark(scanner_.front.startMark);
+ scanner_.popFront();
+ }
+
+ if(scanner_.front.id == TokenID.blockEntry)
+ {
+ const token = scanner_.front;
+ scanner_.popFront();
+ if(!scanner_.front.id.among!(TokenID.blockEntry, TokenID.blockEnd))
+ {
+ pushState(&parseBlockSequenceEntry!(No.first));
+ return parseBlockNode();
+ }
+
+ state_ = &parseBlockSequenceEntry!(No.first);
+ return processEmptyScalar(token.endMark);
+ }
+
+ if(scanner_.front.id != TokenID.blockEnd)
+ {
+ const token = scanner_.front;
+ throw new ParserException("While parsing a block collection", marks_.data.back,
+ "expected block end, but found " ~ token.idString,
+ token.startMark);
+ }
+
+ state_ = popState();
+ popMark();
+ const token = scanner_.front;
+ scanner_.popFront();
+ return sequenceEndEvent(token.startMark, token.endMark);
+ }
+
+ ///indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ ///Parse an entry of an indentless sequence.
+ Event parseIndentlessSequenceEntry() @safe
+ {
+ if(scanner_.front.id == TokenID.blockEntry)
+ {
+ const token = scanner_.front;
+ scanner_.popFront();
+
+ if(!scanner_.front.id.among!(TokenID.blockEntry, TokenID.key,
+ TokenID.value, TokenID.blockEnd))
+ {
+ pushState(&parseIndentlessSequenceEntry);
+ return parseBlockNode();
+ }
+
+ state_ = &parseIndentlessSequenceEntry;
+ return processEmptyScalar(token.endMark);
+ }
+
+ state_ = popState();
+ const token = scanner_.front;
+ return sequenceEndEvent(token.startMark, token.endMark);
+ }
+
+ /**
+ * block_mapping ::= BLOCK-MAPPING_START
+ * ((KEY block_node_or_indentless_sequence?)?
+ * (VALUE block_node_or_indentless_sequence?)?)*
+ * BLOCK-END
+ */
+
+ ///Parse a key in a block mapping. If first is true, this is the first key.
+ Event parseBlockMappingKey(Flag!"first" first)() @safe
+ {
+ static if(first)
+ {
+ pushMark(scanner_.front.startMark);
+ scanner_.popFront();
+ }
+
+ if(scanner_.front.id == TokenID.key)
+ {
+ const token = scanner_.front;
+ scanner_.popFront();
+
+ if(!scanner_.front.id.among!(TokenID.key, TokenID.value, TokenID.blockEnd))
+ {
+ pushState(&parseBlockMappingValue);
+ return parseBlockNodeOrIndentlessSequence();
+ }
+
+ state_ = &parseBlockMappingValue;
+ return processEmptyScalar(token.endMark);
+ }
+
+ if(scanner_.front.id != TokenID.blockEnd)
+ {
+ const token = scanner_.front;
+ throw new ParserException("While parsing a block mapping", marks_.data.back,
+ "expected block end, but found: " ~ token.idString,
+ token.startMark);
+ }
+
+ state_ = popState();
+ popMark();
+ const token = scanner_.front;
+ scanner_.popFront();
+ return mappingEndEvent(token.startMark, token.endMark);
+ }
+
+ ///Parse a value in a block mapping.
+ Event parseBlockMappingValue() @safe
+ {
+ if(scanner_.front.id == TokenID.value)
+ {
+ const token = scanner_.front;
+ scanner_.popFront();
+
+ if(!scanner_.front.id.among!(TokenID.key, TokenID.value, TokenID.blockEnd))
+ {
+ pushState(&parseBlockMappingKey!(No.first));
+ return parseBlockNodeOrIndentlessSequence();
+ }
+
+ state_ = &parseBlockMappingKey!(No.first);
+ return processEmptyScalar(token.endMark);
+ }
+
+ state_= &parseBlockMappingKey!(No.first);
+ return processEmptyScalar(scanner_.front.startMark);
+ }
+
+ /**
+ * flow_sequence ::= FLOW-SEQUENCE-START
+ * (flow_sequence_entry FLOW-ENTRY)*
+ * flow_sequence_entry?
+ * FLOW-SEQUENCE-END
+ * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ *
+ * Note that while production rules for both flow_sequence_entry and
+ * flow_mapping_entry are equal, their interpretations are different.
+ * For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+     * generates an inline mapping (set syntax).
+ */
+
+ ///Parse an entry in a flow sequence. If first is true, this is the first entry.
+ Event parseFlowSequenceEntry(Flag!"first" first)() @safe
+ {
+ static if(first)
+ {
+ pushMark(scanner_.front.startMark);
+ scanner_.popFront();
+ }
+
+ if(scanner_.front.id != TokenID.flowSequenceEnd)
+ {
+ static if(!first)
+ {
+ if(scanner_.front.id == TokenID.flowEntry)
+ {
+ scanner_.popFront();
+ }
+ else
+ {
+ const token = scanner_.front;
+ throw new ParserException("While parsing a flow sequence", marks_.data.back,
+ "expected ',' or ']', but got: " ~
+ token.idString, token.startMark);
+ }
+ }
+
+ if(scanner_.front.id == TokenID.key)
+ {
+ const token = scanner_.front;
+ state_ = &parseFlowSequenceEntryMappingKey;
+ return mappingStartEvent(token.startMark, token.endMark,
+ null, null, true, CollectionStyle.flow);
+ }
+ else if(scanner_.front.id != TokenID.flowSequenceEnd)
+ {
+ pushState(&parseFlowSequenceEntry!(No.first));
+ return parseFlowNode();
+ }
+ }
+
+ const token = scanner_.front;
+ scanner_.popFront();
+ state_ = popState();
+ popMark();
+ return sequenceEndEvent(token.startMark, token.endMark);
+ }
+
+ ///Parse a key in flow context.
+ Event parseFlowKey(Event delegate() @safe nextState) @safe
+ {
+ const token = scanner_.front;
+ scanner_.popFront();
+
+ if(!scanner_.front.id.among!(TokenID.value, TokenID.flowEntry,
+ TokenID.flowSequenceEnd))
+ {
+ pushState(nextState);
+ return parseFlowNode();
+ }
+
+ state_ = nextState;
+ return processEmptyScalar(token.endMark);
+ }
+
+ ///Parse a mapping key in an entry in a flow sequence.
+ Event parseFlowSequenceEntryMappingKey() @safe
+ {
+ return parseFlowKey(&parseFlowSequenceEntryMappingValue);
+ }
+
+ ///Parse a mapping value in a flow context.
+ Event parseFlowValue(TokenID checkId, Event delegate() @safe nextState)
+ @safe
+ {
+ if(scanner_.front.id == TokenID.value)
+ {
+ const token = scanner_.front;
+ scanner_.popFront();
+ if(!scanner_.front.id.among(TokenID.flowEntry, checkId))
+ {
+ pushState(nextState);
+ return parseFlowNode();
+ }
+
+ state_ = nextState;
+ return processEmptyScalar(token.endMark);
+ }
+
+ state_ = nextState;
+ return processEmptyScalar(scanner_.front.startMark);
+ }
+
+ ///Parse a mapping value in an entry in a flow sequence.
+ Event parseFlowSequenceEntryMappingValue() @safe
+ {
+ return parseFlowValue(TokenID.flowSequenceEnd,
+ &parseFlowSequenceEntryMappingEnd);
+ }
+
+ ///Parse end of a mapping in a flow sequence entry.
+ Event parseFlowSequenceEntryMappingEnd() @safe
+ {
+ state_ = &parseFlowSequenceEntry!(No.first);
+ const token = scanner_.front;
+ return mappingEndEvent(token.startMark, token.startMark);
+ }
+
+ /**
+ * flow_mapping ::= FLOW-MAPPING-START
+ * (flow_mapping_entry FLOW-ENTRY)*
+ * flow_mapping_entry?
+ * FLOW-MAPPING-END
+ * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ */
+
+ ///Parse a key in a flow mapping.
+ Event parseFlowMappingKey(Flag!"first" first)() @safe
+ {
+ static if(first)
+ {
+ pushMark(scanner_.front.startMark);
+ scanner_.popFront();
+ }
+
+ if(scanner_.front.id != TokenID.flowMappingEnd)
+ {
+ static if(!first)
+ {
+ if(scanner_.front.id == TokenID.flowEntry)
+ {
+ scanner_.popFront();
+ }
+ else
+ {
+ const token = scanner_.front;
+ throw new ParserException("While parsing a flow mapping", marks_.data.back,
+ "expected ',' or '}', but got: " ~
+ token.idString, token.startMark);
+ }
+ }
+
+ if(scanner_.front.id == TokenID.key)
+ {
+ return parseFlowKey(&parseFlowMappingValue);
+ }
+
+ if(scanner_.front.id != TokenID.flowMappingEnd)
+ {
+ pushState(&parseFlowMappingEmptyValue);
+ return parseFlowNode();
+ }
+ }
+
+ const token = scanner_.front;
+ scanner_.popFront();
+ state_ = popState();
+ popMark();
+ return mappingEndEvent(token.startMark, token.endMark);
+ }
+
+ ///Parse a value in a flow mapping.
+ Event parseFlowMappingValue() @safe
+ {
+ return parseFlowValue(TokenID.flowMappingEnd, &parseFlowMappingKey!(No.first));
+ }
+
+ ///Parse an empty value in a flow mapping.
+ Event parseFlowMappingEmptyValue() @safe
+ {
+ state_ = &parseFlowMappingKey!(No.first);
+ return processEmptyScalar(scanner_.front.startMark);
+ }
+
+ ///Return an empty scalar.
+ Event processEmptyScalar(const Mark mark) @safe pure nothrow const @nogc
+ {
+ return scalarEvent(mark, mark, null, null, true, "");
+ }
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/queue.d b/src/ext_depends/D-YAML/source/dyaml/queue.d
new file mode 100644
index 0000000..57b0d34
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/queue.d
@@ -0,0 +1,272 @@
+
+// Copyright Ferdinand Majerech 2011-2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.queue;
+
+
+import std.traits : hasMember, hasIndirections;
+
+package:
+
+/// Simple queue implemented as a singly linked list with a tail pointer.
+///
+/// Needed in some D:YAML code that needs a queue-like structure without too much
+/// reallocation that goes with an array.
+///
+/// Allocations are non-GC and are damped by a free-list based on the nodes
+/// that are removed. Note that elements lifetime must be managed
+/// outside.
+struct Queue(T)
+if (!hasMember!(T, "__xdtor"))
+{
+
+private:
+
+    // Linked list node containing one element and pointer to the next node.
+    struct Node
+    {
+        T payload_;
+        Node* next_;
+    }
+
+    // Start of the linked list - first element added in time (end of the queue).
+    Node* first_;
+    // Last element of the linked list - last element added in time (start of the queue).
+    Node* last_;
+    // Free-list of nodes recycled by pop(), reused by makeNewNode().
+    Node* stock;
+
+    // Length of the queue.
+    size_t length_;
+
+    // Allocate a new node, or recycle one from the stock.
+    Node* makeNewNode(T thePayload, Node* theNext = null) @trusted nothrow @nogc
+    {
+        import std.experimental.allocator : make;
+        import std.experimental.allocator.mallocator : Mallocator;
+
+        Node* result;
+        if (stock !is null)
+        {
+            result = stock;
+            stock = result.next_;
+            result.payload_ = thePayload;
+            result.next_ = theNext;
+        }
+        else
+        {
+            result = Mallocator.instance.make!(Node)(thePayload, theNext);
+            // The GC cannot see this malloc'd node, so register it as a range
+            // to keep any GC-managed members of T alive while it exists.
+            static if (hasIndirections!T)
+            {
+                import core.memory : GC;
+                GC.addRange(result, Node.sizeof);
+            }
+        }
+        return result;
+    }
+
+    // Free the stock of available free nodes.
+    void freeStock() @trusted @nogc nothrow
+    {
+        import std.experimental.allocator.mallocator : Mallocator;
+
+        while (stock !is null)
+        {
+            Node* toFree = stock;
+            stock = stock.next_;
+            static if (hasIndirections!T)
+            {
+                import core.memory : GC;
+                GC.removeRange(toFree);
+            }
+            Mallocator.instance.deallocate((cast(ubyte*) toFree)[0 .. Node.sizeof]);
+        }
+    }
+
+public:
+
+    @disable void opAssign(ref Queue);
+    @disable bool opEquals(ref Queue);
+    @disable int opCmp(ref Queue);
+
+    /// Postblit: deep-copy the list so this copy owns its own nodes.
+    this(this) @safe nothrow @nogc
+    {
+        // The free-list pointer was blitted from the source; it must NOT be
+        // shared, or both destructors would deallocate the same nodes
+        // (double free). This copy starts with an empty stock of its own.
+        stock = null;
+        auto node = first_;
+        first_ = null;
+        last_ = null;
+        while (node !is null)
+        {
+            Node* newLast = makeNewNode(node.payload_);
+            if (last_ !is null)
+                last_.next_ = newLast;
+            if (first_ is null)
+                first_ = newLast;
+            last_ = newLast;
+            node = node.next_;
+        }
+    }
+
+    ~this() @safe nothrow @nogc
+    {
+        // Free the recycled nodes first, then reuse freeStock() to release
+        // the nodes still forming the queue itself.
+        freeStock();
+        stock = first_;
+        freeStock();
+    }
+
+    /// Returns a forward range iterating over this queue.
+    auto range() @safe pure nothrow @nogc
+    {
+        static struct Result
+        {
+            private Node* cursor;
+
+            void popFront() @safe pure nothrow @nogc
+            {
+                cursor = cursor.next_;
+            }
+            ref T front() @safe pure nothrow @nogc
+            in(cursor !is null)
+            {
+                return cursor.payload_;
+            }
+            bool empty() @safe pure nothrow @nogc const
+            {
+                return cursor is null;
+            }
+        }
+        return Result(first_);
+    }
+
+    /// Push a new item to the queue.
+    void push(T item) @nogc @safe nothrow
+    {
+        Node* newLast = makeNewNode(item);
+        if (last_ !is null)
+            last_.next_ = newLast;
+        if (first_ is null)
+            first_ = newLast;
+        last_ = newLast;
+        ++length_;
+    }
+
+    /// Insert a new item putting it to specified index in the linked list.
+    void insert(T item, const size_t idx) @safe nothrow
+    in
+    {
+        assert(idx <= length_);
+    }
+    do
+    {
+        if (idx == 0)
+        {
+            first_ = makeNewNode(item, first_);
+            // If the queue was empty, the new node is also the tail. Without
+            // this, last_ would stay null and a later push() would append a
+            // node disconnected from first_, corrupting the list.
+            if (last_ is null)
+                last_ = first_;
+            ++length_;
+        }
+        // Adding before last added element, so we can just push.
+        else if (idx == length_)
+        {
+            push(item);
+        }
+        else
+        {
+            // Get the element before one we're inserting.
+            Node* current = first_;
+            foreach (i; 1 .. idx)
+                current = current.next_;
+
+            assert(current);
+            // Insert a new node after current, and put current.next_ behind it.
+            current.next_ = makeNewNode(item, current.next_);
+            ++length_;
+        }
+    }
+
+    /// Returns: The next element in the queue and remove it.
+    T pop() @safe nothrow
+    in
+    {
+        assert(!empty, "Trying to pop an element from an empty queue");
+    }
+    do
+    {
+        T result = peek();
+
+        Node* oldStock = stock;
+        Node* old = first_;
+        first_ = first_.next_;
+
+        // start the stock from the popped element
+        stock = old;
+        old.next_ = null;
+        // add the existing "old" stock to the new first stock element
+        if (oldStock !is null)
+            stock.next_ = oldStock;
+
+        if (--length_ == 0)
+        {
+            assert(first_ is null);
+            last_ = null;
+        }
+
+        return result;
+    }
+
+    /// Returns: The next element in the queue.
+    ref inout(T) peek() @safe pure nothrow inout @nogc
+    in
+    {
+        assert(!empty, "Trying to peek at an element in an empty queue");
+    }
+    do
+    {
+        return first_.payload_;
+    }
+
+    /// Returns: true if the queue is empty, false otherwise.
+    bool empty() @safe pure nothrow const @nogc
+    {
+        return first_ is null;
+    }
+
+    /// Returns: The number of elements in the queue.
+    size_t length() @safe pure nothrow const @nogc
+    {
+        return length_;
+    }
+}
+
+@safe nothrow unittest
+{
+    // push/pop cycling: repeatedly exercises the free-list recycling path.
+    auto queue = Queue!int();
+    assert(queue.empty);
+    foreach (i; 0 .. 65)
+    {
+        queue.push(5);
+        assert(queue.pop() == 5);
+        assert(queue.empty);
+        assert(queue.length_ == 0);
+    }
+
+    int[] array = [1, -1, 2, -2, 3, -3, 4, -4, 5, -5];
+    foreach (i; array)
+    {
+        queue.push(i);
+    }
+
+    // Expected contents after inserting 42 at the front, middle and back.
+    array = 42 ~ array[0 .. 3] ~ 42 ~ array[3 .. $] ~ 42;
+    queue.insert(42, 3);
+    queue.insert(42, 0);
+    queue.insert(42, queue.length);
+
+    // Drain the queue and compare against the expected ordering.
+    int[] array2;
+    while (!queue.empty)
+    {
+        array2 ~= queue.pop();
+    }
+
+    assert(array == array2);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/reader.d b/src/ext_depends/D-YAML/source/dyaml/reader.d
new file mode 100644
index 0000000..9fe42fc
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/reader.d
@@ -0,0 +1,906 @@
+
+// Copyright Ferdinand Majerech 2011-2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.reader;
+
+
+import core.stdc.stdlib;
+import core.stdc.string;
+import core.thread;
+
+import std.algorithm;
+import std.array;
+import std.conv;
+import std.exception;
+import std.range;
+import std.string;
+import std.system;
+import std.typecons;
+import std.utf;
+
+import tinyendian;
+
+import dyaml.encoding;
+import dyaml.exception;
+
+alias isBreak = among!('\n', '\u0085', '\u2028', '\u2029');
+
+package:
+
+
+///Exception thrown at Reader errors.
+class ReaderException : YAMLException
+{
+    /// Construct with an error message; file/line default to the throw site.
+    this(string msg, string file = __FILE__, size_t line = __LINE__)
+        @safe pure nothrow
+    {
+        super("Reader error: " ~ msg, file, line);
+    }
+}
+
+/// Provides an API to read characters from a UTF-8 buffer and build slices into that
+/// buffer to avoid allocations (see SliceBuilder).
+final class Reader
+{
+    private:
+        // Buffer of currently loaded characters.
+        char[] buffer_;
+
+        // Current position within buffer. Only data after this position can be read.
+        size_t bufferOffset_;
+
+        // Index of the current character in the buffer.
+        size_t charIndex_;
+        // Number of characters (code points) in buffer_.
+        size_t characterCount_;
+
+        // File name
+        string name_;
+        // Current line in file.
+        uint line_;
+        // Current column in file.
+        uint column_;
+
+        // Original Unicode encoding of the data.
+        Encoding encoding_;
+
+        version(unittest)
+        {
+            // Endianness of the input before it was converted (for testing)
+            Endian endian_;
+        }
+
+        // The number of consecutive ASCII characters starting at bufferOffset_.
+        //
+        // Used to minimize UTF-8 decoding.
+        size_t upcomingASCII_;
+
+        // Index to buffer_ where the last decoded character starts.
+        size_t lastDecodedBufferOffset_;
+        // Offset, relative to charIndex_, of the last decoded character,
+        // in code points, not chars.
+        size_t lastDecodedCharOffset_;
+
+    public:
+        /// Construct a Reader.
+        ///
+        /// Params:  buffer = Buffer with YAML data. This may be e.g. the entire
+        ///                   contents of a file or a string. $(B will) be modified by
+        ///                   the Reader and other parts of D:YAML (D:YAML tries to
+        ///                   reuse the buffer to minimize memory allocations)
+        ///          name   = File name if the buffer is the contents of a file or
+        ///                   `"<unknown>"` if the buffer is the contents of a string.
+        ///
+        /// Throws:  ReaderException on a UTF decoding error or if there are
+        ///          nonprintable Unicode characters illegal in YAML.
+        this(ubyte[] buffer, string name = "<unknown>") @safe pure
+        {
+            name_ = name;
+            auto endianResult = fixUTFByteOrder(buffer);
+            if(endianResult.bytesStripped > 0)
+            {
+                throw new ReaderException("Size of UTF-16 or UTF-32 input not aligned " ~
+                                          "to 2 or 4 bytes, respectively");
+            }
+
+            version(unittest) { endian_ = endianResult.endian; }
+            encoding_ = endianResult.encoding;
+
+            auto utf8Result = toUTF8(endianResult.array, endianResult.encoding);
+            const msg = utf8Result.errorMessage;
+            if(msg !is null)
+            {
+                throw new ReaderException("Error when converting to UTF-8: " ~ msg);
+            }
+
+            buffer_ = utf8Result.utf8;
+
+            characterCount_ = utf8Result.characterCount;
+            // Check that all characters in buffer are printable.
+            enforce(isPrintableValidUTF8(buffer_),
+                    new ReaderException("Special unicode characters are not allowed"));
+
+            this.sliceBuilder = SliceBuilder(this);
+            checkASCII();
+        }
+
+        /// Get character at specified index relative to current position.
+        ///
+        /// Params:  index = Index of the character to get relative to current position
+        ///                  in the buffer. Can point outside of the buffer; In that
+        ///                  case, '\0' will be returned.
+        ///
+        /// Returns: Character at specified position or '\0' if outside of the buffer.
+        ///
+        // XXX removed; search for 'risky' to find why.
+        // Throws:  ReaderException if trying to read past the end of the buffer.
+        dchar peek(const size_t index) @safe pure
+        {
+            if(index < upcomingASCII_) { return buffer_[bufferOffset_ + index]; }
+            if(characterCount_ <= charIndex_ + index)
+            {
+                // XXX This is risky; revert this if bugs are introduced. We rely on
+                // the assumption that Reader only uses peek() to detect end of buffer.
+                // The test suite passes.
+                // Revert this case here and in other peek() versions if this causes
+                // errors.
+                // throw new ReaderException("Trying to read past the end of the buffer");
+                return '\0';
+            }
+
+            // Optimized path for Scanner code that peeks chars in linear order to
+            // determine the length of some sequence.
+            if(index == lastDecodedCharOffset_)
+            {
+                ++lastDecodedCharOffset_;
+                const char b = buffer_[lastDecodedBufferOffset_];
+                // ASCII
+                if(b < 0x80)
+                {
+                    ++lastDecodedBufferOffset_;
+                    return b;
+                }
+                return decode(buffer_, lastDecodedBufferOffset_);
+            }
+
+            // 'Slow' path where we decode everything up to the requested character.
+            const asciiToTake = min(upcomingASCII_, index);
+            lastDecodedCharOffset_   = asciiToTake;
+            lastDecodedBufferOffset_ = bufferOffset_ + asciiToTake;
+            dchar d;
+            while(lastDecodedCharOffset_ <= index)
+            {
+                d = decodeNext();
+            }
+
+            return d;
+        }
+
+        /// Optimized version of peek() for the case where peek index is 0.
+        dchar peek() @safe pure
+        {
+            if(upcomingASCII_ > 0)            { return buffer_[bufferOffset_]; }
+            if(characterCount_ <= charIndex_) { return '\0'; }
+
+            lastDecodedCharOffset_   = 0;
+            lastDecodedBufferOffset_ = bufferOffset_;
+            return decodeNext();
+        }
+
+        /// Get byte at specified index relative to current position.
+        ///
+        /// Params:  index = Index of the byte to get relative to current position
+        ///                  in the buffer. Can point outside of the buffer; In that
+        ///                  case, '\0' will be returned.
+        ///
+        /// Returns: Byte at specified position or '\0' if outside of the buffer.
+        char peekByte(const size_t index) @safe pure nothrow @nogc
+        {
+            return characterCount_ > (charIndex_ + index) ? buffer_[bufferOffset_ + index] : '\0';
+        }
+
+        /// Optimized version of peekByte() for the case where peek byte index is 0.
+        char peekByte() @safe pure nothrow @nogc
+        {
+            return characterCount_ > charIndex_ ? buffer_[bufferOffset_] : '\0';
+        }
+
+
+        /// Get specified number of characters starting at current position.
+        ///
+        /// Note: This gets only a "view" into the internal buffer, which will be
+        ///       invalidated after other Reader calls. Use SliceBuilder to build slices
+        ///       for permanent use.
+        ///
+        /// Params:  length = Number of characters (code points, not bytes) to get. May
+        ///                   reach past the end of the buffer; in that case the returned
+        ///                   slice will be shorter.
+        ///
+        /// Returns: Characters starting at current position or an empty slice if out of bounds.
+        char[] prefix(const size_t length) @safe pure
+        {
+            return slice(length);
+        }
+
+        /// Get specified number of bytes, not code points, starting at current position.
+        ///
+        /// Note: This gets only a "view" into the internal buffer, which will be
+        ///       invalidated after other Reader calls. Use SliceBuilder to build slices
+        ///       for permanent use.
+        ///
+        /// Params:  length = Number bytes (not code points) to get. May NOT reach past
+        ///                   the end of the buffer; should be used with peek() to avoid
+        ///                   this.
+        ///
+        /// Returns: Bytes starting at current position.
+        char[] prefixBytes(const size_t length) @safe pure nothrow @nogc
+        in(length == 0 || bufferOffset_ + length <= buffer_.length, "prefixBytes out of bounds")
+        {
+            return buffer_[bufferOffset_ .. bufferOffset_ + length];
+        }
+
+        /// Get a slice view of the internal buffer, starting at the current position.
+        ///
+        /// Note: This gets only a "view" into the internal buffer,
+        ///       which get invalidated after other Reader calls.
+        ///
+        /// Params:  end = End of the slice relative to current position. May reach past
+        ///                the end of the buffer; in that case the returned slice will
+        ///                be shorter.
+        ///
+        /// Returns: Slice into the internal buffer or an empty slice if out of bounds.
+        char[] slice(const size_t end) @safe pure
+        {
+            // Fast path in case the caller has already peek()ed all the way to end.
+            if(end == lastDecodedCharOffset_)
+            {
+                return buffer_[bufferOffset_ .. lastDecodedBufferOffset_];
+            }
+
+            const asciiToTake = min(upcomingASCII_, end, buffer_.length);
+            lastDecodedCharOffset_   = asciiToTake;
+            lastDecodedBufferOffset_ = bufferOffset_ + asciiToTake;
+
+            // 'Slow' path - decode everything up to end.
+            while(lastDecodedCharOffset_ < end &&
+                  lastDecodedBufferOffset_ < buffer_.length)
+            {
+                decodeNext();
+            }
+
+            return buffer_[bufferOffset_ .. lastDecodedBufferOffset_];
+        }
+
+        /// Get the next character, moving buffer position beyond it.
+        ///
+        /// Returns: Next character.
+        ///
+        /// Throws:  ReaderException if trying to read past the end of the buffer
+        ///          or if invalid data is read.
+        dchar get() @safe pure
+        {
+            const result = peek();
+            forward();
+            return result;
+        }
+
+        /// Get specified number of characters, moving buffer position beyond them.
+        ///
+        /// Params:  length = Number or characters (code points, not bytes) to get.
+        ///
+        /// Returns: Characters starting at current position.
+        char[] get(const size_t length) @safe pure
+        {
+            auto result = slice(length);
+            forward(length);
+            return result;
+        }
+
+        /// Move current position forward.
+        ///
+        /// Params:  length = Number of characters to move position forward.
+        void forward(size_t length) @safe pure
+        {
+            while(length > 0)
+            {
+                auto asciiToTake = min(upcomingASCII_, length);
+                charIndex_ += asciiToTake;
+                length -= asciiToTake;
+                upcomingASCII_ -= asciiToTake;
+
+                for(; asciiToTake > 0; --asciiToTake)
+                {
+                    const c = buffer_[bufferOffset_++];
+                    // c is ASCII, so we only need to check for ASCII line breaks.
+                    // A '\r' that is the very last byte of the buffer is a line
+                    // break too; guard the lookahead so we never index past the
+                    // end of buffer_.
+                    if(c == '\n' || (c == '\r' &&
+                       (bufferOffset_ >= buffer_.length || buffer_[bufferOffset_] != '\n')))
+                    {
+                        ++line_;
+                        column_ = 0;
+                        continue;
+                    }
+                    ++column_;
+                }
+
+                // If we have used up all upcoming ASCII chars, the next char is
+                // non-ASCII even after this returns, so upcomingASCII_ doesn't need to
+                // be updated - it's zero.
+                if(length == 0) { break; }
+
+                assert(upcomingASCII_ == 0,
+                       "Running unicode handling code but we haven't run out of ASCII chars");
+                assert(bufferOffset_ < buffer_.length,
+                       "Attempted to decode past the end of YAML buffer");
+                assert(buffer_[bufferOffset_] >= 0x80,
+                       "ASCII must be handled by preceding code");
+
+                ++charIndex_;
+                const c = decode(buffer_, bufferOffset_);
+
+                // New line. (can compare with '\n' without decoding since it's ASCII)
+                // (c is non-ASCII here, so the '\r' comparison is always false and
+                // the lookahead is never evaluated.)
+                if(c.isBreak || (c == '\r' && buffer_[bufferOffset_] != '\n'))
+                {
+                    ++line_;
+                    column_ = 0;
+                }
+                else if(c != '\uFEFF') { ++column_; }
+                --length;
+                checkASCII();
+            }
+
+            lastDecodedBufferOffset_ = bufferOffset_;
+            lastDecodedCharOffset_ = 0;
+        }
+
+        /// Move current position forward by one character.
+        void forward() @safe pure
+        {
+            ++charIndex_;
+            lastDecodedBufferOffset_ = bufferOffset_;
+            lastDecodedCharOffset_ = 0;
+
+            // ASCII
+            if(upcomingASCII_ > 0)
+            {
+                --upcomingASCII_;
+                const c = buffer_[bufferOffset_++];
+
+                // Guard the '\n' lookahead: a trailing '\r' at the end of the
+                // buffer is a line break and must not index past buffer_.
+                if(c == '\n' || (c == '\r' &&
+                   (bufferOffset_ >= buffer_.length || buffer_[bufferOffset_] != '\n')))
+                {
+                    ++line_;
+                    column_ = 0;
+                    return;
+                }
+                ++column_;
+                return;
+            }
+
+            // UTF-8
+            assert(bufferOffset_ < buffer_.length,
+                   "Attempted to decode past the end of YAML buffer");
+            assert(buffer_[bufferOffset_] >= 0x80,
+                   "ASCII must be handled by preceding code");
+
+            const c = decode(buffer_, bufferOffset_);
+
+            // New line. (can compare with '\n' without decoding since it's ASCII)
+            // (c is non-ASCII here, so the '\r' comparison is always false.)
+            if(c.isBreak || (c == '\r' && buffer_[bufferOffset_] != '\n'))
+            {
+                ++line_;
+                column_ = 0;
+            }
+            else if(c != '\uFEFF') { ++column_; }
+
+            checkASCII();
+        }
+
+        /// Used to build slices of read data in Reader; to avoid allocations.
+        SliceBuilder sliceBuilder;
+
+        /// Get a string describing current buffer position, used for error messages.
+        Mark mark() const pure nothrow @nogc @safe { return Mark(name_, line_, column_); }
+
+        /// Get file name.
+        string name() const @safe pure nothrow @nogc { return name_; }
+
+        /// Get current line number.
+        uint line() const @safe pure nothrow @nogc { return line_; }
+
+        /// Get current column number.
+        uint column() const @safe pure nothrow @nogc { return column_; }
+
+        /// Get index of the current character in the buffer.
+        size_t charIndex() const @safe pure nothrow @nogc { return charIndex_; }
+
+        /// Get encoding of the input buffer.
+        Encoding encoding() const @safe pure nothrow @nogc { return encoding_; }
+
+    private:
+        // Update upcomingASCII_ (should be called after forward()ing over a UTF-8
+        // sequence)
+        void checkASCII() @safe pure nothrow @nogc
+        {
+            upcomingASCII_ = countASCII(buffer_[bufferOffset_ .. $]);
+        }
+
+        // Decode the next character relative to
+        // lastDecodedCharOffset_/lastDecodedBufferOffset_ and update them.
+        //
+        // Does not advance the buffer position. Used in peek() and slice().
+        dchar decodeNext() @safe pure
+        {
+            assert(lastDecodedBufferOffset_ < buffer_.length,
+                   "Attempted to decode past the end of YAML buffer");
+            const char b = buffer_[lastDecodedBufferOffset_];
+            ++lastDecodedCharOffset_;
+            // ASCII
+            if(b < 0x80)
+            {
+                ++lastDecodedBufferOffset_;
+                return b;
+            }
+
+            return decode(buffer_, lastDecodedBufferOffset_);
+        }
+}
+
+/// Used to build slices of already read data in Reader buffer, avoiding allocations.
+///
+/// Usually these slices point to unchanged Reader data, but sometimes the data is
+/// changed due to how YAML interprets certain characters/strings.
+///
+/// See begin() documentation.
+struct SliceBuilder
+{
+private:
+    // No copying by the user.
+    @disable this(this);
+    @disable void opAssign(ref SliceBuilder);
+
+    // Reader this builder works in.
+    Reader reader_;
+
+    // Start of the slice in reader_.buffer_ (size_t.max while no slice being built)
+    size_t start_ = size_t.max;
+    // End of the slice in reader_.buffer_ (size_t.max while no slice being built)
+    size_t end_ = size_t.max;
+
+    // Stack of slice ends to revert to (see Transaction)
+    //
+    // Very few levels as we don't want arbitrarily nested transactions.
+    size_t[4] endStack_;
+    // The number of elements currently in endStack_.
+    size_t endStackUsed_;
+
+    @safe const pure nothrow @nogc invariant()
+    {
+        if(!inProgress) { return; }
+        assert(end_ <= reader_.bufferOffset_, "Slice ends after buffer position");
+        assert(start_ <= end_, "Slice start after slice end");
+    }
+
+    // Is a slice currently being built?
+    bool inProgress() @safe const pure nothrow @nogc
+    in(start_ == size_t.max ? end_ == size_t.max : end_ != size_t.max, "start_/end_ are not consistent")
+    {
+        return start_ != size_t.max;
+    }
+
+public:
+    /// Begin building a slice.
+    ///
+    /// Only one slice can be built at any given time; before beginning a new slice,
+    /// finish the previous one (if any).
+    ///
+    /// The slice starts at the current position in the Reader buffer. It can only be
+    /// extended up to the current position in the buffer; Reader methods get() and
+    /// forward() move the position. E.g. it is valid to extend a slice by write()-ing
+    /// a string just returned by get() - but not one returned by prefix() unless the
+    /// position has changed since the prefix() call.
+    void begin() @safe pure nothrow @nogc
+    in(!inProgress, "Beginning a slice while another slice is being built")
+    in(endStackUsed_ == 0, "Slice stack not empty at slice begin")
+    {
+
+        start_ = reader_.bufferOffset_;
+        end_   = reader_.bufferOffset_;
+    }
+
+    /// Finish building a slice and return it.
+    ///
+    /// Any Transactions on the slice must be committed or destroyed before the slice
+    /// is finished.
+    ///
+    /// Returns a string; once a slice is finished it is definitive that its contents
+    /// will not be changed.
+    char[] finish() @safe pure nothrow @nogc
+    in(inProgress, "finish called without begin")
+    in(endStackUsed_ == 0, "Finishing a slice with running transactions.")
+    {
+
+        auto result = reader_.buffer_[start_ .. end_];
+        start_ = end_ = size_t.max;
+        return result;
+    }
+
+    /// Write a string to the slice being built.
+    ///
+    /// Data can only be written up to the current position in the Reader buffer.
+    ///
+    /// If str is a string returned by a Reader method, and str starts right after the
+    /// end of the slice being built, the slice is extended (trivial operation).
+    ///
+    /// See_Also: begin
+    void write(scope char[] str) @safe pure nothrow @nogc
+    {
+        assert(inProgress, "write called without begin");
+        assert(end_ <= reader_.bufferOffset_,
+               "AT START: Slice ends after buffer position");
+
+        // Nothing? Already done.
+        if (str.length == 0) { return; }
+        // If str starts at the end of the slice (is a string returned by a Reader
+        // method), just extend the slice to contain str.
+        if(&str[0] == &reader_.buffer_[end_])
+        {
+            end_ += str.length;
+        }
+        // Even if str does not start at the end of the slice, it still may be returned
+        // by a Reader method and point to buffer. So we need to memmove.
+        // (str always lies at or after end_ in the buffer, so copying forward
+        // with copy() is safe even when the ranges overlap.)
+        else
+        {
+            copy(str, reader_.buffer_[end_..end_ + str.length * char.sizeof]);
+            end_ += str.length;
+        }
+    }
+
+    /// Write a character to the slice being built.
+    ///
+    /// Data can only be written up to the current position in the Reader buffer.
+    ///
+    /// See_Also: begin
+    void write(dchar c) @safe pure
+    in(inProgress, "write called without begin")
+    {
+        if(c < 0x80)
+        {
+            reader_.buffer_[end_++] = cast(char)c;
+            return;
+        }
+
+        // We need to encode a non-ASCII dchar into UTF-8
+        char[4] encodeBuf;
+        const bytes = encode(encodeBuf, c);
+        reader_.buffer_[end_ .. end_ + bytes] = encodeBuf[0 .. bytes];
+        end_ += bytes;
+    }
+
+    /// Insert a character to a specified position in the slice.
+    ///
+    /// Enlarges the slice by 1 char. Note that the slice can only extend up to the
+    /// current position in the Reader buffer.
+    ///
+    /// Params:
+    ///
+    /// c        = The character to insert.
+    /// position = Position to insert the character at in code units, not code points.
+    ///            Must be less than slice length(); a previously returned length()
+    ///            can be used.
+    void insert(const dchar c, const size_t position) @safe pure
+    in(inProgress, "insert called without begin")
+    in(start_ + position <= end_, "Trying to insert after the end of the slice")
+    {
+
+        const point       = start_ + position;
+        const movedLength = end_ - point;
+
+        // Encode c into UTF-8
+        char[4] encodeBuf;
+        if(c < 0x80) { encodeBuf[0] = cast(char)c; }
+        const size_t bytes = c < 0x80 ? 1 : encode(encodeBuf, c);
+
+        // Shift the tail of the slice right to make room for the new char.
+        if(movedLength > 0)
+        {
+            copy(reader_.buffer_[point..point + movedLength * char.sizeof],
+                 reader_.buffer_[point + bytes..point + bytes + movedLength * char.sizeof]);
+        }
+        reader_.buffer_[point .. point + bytes] = encodeBuf[0 .. bytes];
+        end_ += bytes;
+    }
+
+    /// Get the current length of the slice.
+    size_t length() @safe const pure nothrow @nogc
+    {
+        return end_ - start_;
+    }
+
+    /// A slice building transaction.
+    ///
+    /// Can be used to save and revert back to slice state.
+    struct Transaction
+    {
+    private:
+        // The slice builder affected by the transaction.
+        SliceBuilder* builder_;
+        // Index of the return point of the transaction in StringBuilder.endStack_.
+        size_t stackLevel_;
+        // True after commit() has been called.
+        bool committed_;
+
+    public:
+        /// Begins a transaction on a SliceBuilder object.
+        ///
+        /// The transaction must end $(B after) any transactions created within the
+        /// transaction but $(B before) the slice is finish()-ed. A transaction can be
+        /// ended either by commit()-ing or reverting through the destructor.
+        ///
+        /// Saves the current state of a slice.
+        this(SliceBuilder* builder) @safe pure nothrow @nogc
+        {
+            builder_ = builder;
+            stackLevel_ = builder_.endStackUsed_;
+            builder_.push();
+        }
+
+        /// Commit changes to the slice.
+        ///
+        /// Ends the transaction - can only be called once, and removes the possibility
+        /// to revert slice state.
+        ///
+        /// Does nothing for a default-initialized transaction (the transaction has not
+        /// been started yet).
+        void commit() @safe pure nothrow @nogc
+        in(!committed_, "Can't commit a transaction more than once")
+        {
+
+            if(builder_ is null) { return; }
+            assert(builder_.endStackUsed_ == stackLevel_ + 1,
+                   "Parent transactions don't fully contain child transactions");
+            builder_.apply();
+            committed_ = true;
+        }
+
+        /// Destroy the transaction and revert it if it hasn't been committed yet.
+        void end() @safe pure nothrow @nogc
+        in(builder_ && builder_.endStackUsed_ == stackLevel_ + 1, "Parent transactions don't fully contain child transactions")
+        {
+            builder_.pop();
+            builder_ = null;
+        }
+
+    }
+
+private:
+    // Push the current end of the slice so we can revert to it if needed.
+    //
+    // Used by Transaction.
+    void push() @safe pure nothrow @nogc
+    in(inProgress, "push called without begin")
+    in(endStackUsed_ < endStack_.length, "Slice stack overflow")
+    {
+        endStack_[endStackUsed_++] = end_;
+    }
+
+    // Pop the current end of endStack_ and set the end of the slice to the popped
+    // value, reverting changes since the old end was pushed.
+    //
+    // Used by Transaction.
+    void pop() @safe pure nothrow @nogc
+    in(inProgress, "pop called without begin")
+    in(endStackUsed_ > 0, "Trying to pop an empty slice stack")
+    {
+        end_ = endStack_[--endStackUsed_];
+    }
+
+    // Pop the current end of endStack_, but keep the current end of the slice, applying
+    // changes made since pushing the old end.
+    //
+    // Used by Transaction.
+    void apply() @safe pure nothrow @nogc
+    in(inProgress, "apply called without begin")
+    in(endStackUsed_ > 0, "Trying to apply an empty slice stack")
+    {
+        --endStackUsed_;
+    }
+}
+
+
+private:
+
+// Convert a UTF-8/16/32 buffer to UTF-8, in-place if possible.
+//
+// Params:
+//
+// input    = Buffer with UTF-8/16/32 data to decode. May be overwritten by the
+//            conversion, in which case the result will be a slice of this buffer.
+// encoding = Encoding of input.
+//
+// Returns:
+//
+// A struct with the following members:
+//
+// $(D string errorMessage)   In case of an error, the error message is stored here. If
+//                            there was no error, errorMessage is NULL. Always check
+//                            this first.
+// $(D char[] utf8)           input converted to UTF-8. May be a slice of input.
+// $(D size_t characterCount) Number of characters (code points) in input.
+auto toUTF8(ubyte[] input, const UTFEncoding encoding) @safe pure nothrow
+{
+    // Documented in function ddoc.
+    struct Result
+    {
+        string errorMessage;
+        char[] utf8;
+        size_t characterCount;
+    }
+
+    Result result;
+
+    // Encode input_ into UTF-8 if it's encoded as UTF-16 or UTF-32.
+    //
+    // Params:
+    //
+    // buffer = The input buffer to encode.
+    // result = A Result struct to put encoded result and any error messages to.
+    //
+    // On error, result.errorMessage will be set.
+    static void encode(C)(C[] input, ref Result result) @safe pure
+    {
+        // We can do UTF-32->UTF-8 in place because all UTF-8 sequences are 4 or
+        // less bytes.
+        static if(is(C == dchar))
+        {
+            char[4] encodeBuf;
+            auto utf8 = cast(char[])input;
+            auto length = 0;
+            foreach(dchar c; input)
+            {
+                ++result.characterCount;
+                // ASCII
+                if(c < 0x80)
+                {
+                    utf8[length++] = cast(char)c;
+                    continue;
+                }
+
+                std.utf.encode(encodeBuf, c);
+                const bytes = codeLength!char(c);
+                utf8[length .. length + bytes] = encodeBuf[0 .. bytes];
+                length += bytes;
+            }
+            result.utf8 = utf8[0 .. length];
+        }
+        // Unfortunately we can't do UTF-16 in place so we just use std.conv.to
+        else
+        {
+            result.characterCount = std.utf.count(input);
+            result.utf8 = input.to!(char[]);
+        }
+    }
+
+    try final switch(encoding)
+    {
+        case UTFEncoding.UTF_8:
+            // Already UTF-8: just validate and count code points in place.
+            result.utf8 = cast(char[])input;
+            result.utf8.validate();
+            result.characterCount = std.utf.count(result.utf8);
+            break;
+        case UTFEncoding.UTF_16:
+            assert(input.length % 2 == 0, "UTF-16 buffer size must be even");
+            encode(cast(wchar[])input, result);
+            break;
+        case UTFEncoding.UTF_32:
+            assert(input.length % 4 == 0, "UTF-32 buffer size must be a multiple of 4");
+            encode(cast(dchar[])input, result);
+            break;
+    }
+    // Conversion/decoding errors are reported through result.errorMessage
+    // rather than thrown, keeping this function nothrow.
+    catch(ConvException e) { result.errorMessage = e.msg; }
+    catch(UTFException e)  { result.errorMessage = e.msg; }
+    catch(Exception e)
+    {
+        assert(false, "Unexpected exception in encode(): " ~ e.msg);
+    }
+
+    return result;
+}
+
+/// Determine if all characters (code points, not bytes) in a string are printable.
+///
+/// A character is acceptable when it is a valid code point and is either
+/// non-control or whitespace (tabs/newlines are allowed, other controls not).
+bool isPrintableValidUTF8(const char[] chars) @safe pure
+{
+    import std.uni : isControl, isWhite;
+    foreach (dchar c; chars)
+    {
+        if (!c.isValidDchar) { return false; }
+        if (c.isControl && !c.isWhite) { return false; }
+    }
+    return true;
+}
+
+/// Counts the number of ASCII characters in buffer until the first UTF-8 sequence.
+///
+/// Used to determine how many characters we can process without decoding.
+size_t countASCII(const(char)[] buffer) @safe pure nothrow @nogc
+{
+    size_t count;
+    foreach (immutable char unit; buffer.byCodeUnit)
+    {
+        if (unit > 0x7F) { break; }
+        ++count;
+    }
+    return count;
+}
+// Unittests.
+
+// Verify that the Reader detects encoding and original endianness from the BOM.
+void testEndian(R)()
+{
+    void endian_test(ubyte[] data, Encoding encoding_expected, Endian endian_expected)
+    {
+        auto reader = new R(data);
+        assert(reader.encoding == encoding_expected);
+        assert(reader.endian_ == endian_expected);
+    }
+    // UTF-16 BOMs: FF FE = little endian, FE FF = big endian; payload is 'z'.
+    ubyte[] little_endian_utf_16 = [0xFF, 0xFE, 0x7A, 0x00];
+    ubyte[] big_endian_utf_16 = [0xFE, 0xFF, 0x00, 0x7A];
+    endian_test(little_endian_utf_16, Encoding.UTF_16, Endian.littleEndian);
+    endian_test(big_endian_utf_16, Encoding.UTF_16, Endian.bigEndian);
+}
+
+// Verify peek()/prefix()/forward() behavior, including '\0' past end of buffer.
+void testPeekPrefixForward(R)()
+{
+    import std.encoding;
+    ubyte[] data = bomTable[BOM.utf8].sequence ~ cast(ubyte[])"data";
+    auto reader = new R(data);
+    assert(reader.peek() == 'd');
+    assert(reader.peek(1) == 'a');
+    assert(reader.peek(2) == 't');
+    assert(reader.peek(3) == 'a');
+    assert(reader.peek(4) == '\0');
+    assert(reader.prefix(4) == "data");
+    // assert(reader.prefix(6) == "data\0");
+    reader.forward(2);
+    assert(reader.peek(1) == 'a');
+    // assert(collectException(reader.peek(3)));
+}
+
+// Verify that UTF-8/16/32 input (with matching BOM) decodes to the same chars.
+void testUTF(R)()
+{
+    import std.encoding;
+    dchar[] data = cast(dchar[])"data";
+    void utf_test(T)(T[] data, BOM bom)
+    {
+        ubyte[] bytes = bomTable[bom].sequence ~
+                        (cast(ubyte[])data)[0 .. data.length * T.sizeof];
+        auto reader = new R(bytes);
+        assert(reader.peek() == 'd');
+        assert(reader.peek(1) == 'a');
+        assert(reader.peek(2) == 't');
+        assert(reader.peek(3) == 'a');
+    }
+    utf_test!char(to!(char[])(data), BOM.utf8);
+    // 'endian' is std.system.endian - pick the BOM matching the host byte order.
+    utf_test!wchar(to!(wchar[])(data), endian == Endian.bigEndian ? BOM.utf16be : BOM.utf16le);
+    utf_test(data, endian == Endian.bigEndian ? BOM.utf32be : BOM.utf32le);
+}
+
+// Verify reading a minimal one-byte buffer and peeking past its end.
+void test1Byte(R)()
+{
+    ubyte[] data = [97]; // 'a'
+
+    auto reader = new R(data);
+    assert(reader.peek() == 'a');
+    assert(reader.peek(1) == '\0');
+    // assert(collectException(reader.peek(2)));
+}
+
+// Run all Reader test helpers against the concrete Reader implementation.
+@system unittest
+{
+    testEndian!Reader();
+    testPeekPrefixForward!Reader();
+    testUTF!Reader();
+    test1Byte!Reader();
+}
+//Issue 257 - https://github.com/dlang-community/D-YAML/issues/257
+// A document consisting of a scalar followed by trailing whitespace must load.
+@safe unittest
+{
+    import dyaml.loader : Loader;
+    auto yaml = "hello ";
+    auto root = Loader.fromString(yaml).load();
+
+    assert(root.isValid);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/representer.d b/src/ext_depends/D-YAML/source/dyaml/representer.d
new file mode 100644
index 0000000..a7ca802
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/representer.d
@@ -0,0 +1,517 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/**
+ * YAML node _representer. Prepares YAML nodes for output. A tutorial can be
+ * found $(LINK2 ../tutorials/custom_types.html, here).
+ *
+ * Code based on $(LINK2 http://www.pyyaml.org, PyYAML).
+ */
+module dyaml.representer;
+
+
+import std.algorithm;
+import std.array;
+import std.base64;
+import std.container;
+import std.conv;
+import std.datetime;
+import std.exception;
+import std.format;
+import std.math;
+import std.typecons;
+import std.string;
+
+import dyaml.exception;
+import dyaml.node;
+import dyaml.serializer;
+import dyaml.style;
+
+package:
+///Exception thrown on Representer errors (e.g. duplicate keys in a map being dumped).
+class RepresenterException : YAMLException
+{
+    // Inherit the standard YAMLException constructors.
+    mixin ExceptionCtors;
+}
+
+/**
+ * Represents YAML nodes as scalar, sequence and mapping nodes ready for output.
+ *
+ * Params:  data                   = Node to represent.
+ *          defaultScalarStyle     = Style applied to scalars with no explicit style.
+ *          defaultCollectionStyle = Style applied to collections with no explicit style.
+ *
+ * Returns: Node tree tagged and styled for serialization.
+ */
+Node representData(const Node data, ScalarStyle defaultScalarStyle, CollectionStyle defaultCollectionStyle) @safe
+{
+    Node result;
+    final switch(data.type)
+    {
+        case NodeType.null_:
+            result = representNull();
+            break;
+        case NodeType.merge:
+            // YAML merge nodes are not emitted; result stays invalid (Node.init).
+            break;
+        case NodeType.boolean:
+            result = representBool(data);
+            break;
+        case NodeType.integer:
+            result = representLong(data);
+            break;
+        case NodeType.decimal:
+            result = representReal(data);
+            break;
+        case NodeType.binary:
+            result = representBytes(data);
+            break;
+        case NodeType.timestamp:
+            result = representSysTime(data);
+            break;
+        case NodeType.string:
+            result = representString(data);
+            break;
+        case NodeType.mapping:
+            result = representPairs(data, defaultScalarStyle, defaultCollectionStyle);
+            break;
+        case NodeType.sequence:
+            result = representNodes(data, defaultScalarStyle, defaultCollectionStyle);
+            break;
+        case NodeType.invalid:
+            assert(0);
+    }
+
+    // Apply the default styles where the represented node specifies none.
+    final switch (result.nodeID)
+    {
+        case NodeID.scalar:
+            if (result.scalarStyle == ScalarStyle.invalid)
+            {
+                result.scalarStyle = defaultScalarStyle;
+            }
+            break;
+        case NodeID.sequence, NodeID.mapping:
+            if (defaultCollectionStyle != CollectionStyle.invalid)
+            {
+                result.collectionStyle = defaultCollectionStyle;
+            }
+            // Fixed: added the missing break — D forbids implicit fall-through
+            // from a non-empty case into the next one.
+            break;
+        case NodeID.invalid:
+            break;
+    }
+
+    //Override tag if specified.
+    if(data.tag_ !is null){result.tag_ = data.tag_;}
+
+    //Remember style if this was loaded before.
+    if(data.scalarStyle != ScalarStyle.invalid)
+    {
+        result.scalarStyle = data.scalarStyle;
+    }
+    if(data.collectionStyle != CollectionStyle.invalid)
+    {
+        result.collectionStyle = data.collectionStyle;
+    }
+    return result;
+}
+
+// representData: merge nodes are dropped (result is Node.init).
+@safe unittest
+{
+    // We don't emit yaml merge nodes.
+    assert(representData(Node(YAMLMerge()), ScalarStyle.invalid, CollectionStyle.invalid) == Node.init);
+}
+
+// representData: null values become the canonical "null" scalar.
+@safe unittest
+{
+    assert(representData(Node(YAMLNull()), ScalarStyle.invalid, CollectionStyle.invalid) == Node("null", "tag:yaml.org,2002:null"));
+}
+
+// representData: a null string is represented as YAML null, not an empty string.
+@safe unittest
+{
+    assert(representData(Node(cast(string)null), ScalarStyle.invalid, CollectionStyle.invalid) == Node("null", "tag:yaml.org,2002:null"));
+    assert(representData(Node("Hello world!"), ScalarStyle.invalid, CollectionStyle.invalid) == Node("Hello world!", "tag:yaml.org,2002:str"));
+}
+
+// representData: integers.
+@safe unittest
+{
+    assert(representData(Node(64), ScalarStyle.invalid, CollectionStyle.invalid) == Node("64", "tag:yaml.org,2002:int"));
+}
+
+// representData: booleans.
+@safe unittest
+{
+    assert(representData(Node(true), ScalarStyle.invalid, CollectionStyle.invalid) == Node("true", "tag:yaml.org,2002:bool"));
+    assert(representData(Node(false), ScalarStyle.invalid, CollectionStyle.invalid) == Node("false", "tag:yaml.org,2002:bool"));
+}
+
+// representData: reals, including the .nan/.inf special forms.
+@safe unittest
+{
+    // Float comparison is pretty unreliable...
+    auto result = representData(Node(1.0), ScalarStyle.invalid, CollectionStyle.invalid);
+    assert(approxEqual(result.as!string.to!real, 1.0));
+    assert(result.tag == "tag:yaml.org,2002:float");
+
+    assert(representData(Node(real.nan), ScalarStyle.invalid, CollectionStyle.invalid) == Node(".nan", "tag:yaml.org,2002:float"));
+    assert(representData(Node(real.infinity), ScalarStyle.invalid, CollectionStyle.invalid) == Node(".inf", "tag:yaml.org,2002:float"));
+    assert(representData(Node(-real.infinity), ScalarStyle.invalid, CollectionStyle.invalid) == Node("-.inf", "tag:yaml.org,2002:float"));
+}
+
+// representData: timestamps are emitted in ISO-8601 extended form.
+@safe unittest
+{
+    assert(representData(Node(SysTime(DateTime(2000, 3, 14, 12, 34, 56), UTC())), ScalarStyle.invalid, CollectionStyle.invalid) == Node("2000-03-14T12:34:56Z", "tag:yaml.org,2002:timestamp"));
+}
+
+// representData: sequences and sets.
+@safe unittest
+{
+    // Empty set becomes an empty mapping; an empty seq stays an empty sequence.
+    assert(representData(Node(Node[].init, "tag:yaml.org,2002:set"), ScalarStyle.invalid, CollectionStyle.invalid) == Node(Node.Pair[].init, "tag:yaml.org,2002:set"));
+    assert(representData(Node(Node[].init, "tag:yaml.org,2002:seq"), ScalarStyle.invalid, CollectionStyle.invalid) == Node(Node[].init, "tag:yaml.org,2002:seq"));
+    {
+        auto nodes = [
+            Node("a"),
+            Node("b"),
+            Node("c"),
+        ];
+        // Set elements become keys of a mapping whose values are all null.
+        assert(representData(Node(nodes, "tag:yaml.org,2002:set"), ScalarStyle.invalid, CollectionStyle.invalid) ==
+            Node([
+                Node.Pair(
+                    Node("a", "tag:yaml.org,2002:str"),
+                    Node("null", "tag:yaml.org,2002:null")
+                ),
+                Node.Pair(
+                    Node("b", "tag:yaml.org,2002:str"),
+                    Node("null", "tag:yaml.org,2002:null")
+                ),
+                Node.Pair(
+                    Node("c", "tag:yaml.org,2002:str"),
+                    Node("null", "tag:yaml.org,2002:null")
+                )
+            ], "tag:yaml.org,2002:set"));
+    }
+    {
+        auto nodes = [
+            Node("a"),
+            Node("b"),
+            Node("c"),
+        ];
+        assert(representData(Node(nodes, "tag:yaml.org,2002:seq"), ScalarStyle.invalid, CollectionStyle.invalid) ==
+            Node([
+                Node("a", "tag:yaml.org,2002:str"),
+                Node("b", "tag:yaml.org,2002:str"),
+                Node("c", "tag:yaml.org,2002:str")
+            ], "tag:yaml.org,2002:seq"));
+    }
+}
+
+// representData: maps, ordered maps (omap) and pairs.
+@safe unittest
+{
+    assert(representData(Node(Node.Pair[].init, "tag:yaml.org,2002:omap"), ScalarStyle.invalid, CollectionStyle.invalid) == Node(Node[].init, "tag:yaml.org,2002:omap"));
+    assert(representData(Node(Node.Pair[].init, "tag:yaml.org,2002:pairs"), ScalarStyle.invalid, CollectionStyle.invalid) == Node(Node[].init, "tag:yaml.org,2002:pairs"));
+    assert(representData(Node(Node.Pair[].init, "tag:yaml.org,2002:map"), ScalarStyle.invalid, CollectionStyle.invalid) == Node(Node.Pair[].init, "tag:yaml.org,2002:map"));
+    {
+        // Duplicate keys are rejected in ordered maps...
+        auto nodes = [
+            Node.Pair("a", "b"),
+            Node.Pair("a", "c")
+        ];
+        assertThrown(representData(Node(nodes, "tag:yaml.org,2002:omap"), ScalarStyle.invalid, CollectionStyle.invalid));
+    }
+    // Yeah, this gets ugly really fast.
+    {
+        // ...but allowed in pairs, which become a sequence of single-pair maps.
+        auto nodes = [
+            Node.Pair("a", "b"),
+            Node.Pair("a", "c")
+        ];
+        assert(representData(Node(nodes, "tag:yaml.org,2002:pairs"), ScalarStyle.invalid, CollectionStyle.invalid) ==
+            Node([
+                Node(
+                    [Node.Pair(
+                        Node("a", "tag:yaml.org,2002:str"),
+                        Node("b", "tag:yaml.org,2002:str")
+                    )],
+                    "tag:yaml.org,2002:map"),
+                Node(
+                    [Node.Pair(
+                        Node("a", "tag:yaml.org,2002:str"),
+                        Node("c", "tag:yaml.org,2002:str")
+                    )],
+                    "tag:yaml.org,2002:map"),
+            ], "tag:yaml.org,2002:pairs"));
+    }
+    {
+        // Duplicate keys are also rejected in plain maps.
+        auto nodes = [
+            Node.Pair("a", "b"),
+            Node.Pair("a", "c")
+        ];
+        assertThrown(representData(Node(nodes, "tag:yaml.org,2002:map"), ScalarStyle.invalid, CollectionStyle.invalid));
+    }
+    {
+        auto nodes = [
+            Node.Pair("a", "b"),
+            Node.Pair("c", "d")
+        ];
+        assert(representData(Node(nodes, "tag:yaml.org,2002:omap"), ScalarStyle.invalid, CollectionStyle.invalid) ==
+            Node([
+                Node([
+                    Node.Pair(
+                        Node("a", "tag:yaml.org,2002:str"),
+                        Node("b", "tag:yaml.org,2002:str")
+                    )
+                ], "tag:yaml.org,2002:map"),
+                Node([
+                    Node.Pair(
+                        Node("c", "tag:yaml.org,2002:str"),
+                        Node("d", "tag:yaml.org,2002:str")
+                    )
+                ], "tag:yaml.org,2002:map"
+            )], "tag:yaml.org,2002:omap"));
+    }
+    {
+        auto nodes = [
+            Node.Pair("a", "b"),
+            Node.Pair("c", "d")
+        ];
+        assert(representData(Node(nodes, "tag:yaml.org,2002:map"), ScalarStyle.invalid, CollectionStyle.invalid) ==
+            Node([
+                Node.Pair(
+                    Node("a", "tag:yaml.org,2002:str"),
+                    Node("b", "tag:yaml.org,2002:str")
+                ),
+                Node.Pair(
+                    Node("c", "tag:yaml.org,2002:str"),
+                    Node("d", "tag:yaml.org,2002:str")
+                ),
+            ], "tag:yaml.org,2002:map"));
+    }
+}
+
+private:
+
+/// Build the canonical YAML null scalar node.
+Node representNull() @safe
+{
+    enum nullTag = "tag:yaml.org,2002:null";
+    return Node("null", nullTag);
+}
+
+/// Represent a string node as a string scalar; null strings become YAML null.
+Node representString(const Node node) @safe
+{
+    auto text = node.as!string;
+    if(text is null)
+    {
+        return Node("null", "tag:yaml.org,2002:null");
+    }
+    return Node(text, "tag:yaml.org,2002:str");
+}
+
+/// Represent a binary node as a base64-encoded scalar in literal style.
+Node representBytes(const Node node) @safe
+{
+    const ubyte[] bytes = node.as!(ubyte[]);
+    if(bytes is null)
+    {
+        return Node("null", "tag:yaml.org,2002:null");
+    }
+
+    auto result = Node(Base64.encode(bytes).idup, "tag:yaml.org,2002:binary");
+    result.scalarStyle = ScalarStyle.literal;
+    return result;
+}
+
+/// Represent a bool node as a "true"/"false" scalar.
+Node representBool(const Node node) @safe
+{
+    const flag = node.as!bool;
+    return Node(flag ? "true" : "false", "tag:yaml.org,2002:bool");
+}
+
+/// Represent an integer node as a base-10 integer scalar.
+Node representLong(const Node node) @safe
+{
+    return Node(to!string(node.as!long), "tag:yaml.org,2002:int");
+}
+
+/// Represent a real node as a floating point scalar; NaN and infinities use
+/// the YAML special forms .nan/.inf/-.inf.
+Node representReal(const Node node) @safe
+{
+    const real number = node.as!real;
+    string repr;
+    if(isNaN(number))
+    {
+        repr = ".nan";
+    }
+    else if(number == real.infinity)
+    {
+        repr = ".inf";
+    }
+    else if(number == -real.infinity)
+    {
+        repr = "-.inf";
+    }
+    else
+    {
+        auto writer = appender!string();
+        formattedWrite(writer, "%12f", number);
+        repr = writer.data.strip();
+    }
+    return Node(repr, "tag:yaml.org,2002:float");
+}
+
+/// Represent a SysTime node as an ISO-8601 extended timestamp scalar.
+Node representSysTime(const Node node) @safe
+{
+    auto stamp = node.as!SysTime.toISOExtString();
+    return Node(stamp, "tag:yaml.org,2002:timestamp");
+}
+
+//Represent a sequence _node as sequence/set.
+//
+//A set is emitted as a mapping whose values are all null; everything else
+//becomes a plain sequence. Flow style is used unless any represented item
+//requires block style.
+Node representNodes(const Node node, ScalarStyle defaultScalarStyle, CollectionStyle defaultCollectionStyle) @safe
+{
+    auto nodes = node.as!(Node[]);
+    if(node.tag_ == "tag:yaml.org,2002:set")
+    {
+        //YAML sets are mapping with null values.
+        Node.Pair[] pairs;
+        pairs.length = nodes.length;
+
+        // First pair each set element with a null value node...
+        foreach(idx, key; nodes)
+        {
+            pairs[idx] = Node.Pair(key, Node("null", "tag:yaml.org,2002:null"));
+        }
+        Node.Pair[] value;
+        value.length = pairs.length;
+
+        // ...then represent each pair (so default styles are applied to both
+        // members), downgrading to block style if any pair needs it.
+        auto bestStyle = CollectionStyle.flow;
+        foreach(idx, pair; pairs)
+        {
+            value[idx] = Node.Pair(representData(pair.key, defaultScalarStyle, defaultCollectionStyle), representData(pair.value, defaultScalarStyle, defaultCollectionStyle));
+            if(value[idx].shouldUseBlockStyle)
+            {
+                bestStyle = CollectionStyle.block;
+            }
+        }
+
+        auto newNode = Node(value, node.tag_);
+        newNode.collectionStyle = bestStyle;
+        return newNode;
+    }
+    else
+    {
+        Node[] value;
+        value.length = nodes.length;
+
+        // Any non-scalar item, or a scalar with a non-plain explicit style,
+        // forces block style for the whole sequence.
+        auto bestStyle = CollectionStyle.flow;
+        foreach(idx, item; nodes)
+        {
+            value[idx] = representData(item, defaultScalarStyle, defaultCollectionStyle);
+            const isScalar = value[idx].nodeID == NodeID.scalar;
+            const s = value[idx].scalarStyle;
+            if(!isScalar || (s != ScalarStyle.invalid && s != ScalarStyle.plain))
+            {
+                bestStyle = CollectionStyle.block;
+            }
+        }
+
+        auto newNode = Node(value, "tag:yaml.org,2002:seq");
+        newNode.collectionStyle = bestStyle;
+        return newNode;
+    }
+}
+
+/// True if a represented node cannot be safely emitted in flow style
+/// (non-scalar, or a scalar with an explicit non-plain style).
+bool shouldUseBlockStyle(const Node value) @safe
+{
+    if(value.nodeID != NodeID.scalar)
+    {
+        return true;
+    }
+    const style = value.scalarStyle;
+    return style != ScalarStyle.invalid && style != ScalarStyle.plain;
+}
+/// True if either member of a key/value pair cannot be emitted in flow style.
+///
+/// Consistency fix: delegates to the Node overload instead of duplicating its
+/// scalar/style test separately for the key and the value.
+bool shouldUseBlockStyle(const Node.Pair value) @safe
+{
+    return value.key.shouldUseBlockStyle || value.value.shouldUseBlockStyle;
+}
+
+//Represent a mapping _node as map/ordered map/pairs.
+//
+//Throws: RepresenterException on duplicate keys in a map or an ordered map.
+Node representPairs(const Node node, ScalarStyle defaultScalarStyle, CollectionStyle defaultCollectionStyle) @safe
+{
+    auto pairs = node.as!(Node.Pair[]);
+
+    // True if any key occurs more than once.
+    bool hasDuplicates(const Node.Pair[] pairs) @safe
+    {
+        //TODO this should be replaced by something with deterministic memory allocation.
+        auto keys = redBlackTree!Node();
+        foreach(pair; pairs)
+        {
+            if(pair.key in keys){return true;}
+            keys.insert(pair.key);
+        }
+        return false;
+    }
+
+    // Convert a mapping to a sequence of single-pair mappings (omap/pairs form).
+    Node[] mapToSequence(const Node.Pair[] pairs) @safe
+    {
+        Node[] nodes;
+        nodes.length = pairs.length;
+        foreach(idx, pair; pairs)
+        {
+            auto value = Node.Pair(representData(pair.key, defaultScalarStyle, defaultCollectionStyle), representData(pair.value, defaultScalarStyle, defaultCollectionStyle));
+            // Fixed: choose the style from the *represented* pair. Previously
+            // the style was computed from a default-initialized pair before the
+            // assignment above, which unconditionally selected block style.
+            const bestStyle = value.shouldUseBlockStyle ? CollectionStyle.block : CollectionStyle.flow;
+
+            auto newNode = Node([value], "tag:yaml.org,2002:map");
+            newNode.collectionStyle = bestStyle;
+            nodes[idx] = newNode;
+        }
+        return nodes;
+    }
+
+    if(node.tag_ == "tag:yaml.org,2002:omap")
+    {
+        enforce(!hasDuplicates(pairs),
+                new RepresenterException("Duplicate entry in an ordered map"));
+        auto sequence = mapToSequence(pairs);
+        Node[] value;
+        value.length = sequence.length;
+
+        // Represent each single-pair map; downgrade to block style if needed.
+        auto bestStyle = CollectionStyle.flow;
+        foreach(idx, item; sequence)
+        {
+            value[idx] = representData(item, defaultScalarStyle, defaultCollectionStyle);
+            if(value[idx].shouldUseBlockStyle)
+            {
+                bestStyle = CollectionStyle.block;
+            }
+        }
+
+        auto newNode = Node(value, node.tag_);
+        newNode.collectionStyle = bestStyle;
+        return newNode;
+    }
+    else if(node.tag_ == "tag:yaml.org,2002:pairs")
+    {
+        // Pairs allow duplicate keys, so no duplicate check here.
+        auto sequence = mapToSequence(pairs);
+        Node[] value;
+        value.length = sequence.length;
+
+        auto bestStyle = CollectionStyle.flow;
+        foreach(idx, item; sequence)
+        {
+            value[idx] = representData(item, defaultScalarStyle, defaultCollectionStyle);
+            if(value[idx].shouldUseBlockStyle)
+            {
+                bestStyle = CollectionStyle.block;
+            }
+        }
+
+        auto newNode = Node(value, node.tag_);
+        newNode.collectionStyle = bestStyle;
+        return newNode;
+    }
+    else
+    {
+        enforce(!hasDuplicates(pairs),
+                new RepresenterException("Duplicate entry in an unordered map"));
+        Node.Pair[] value;
+        value.length = pairs.length;
+
+        auto bestStyle = CollectionStyle.flow;
+        foreach(idx, pair; pairs)
+        {
+            value[idx] = Node.Pair(representData(pair.key, defaultScalarStyle, defaultCollectionStyle), representData(pair.value, defaultScalarStyle, defaultCollectionStyle));
+            if(value[idx].shouldUseBlockStyle)
+            {
+                bestStyle = CollectionStyle.block;
+            }
+        }
+
+        auto newNode = Node(value, "tag:yaml.org,2002:map");
+        newNode.collectionStyle = bestStyle;
+        return newNode;
+    }
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/resolver.d b/src/ext_depends/D-YAML/source/dyaml/resolver.d
new file mode 100644
index 0000000..ceed1e5
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/resolver.d
@@ -0,0 +1,261 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/**
+ * Implements a class that resolves YAML tags. This can be used to implicitly
+ * resolve tags for custom data types, removing the need to explicitly
+ * specify tags in YAML. A tutorial can be found
+ * $(LINK2 ../tutorials/custom_types.html, here).
+ *
+ * Code based on $(LINK2 http://www.pyyaml.org, PyYAML).
+ */
+module dyaml.resolver;
+
+
+import std.conv;
+import std.regex;
+import std.typecons;
+import std.utf;
+
+import dyaml.node;
+import dyaml.exception;
+
+
+/// Type of `regexes`
+private alias RegexType = Tuple!(string, "tag", const Regex!char, "regexp", string, "chars");
+
+// Table of default implicit resolvers: tag, matching regex, possible first chars.
+private immutable RegexType[] regexes;
+
+shared static this() @safe
+{
+    RegexType[] tmp;
+    tmp ~= RegexType("tag:yaml.org,2002:bool",
+                     regex(r"^(?:yes|Yes|YES|no|No|NO|true|True|TRUE" ~
+                           "|false|False|FALSE|on|On|ON|off|Off|OFF)$"),
+                     "yYnNtTfFoO");
+    // Fixed: the first segment below is a WYSIWYG (r"...") string, so "\\."
+    // denoted a literal backslash followed by "any character" in the regex.
+    // The intended pattern is an escaped dot, which in an r-string is r"\.".
+    tmp ~= RegexType("tag:yaml.org,2002:float",
+                     regex(r"^(?:[-+]?([0-9][0-9_]*)\.[0-9_]*" ~
+                           "(?:[eE][-+][0-9]+)?|[-+]?(?:[0-9][0-9_]" ~
+                           "*)?\\.[0-9_]+(?:[eE][-+][0-9]+)?|[-+]?" ~
+                           "[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]" ~
+                           "*|[-+]?\\.(?:inf|Inf|INF)|\\." ~
+                           "(?:nan|NaN|NAN))$"),
+                     "-+0123456789.");
+    tmp ~= RegexType("tag:yaml.org,2002:int",
+                     regex(r"^(?:[-+]?0b[0-1_]+" ~
+                           "|[-+]?0[0-7_]+" ~
+                           "|[-+]?(?:0|[1-9][0-9_]*)" ~
+                           "|[-+]?0x[0-9a-fA-F_]+" ~
+                           "|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$"),
+                     "-+0123456789");
+    tmp ~= RegexType("tag:yaml.org,2002:merge", regex(r"^<<$"), "<");
+    tmp ~= RegexType("tag:yaml.org,2002:null",
+                     regex(r"^$|^(?:~|null|Null|NULL)$"), "~nN\0");
+    tmp ~= RegexType("tag:yaml.org,2002:timestamp",
+                     regex(r"^[0-9][0-9][0-9][0-9]-[0-9][0-9]-" ~
+                           "[0-9][0-9]|[0-9][0-9][0-9][0-9]-[0-9]" ~
+                           "[0-9]?-[0-9][0-9]?[Tt]|[ \t]+[0-9]" ~
+                           "[0-9]?:[0-9][0-9]:[0-9][0-9]" ~
+                           "(?:\\.[0-9]*)?(?:[ \t]*Z|[-+][0-9]" ~
+                           "[0-9]?(?::[0-9][0-9])?)?$"),
+                     "0123456789");
+    tmp ~= RegexType("tag:yaml.org,2002:value", regex(r"^=$"), "=");
+
+
+    //The following resolver is only for documentation purposes. It cannot work
+    //because plain scalars cannot start with '!', '&', or '*'.
+    tmp ~= RegexType("tag:yaml.org,2002:yaml", regex(r"^(?:!|&|\*)$"), "!&*");
+
+    regexes = () @trusted { return cast(immutable)tmp; }();
+}
+
+/**
+ * Resolves YAML tags (data types).
+ *
+ * Can be used to implicitly resolve custom data types of scalar values.
+ */
+struct Resolver
+{
+    private:
+        // Default tag to use for scalars.
+        string defaultScalarTag_ = "tag:yaml.org,2002:str";
+        // Default tag to use for sequences.
+        string defaultSequenceTag_ = "tag:yaml.org,2002:seq";
+        // Default tag to use for mappings.
+        string defaultMappingTag_ = "tag:yaml.org,2002:map";
+
+        /*
+         * Arrays of scalar resolver tuples indexed by starting character of a scalar.
+         *
+         * Each tuple stores regular expression the scalar must match,
+         * and tag to assign to it if it matches.
+         */
+        Tuple!(string, const Regex!char)[][dchar] yamlImplicitResolvers_;
+
+    package:
+        /// Construct a Resolver preloaded with the standard implicit resolvers
+        /// from the module-level `regexes` table.
+        static auto withDefaultResolvers() @safe
+        {
+            Resolver resolver;
+            foreach(pair; regexes)
+            {
+                resolver.addImplicitResolver(pair.tag, pair.regexp, pair.chars);
+            }
+            return resolver;
+        }
+
+    public:
+        // Comparing resolvers is not meaningful; disable it explicitly.
+        @disable bool opEquals(ref Resolver);
+        @disable int opCmp(ref Resolver);
+
+        /**
+         * Add an implicit scalar resolver.
+         *
+         * If a scalar matches regexp and starts with any character in first,
+         * its _tag is set to tag. If it matches more than one resolver _regexp
+         * resolvers added _first override ones added later. Default resolvers
+         * override any user specified resolvers, but they can be disabled in
+         * Resolver constructor.
+         *
+         * If a scalar is not resolved to anything, it is assigned the default
+         * YAML _tag for strings.
+         *
+         * Params: tag = Tag to resolve to.
+         *         regexp = Regular expression the scalar must match to have this _tag.
+         *         first = String of possible starting characters of the scalar.
+         *
+         */
+        void addImplicitResolver(string tag, const Regex!char regexp, string first)
+            pure @safe
+        {
+            // Register the (tag, regexp) pair under every possible first character.
+            foreach(const dchar c; first)
+            {
+                if((c in yamlImplicitResolvers_) is null)
+                {
+                    yamlImplicitResolvers_[c] = [];
+                }
+                yamlImplicitResolvers_[c] ~= tuple(tag, regexp);
+            }
+        }
+        /// Resolve scalars starting with 'A' to !_tag
+        @safe unittest
+        {
+            import std.file : write;
+            import std.regex : regex;
+            import dyaml.loader : Loader;
+            import dyaml.resolver : Resolver;
+
+            write("example.yaml", "A");
+
+            auto loader = Loader.fromFile("example.yaml");
+            loader.resolver.addImplicitResolver("!tag", regex("A.*"), "A");
+
+            auto node = loader.load();
+            assert(node.tag == "!tag");
+        }
+
+    package:
+        /**
+         * Resolve tag of a node.
+         *
+         * Params: kind = Type of the node.
+         *         tag = Explicit tag of the node, if any.
+         *         value = Value of the node, if any.
+         *         implicit = Should the node be implicitly resolved?
+         *
+         * If the tag is already specified and not non-specific, that tag will
+         * be returned.
+         *
+         * Returns: Resolved tag.
+         */
+        string resolve(const NodeID kind, const string tag, const string value,
+                       const bool implicit) @safe
+        {
+            import std.array : empty, front;
+            // An explicit tag other than the non-specific "!" wins outright.
+            if((tag !is null) && (tag != "!"))
+            {
+                return tag;
+            }
+
+            final switch (kind)
+            {
+                case NodeID.scalar:
+                    if(!implicit)
+                    {
+                        return defaultScalarTag_;
+                    }
+
+                    //Get the first char of the value.
+                    const dchar first = value.empty ? '\0' : value.front;
+
+                    // Only resolvers registered for this first character apply.
+                    auto resolvers = (first in yamlImplicitResolvers_) is null ?
+                                     [] : yamlImplicitResolvers_[first];
+
+                    //If regexp matches, return tag.
+                    foreach(resolver; resolvers)
+                    {
+                        if(!(match(value, resolver[1]).empty))
+                        {
+                            return resolver[0];
+                        }
+                    }
+                    return defaultScalarTag_;
+                case NodeID.sequence:
+                    return defaultSequenceTag_;
+                case NodeID.mapping:
+                    return defaultMappingTag_;
+                case NodeID.invalid:
+                    assert(false, "Cannot resolve an invalid node");
+            }
+        }
+        @safe unittest
+        {
+            auto resolver = Resolver.withDefaultResolvers;
+
+            // True if every value resolves to the expected tag.
+            bool tagMatch(string tag, string[] values) @safe
+            {
+                const string expected = tag;
+                foreach(value; values)
+                {
+                    const string resolved = resolver.resolve(NodeID.scalar, null, value, true);
+                    if(expected != resolved)
+                    {
+                        return false;
+                    }
+                }
+                return true;
+            }
+
+            assert(tagMatch("tag:yaml.org,2002:bool",
+                   ["yes", "NO", "True", "on"]));
+            assert(tagMatch("tag:yaml.org,2002:float",
+                   ["6.8523015e+5", "685.230_15e+03", "685_230.15",
+                    "190:20:30.15", "-.inf", ".NaN"]));
+            assert(tagMatch("tag:yaml.org,2002:int",
+                   ["685230", "+685_230", "02472256", "0x_0A_74_AE",
+                    "0b1010_0111_0100_1010_1110", "190:20:30"]));
+            assert(tagMatch("tag:yaml.org,2002:merge", ["<<"]));
+            assert(tagMatch("tag:yaml.org,2002:null", ["~", "null", ""]));
+            assert(tagMatch("tag:yaml.org,2002:str",
+                   ["abcd", "9a8b", "9.1adsf"]));
+            assert(tagMatch("tag:yaml.org,2002:timestamp",
+                   ["2001-12-15T02:59:43.1Z",
+                    "2001-12-14t21:59:43.10-05:00",
+                    "2001-12-14 21:59:43.10 -5",
+                    "2001-12-15 2:59:43.10",
+                    "2002-12-14"]));
+            assert(tagMatch("tag:yaml.org,2002:value", ["="]));
+            assert(tagMatch("tag:yaml.org,2002:yaml", ["!", "&", "*"]));
+        }
+
+        ///Returns: Default scalar tag.
+        @property string defaultScalarTag() const pure @safe nothrow {return defaultScalarTag_;}
+
+        ///Returns: Default sequence tag.
+        @property string defaultSequenceTag() const pure @safe nothrow {return defaultSequenceTag_;}
+
+        ///Returns: Default mapping tag.
+        @property string defaultMappingTag() const pure @safe nothrow {return defaultMappingTag_;}
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/scanner.d b/src/ext_depends/D-YAML/source/dyaml/scanner.d
new file mode 100644
index 0000000..2009521
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/scanner.d
@@ -0,0 +1,1788 @@
+
+// Copyright Ferdinand Majerech 2011-2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/// YAML scanner.
+/// Code based on PyYAML: http://www.pyyaml.org
+module dyaml.scanner;
+
+
+import core.stdc.string;
+
+import std.algorithm;
+import std.array;
+import std.conv;
+import std.ascii : isAlphaNum, isDigit, isHexDigit;
+import std.exception;
+import std.string;
+import std.typecons;
+import std.traits : Unqual;
+import std.utf;
+
+import dyaml.escapes;
+import dyaml.exception;
+import dyaml.queue;
+import dyaml.reader;
+import dyaml.style;
+import dyaml.token;
+
+package:
+/// Scanner produces tokens of the following types:
+/// STREAM-START
+/// STREAM-END
+/// DIRECTIVE(name, value)
+/// DOCUMENT-START
+/// DOCUMENT-END
+/// BLOCK-SEQUENCE-START
+/// BLOCK-MAPPING-START
+/// BLOCK-END
+/// FLOW-SEQUENCE-START
+/// FLOW-MAPPING-START
+/// FLOW-SEQUENCE-END
+/// FLOW-MAPPING-END
+/// BLOCK-ENTRY
+/// FLOW-ENTRY
+/// KEY
+/// VALUE
+/// ALIAS(value)
+/// ANCHOR(value)
+/// TAG(value)
+/// SCALAR(value, plain, style)
+
+/// End of stream or a YAML line break character.
+alias isBreak = among!('\0', '\n', '\r', '\u0085', '\u2028', '\u2029');
+
+/// Space, end of stream, or a line break.
+alias isBreakOrSpace = among!(' ', '\0', '\n', '\r', '\u0085', '\u2028', '\u2029');
+
+/// Space, tab, end of stream, or a line break.
+alias isWhiteSpace = among!(' ', '\t', '\0', '\n', '\r', '\u0085', '\u2028', '\u2029');
+
+/// Whitespace that does not end a line.
+alias isNonLinebreakWhitespace = among!(' ', '\t');
+
+/// Characters that cannot start a plain scalar.
+alias isNonScalarStartCharacter = among!('-', '?', ':', ',', '[', ']', '{', '}',
+    '#', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`', ' ', '\t', '\0', '\n',
+    '\r', '\u0085', '\u2028', '\u2029');
+
+/// Characters allowed in a URI (tag) without percent-escaping.
+alias isURIChar = among!('-', ';', '/', '?', ':', '@', '&', '=', '+', '$', ',',
+    '_', '.', '!', '~', '*', '\'', '(', ')', '[', ']', '%');
+
+/// Space or a line break.
+alias isNSChar = among!(' ', '\n', '\r', '\u0085', '\u2028', '\u2029');
+
+/// Line break characters only.
+alias isBChar = among!('\n', '\r', '\u0085', '\u2028', '\u2029');
+
+/// Characters that terminate a run of plain text inside a flow scalar.
+alias isFlowScalarBreakSpace = among!(' ', '\t', '\0', '\n', '\r', '\u0085', '\u2028', '\u2029', '\'', '"', '\\');
+
+/// Marked exception thrown at scanner errors.
+///
+/// See_Also: MarkedYAMLException
+class ScannerException : MarkedYAMLException
+{
+    mixin MarkedExceptionCtors;
+}
+
+/// Generates tokens from data provided by a Reader.
+struct Scanner
+{
+ private:
+ /// A simple key is a key that is not denoted by the '?' indicator.
+ /// For example:
+ /// ---
+ /// block simple key: value
+ /// ? not a simple key:
+ /// : { flow simple key: value }
+ /// We emit the KEY token before all keys, so when we find a potential simple
+ /// key, we try to locate the corresponding ':' indicator. Simple keys should be
+ /// limited to a single line and 1024 characters.
+ ///
+ /// 16 bytes on 64-bit.
+ static struct SimpleKey
+ {
+ /// Character index in reader where the key starts.
+ uint charIndex = uint.max;
+ /// Index of the key token from start (first token scanned being 0).
+ uint tokenIndex;
+ /// Line the key starts at.
+ uint line;
+ /// Column the key starts at.
+ ushort column;
+ /// Is this required to be a simple key?
+ bool required;
+ /// Is this struct "null" (invalid)?.
+ bool isNull;
+ }
+
+ /// Block chomping types.
+ enum Chomping
+ {
+ /// Strip all trailing line breaks. '-' indicator.
+ strip,
+ /// Line break of the last line is preserved, others discarded. Default.
+ clip,
+ /// All trailing line breaks are preserved. '+' indicator.
+ keep
+ }
+
+ /// Reader used to read from a file/stream.
+ Reader reader_;
+ /// Are we done scanning?
+ bool done_;
+
+ /// Level of nesting in flow context. If 0, we're in block context.
+ uint flowLevel_;
+ /// Current indentation level.
+ int indent_ = -1;
+ /// Past indentation levels. Used as a stack.
+ Appender!(int[]) indents_;
+
+ /// Processed tokens not yet emitted. Used as a queue.
+ Queue!Token tokens_;
+
+ /// Number of tokens emitted through the getToken method.
+ uint tokensTaken_;
+
+ /// Can a simple key start at the current position? A simple key may start:
+ /// - at the beginning of the line, not counting indentation spaces
+ /// (in block context),
+ /// - after '{', '[', ',' (in the flow context),
+ /// - after '?', ':', '-' (in the block context).
+ /// In the block context, this flag also signifies if a block collection
+ /// may start at the current position.
+ bool allowSimpleKey_ = true;
+
+ /// Possible simple keys indexed by flow levels.
+ SimpleKey[] possibleSimpleKeys_;
+
+ public:
+        /// Construct a Scanner using specified Reader.
+        this(Reader reader) @safe nothrow
+        {
+            // Store the reader and queue the initial STREAM-START token.
+            reader_ = reader;
+            fetchStreamStart();
+        }
+
+        /// Advance to the next token, removing the current one from the queue.
+        void popFront() @safe
+        {
+            // tokensTaken_ is compared against simple key token indices
+            // in needMoreTokens().
+            ++tokensTaken_;
+            tokens_.pop();
+        }
+
+        /// Return the current token without removing it from the queue.
+        ///
+        /// Throws: if no token is left (via enforce).
+        const(Token) front() @safe
+        {
+            enforce(!empty, "No token left to peek");
+            return tokens_.peek();
+        }
+
+        /// Return whether there are any more tokens left.
+        ///
+        /// Note: scanning is lazy — this fetches tokens as a side effect until
+        /// it can tell whether the queue is exhausted.
+        bool empty() @safe
+        {
+            while (needMoreTokens())
+            {
+                fetchToken();
+            }
+            return tokens_.empty;
+        }
+
+ private:
+        /// Build the standard "expected X, but found Y" scanning error message.
+        string expected(T)(string expected, T found)
+        {
+            return "expected " ~ expected ~ ", but found " ~ text(found);
+        }
+
+        /// Determine whether or not we need to fetch more tokens before peeking/getting a token.
+        bool needMoreTokens() @safe pure
+        {
+            if(done_) { return false; }
+            if(tokens_.empty) { return true; }
+
+            // The current token may be a potential simple key, so we need to look further.
+            stalePossibleSimpleKeys();
+            return nextPossibleSimpleKey() == tokensTaken_;
+        }
+
+        /// Fetch a token, adding it to tokens_.
+        ///
+        /// Throws: ScannerException if no token can start at the current character.
+        void fetchToken() @safe
+        {
+            // Eat whitespaces and comments until we reach the next token.
+            scanToNextToken();
+
+            // Remove obsolete possible simple keys.
+            stalePossibleSimpleKeys();
+
+            // Compare current indentation and column. It may add some tokens
+            // and decrease the current indentation level.
+            unwindIndent(reader_.column);
+
+            // Get the next character.
+            const dchar c = reader_.peekByte();
+
+            // Fetch the token.
+            if(c == '\0') { return fetchStreamEnd(); }
+            if(checkDirective()) { return fetchDirective(); }
+            if(checkDocumentStart()) { return fetchDocumentStart(); }
+            if(checkDocumentEnd()) { return fetchDocumentEnd(); }
+            // Order of the following checks is NOT significant.
+            switch(c)
+            {
+                case '[': return fetchFlowSequenceStart();
+                case '{': return fetchFlowMappingStart();
+                case ']': return fetchFlowSequenceEnd();
+                case '}': return fetchFlowMappingEnd();
+                case ',': return fetchFlowEntry();
+                case '!': return fetchTag();
+                case '\'': return fetchSingle();
+                case '\"': return fetchDouble();
+                case '*': return fetchAlias();
+                case '&': return fetchAnchor();
+                case '?': if(checkKey()) { return fetchKey(); } goto default;
+                case ':': if(checkValue()) { return fetchValue(); } goto default;
+                case '-': if(checkBlockEntry()) { return fetchBlockEntry(); } goto default;
+                // Block scalars ('|', '>') are only valid in block context.
+                case '|': if(flowLevel_ == 0) { return fetchLiteral(); } break;
+                case '>': if(flowLevel_ == 0) { return fetchFolded(); } break;
+                default: if(checkPlain()) { return fetchPlain(); }
+            }
+
+            throw new ScannerException("While scanning for the next token, found character " ~
+                                       "\'%s\', index %s that cannot start any token"
+                                       .format(c, to!int(c)), reader_.mark);
+        }
+
+
+        /// Return the token number of the nearest possible simple key.
+        ///
+        /// Returns: uint.max if no valid simple key exists at any flow level.
+        uint nextPossibleSimpleKey() @safe pure nothrow @nogc
+        {
+            uint minTokenNumber = uint.max;
+            // Idiom fix: the flow-level index was unused, so iterate values only.
+            foreach(ref simpleKey; possibleSimpleKeys_)
+            {
+                if(simpleKey.isNull) { continue; }
+                minTokenNumber = min(minTokenNumber, simpleKey.tokenIndex);
+            }
+            return minTokenNumber;
+        }
+
+        /// Remove entries that are no longer possible simple keys.
+        ///
+        /// According to the YAML specification, simple keys
+        /// - should be limited to a single line,
+        /// - should be no longer than 1024 characters.
+        /// Disabling this will allow simple keys of any length and
+        /// height (may cause problems if indentation is broken though).
+        ///
+        /// Throws: ScannerException if a *required* simple key goes stale
+        ///         before its ':' is found.
+        void stalePossibleSimpleKeys() @safe pure
+        {
+            // Idiom fix: the flow-level index was unused, so iterate values only.
+            foreach(ref key; possibleSimpleKeys_)
+            {
+                if(key.isNull) { continue; }
+                // A key is stale once the scanner leaves its line or moves more
+                // than 1024 characters past its start.
+                if(key.line != reader_.line || reader_.charIndex - key.charIndex > 1024)
+                {
+                    enforce(!key.required,
+                            new ScannerException("While scanning a simple key",
+                                                 Mark(reader_.name, key.line, key.column),
+                                                 "could not find expected ':'", reader_.mark));
+                    key.isNull = true;
+                }
+            }
+        }
+
+        /// Check if the next token starts a possible simple key and if so, save its position.
+        ///
+        /// This function is called for ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+        void savePossibleSimpleKey() @safe pure
+        {
+            // Check if a simple key is required at the current position.
+            const required = (flowLevel_ == 0 && indent_ == reader_.column);
+            assert(allowSimpleKey_ || !required, "A simple key is required only if it is " ~
+                   "the first token in the current line. Therefore it is always allowed.");
+
+            if(!allowSimpleKey_) { return; }
+
+            // The next token might be a simple key, so save its number and position.
+            removePossibleSimpleKey();
+            const tokenCount = tokensTaken_ + cast(uint)tokens_.length;
+
+            const line = reader_.line;
+            const column = reader_.column;
+            // Columns wider than ushort.max are clamped to fit SimpleKey.column.
+            const key = SimpleKey(cast(uint)reader_.charIndex, tokenCount, line,
+                                  cast(ushort)min(column, ushort.max), required);
+
+            // Grow the per-flow-level array on demand.
+            if(possibleSimpleKeys_.length <= flowLevel_)
+            {
+                const oldLength = possibleSimpleKeys_.length;
+                possibleSimpleKeys_.length = flowLevel_ + 1;
+                //No need to initialize the last element, it's already done in the next line.
+                possibleSimpleKeys_[oldLength .. flowLevel_] = SimpleKey.init;
+            }
+            possibleSimpleKeys_[flowLevel_] = key;
+        }
+
+ /// Remove the saved possible key position at the current flow level.
+ void removePossibleSimpleKey() @safe pure
+ {
+ if(possibleSimpleKeys_.length <= flowLevel_) { return; }
+
+ if(!possibleSimpleKeys_[flowLevel_].isNull)
+ {
+ const key = possibleSimpleKeys_[flowLevel_];
+ enforce(!key.required,
+ new ScannerException("While scanning a simple key",
+ Mark(reader_.name, key.line, key.column),
+ "could not find expected ':'", reader_.mark));
+ possibleSimpleKeys_[flowLevel_].isNull = true;
+ }
+ }
+
+        /// Decrease indentation, removing entries in indents_.
+        ///
+        /// Params: column = Current column in the file/stream.
+        void unwindIndent(const int column) @safe
+        {
+            if(flowLevel_ > 0)
+            {
+                // In flow context, tokens should respect indentation.
+                // The condition should be `indent >= column` according to the spec.
+                // But this condition will prohibit intuitively correct
+                // constructions such as
+                // key : {
+                // }
+
+                // In the flow context, indentation is ignored. We make the scanner less
+                // restrictive than what the specification requires.
+                // if(pedantic_ && flowLevel_ > 0 && indent_ > column)
+                // {
+                //     throw new ScannerException("Invalid indentation or unclosed '[' or '{'",
+                //                                reader_.mark)
+                // }
+                return;
+            }
+
+            // In block context, we may need to issue the BLOCK-END tokens.
+            while(indent_ > column)
+            {
+                assert(indents_.data.length); // check the invariant BEFORE popping .back
+                indent_ = indents_.data.back;
+                indents_.shrinkTo(indents_.data.length - 1);
+                tokens_.push(blockEndToken(reader_.mark, reader_.mark));
+            }
+        }
+
+ /// Increase indentation if needed.
+ ///
+ /// Params: column = Current column in the file/stream.
+ ///
+ /// Returns: true if the indentation was increased, false otherwise.
+ bool addIndent(int column) @safe
+ {
+ if(indent_ >= column){return false;}
+ indents_ ~= indent_;
+ indent_ = column;
+ return true;
+ }
+
+
+ /// Add STREAM-START token.
+ void fetchStreamStart() @safe nothrow
+ {
+ tokens_.push(streamStartToken(reader_.mark, reader_.mark, reader_.encoding));
+ }
+
+ ///Add STREAM-END token.
+ void fetchStreamEnd() @safe
+ {
+            // Set indentation to -1.
+ unwindIndent(-1);
+ removePossibleSimpleKey();
+ allowSimpleKey_ = false;
+ possibleSimpleKeys_.destroy;
+
+ tokens_.push(streamEndToken(reader_.mark, reader_.mark));
+ done_ = true;
+ }
+
+ /// Add DIRECTIVE token.
+ void fetchDirective() @safe
+ {
+            // Set indentation to -1.
+ unwindIndent(-1);
+ // Reset simple keys.
+ removePossibleSimpleKey();
+ allowSimpleKey_ = false;
+
+ auto directive = scanDirective();
+ tokens_.push(directive);
+ }
+
+ /// Add DOCUMENT-START or DOCUMENT-END token.
+ void fetchDocumentIndicator(TokenID id)()
+ if(id == TokenID.documentStart || id == TokenID.documentEnd)
+ {
+ // Set indentation to -1 .
+ unwindIndent(-1);
+ // Reset simple keys. Note that there can't be a block collection after '---'.
+ removePossibleSimpleKey();
+ allowSimpleKey_ = false;
+
+ Mark startMark = reader_.mark;
+ reader_.forward(3);
+ tokens_.push(simpleToken!id(startMark, reader_.mark));
+ }
+
+ /// Aliases to add DOCUMENT-START or DOCUMENT-END token.
+ alias fetchDocumentStart = fetchDocumentIndicator!(TokenID.documentStart);
+ alias fetchDocumentEnd = fetchDocumentIndicator!(TokenID.documentEnd);
+
+ /// Add FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ void fetchFlowCollectionStart(TokenID id)() @safe
+ {
+ // '[' and '{' may start a simple key.
+ savePossibleSimpleKey();
+ // Simple keys are allowed after '[' and '{'.
+ allowSimpleKey_ = true;
+ ++flowLevel_;
+
+ Mark startMark = reader_.mark;
+ reader_.forward();
+ tokens_.push(simpleToken!id(startMark, reader_.mark));
+ }
+
+ /// Aliases to add FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ alias fetchFlowSequenceStart = fetchFlowCollectionStart!(TokenID.flowSequenceStart);
+ alias fetchFlowMappingStart = fetchFlowCollectionStart!(TokenID.flowMappingStart);
+
+        /// Add FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ void fetchFlowCollectionEnd(TokenID id)()
+ {
+ // Reset possible simple key on the current level.
+ removePossibleSimpleKey();
+ // No simple keys after ']' and '}'.
+ allowSimpleKey_ = false;
+ --flowLevel_;
+
+ Mark startMark = reader_.mark;
+ reader_.forward();
+ tokens_.push(simpleToken!id(startMark, reader_.mark));
+ }
+
+        /// Aliases to add FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ alias fetchFlowSequenceEnd = fetchFlowCollectionEnd!(TokenID.flowSequenceEnd);
+ alias fetchFlowMappingEnd = fetchFlowCollectionEnd!(TokenID.flowMappingEnd);
+
+        /// Add FLOW-ENTRY token.
+ void fetchFlowEntry() @safe
+ {
+ // Reset possible simple key on the current level.
+ removePossibleSimpleKey();
+ // Simple keys are allowed after ','.
+ allowSimpleKey_ = true;
+
+ Mark startMark = reader_.mark;
+ reader_.forward();
+ tokens_.push(flowEntryToken(startMark, reader_.mark));
+ }
+
+ /// Additional checks used in block context in fetchBlockEntry and fetchKey.
+ ///
+ /// Params: type = String representing the token type we might need to add.
+ /// id = Token type we might need to add.
+ void blockChecks(string type, TokenID id)()
+ {
+ enum context = type ~ " keys are not allowed here";
+            // Are we allowed to start a key (not necessarily a simple one)?
+ enforce(allowSimpleKey_, new ScannerException(context, reader_.mark));
+
+ if(addIndent(reader_.column))
+ {
+ tokens_.push(simpleToken!id(reader_.mark, reader_.mark));
+ }
+ }
+
+ /// Add BLOCK-ENTRY token. Might add BLOCK-SEQUENCE-START in the process.
+ void fetchBlockEntry() @safe
+ {
+ if(flowLevel_ == 0) { blockChecks!("Sequence", TokenID.blockSequenceStart)(); }
+
+ // It's an error for the block entry to occur in the flow context,
+ // but we let the parser detect this.
+
+ // Reset possible simple key on the current level.
+ removePossibleSimpleKey();
+ // Simple keys are allowed after '-'.
+ allowSimpleKey_ = true;
+
+ Mark startMark = reader_.mark;
+ reader_.forward();
+ tokens_.push(blockEntryToken(startMark, reader_.mark));
+ }
+
+ /// Add KEY token. Might add BLOCK-MAPPING-START in the process.
+ void fetchKey() @safe
+ {
+ if(flowLevel_ == 0) { blockChecks!("Mapping", TokenID.blockMappingStart)(); }
+
+ // Reset possible simple key on the current level.
+ removePossibleSimpleKey();
+ // Simple keys are allowed after '?' in the block context.
+ allowSimpleKey_ = (flowLevel_ == 0);
+
+ Mark startMark = reader_.mark;
+ reader_.forward();
+ tokens_.push(keyToken(startMark, reader_.mark));
+ }
+
+ /// Add VALUE token. Might add KEY and/or BLOCK-MAPPING-START in the process.
+ void fetchValue() @safe
+ {
+ //Do we determine a simple key?
+ if(possibleSimpleKeys_.length > flowLevel_ &&
+ !possibleSimpleKeys_[flowLevel_].isNull)
+ {
+ const key = possibleSimpleKeys_[flowLevel_];
+ possibleSimpleKeys_[flowLevel_].isNull = true;
+ Mark keyMark = Mark(reader_.name, key.line, key.column);
+ const idx = key.tokenIndex - tokensTaken_;
+
+ assert(idx >= 0);
+
+ // Add KEY.
+ // Manually inserting since tokens are immutable (need linked list).
+ tokens_.insert(keyToken(keyMark, keyMark), idx);
+
+ // If this key starts a new block mapping, we need to add BLOCK-MAPPING-START.
+ if(flowLevel_ == 0 && addIndent(key.column))
+ {
+ tokens_.insert(blockMappingStartToken(keyMark, keyMark), idx);
+ }
+
+ // There cannot be two simple keys in a row.
+ allowSimpleKey_ = false;
+ }
+ // Part of a complex key
+ else
+ {
+ // We can start a complex value if and only if we can start a simple key.
+ enforce(flowLevel_ > 0 || allowSimpleKey_,
+ new ScannerException("Mapping values are not allowed here", reader_.mark));
+
+ // If this value starts a new block mapping, we need to add
+ // BLOCK-MAPPING-START. It'll be detected as an error later by the parser.
+ if(flowLevel_ == 0 && addIndent(reader_.column))
+ {
+ tokens_.push(blockMappingStartToken(reader_.mark, reader_.mark));
+ }
+
+ // Reset possible simple key on the current level.
+ removePossibleSimpleKey();
+ // Simple keys are allowed after ':' in the block context.
+ allowSimpleKey_ = (flowLevel_ == 0);
+ }
+
+ // Add VALUE.
+ Mark startMark = reader_.mark;
+ reader_.forward();
+ tokens_.push(valueToken(startMark, reader_.mark));
+ }
+
+ /// Add ALIAS or ANCHOR token.
+ void fetchAnchor_(TokenID id)() @safe
+ if(id == TokenID.alias_ || id == TokenID.anchor)
+ {
+ // ALIAS/ANCHOR could be a simple key.
+ savePossibleSimpleKey();
+ // No simple keys after ALIAS/ANCHOR.
+ allowSimpleKey_ = false;
+
+ auto anchor = scanAnchor(id);
+ tokens_.push(anchor);
+ }
+
+ /// Aliases to add ALIAS or ANCHOR token.
+ alias fetchAlias = fetchAnchor_!(TokenID.alias_);
+ alias fetchAnchor = fetchAnchor_!(TokenID.anchor);
+
+ /// Add TAG token.
+ void fetchTag() @safe
+ {
+ //TAG could start a simple key.
+ savePossibleSimpleKey();
+ //No simple keys after TAG.
+ allowSimpleKey_ = false;
+
+ tokens_.push(scanTag());
+ }
+
+ /// Add block SCALAR token.
+ void fetchBlockScalar(ScalarStyle style)() @safe
+ if(style == ScalarStyle.literal || style == ScalarStyle.folded)
+ {
+ // Reset possible simple key on the current level.
+ removePossibleSimpleKey();
+ // A simple key may follow a block scalar.
+ allowSimpleKey_ = true;
+
+ auto blockScalar = scanBlockScalar(style);
+ tokens_.push(blockScalar);
+ }
+
+ /// Aliases to add literal or folded block scalar.
+ alias fetchLiteral = fetchBlockScalar!(ScalarStyle.literal);
+ alias fetchFolded = fetchBlockScalar!(ScalarStyle.folded);
+
+ /// Add quoted flow SCALAR token.
+ void fetchFlowScalar(ScalarStyle quotes)()
+ {
+ // A flow scalar could be a simple key.
+ savePossibleSimpleKey();
+ // No simple keys after flow scalars.
+ allowSimpleKey_ = false;
+
+ // Scan and add SCALAR.
+ auto scalar = scanFlowScalar(quotes);
+ tokens_.push(scalar);
+ }
+
+        /// Aliases to add single or double quoted flow scalar.
+ alias fetchSingle = fetchFlowScalar!(ScalarStyle.singleQuoted);
+ alias fetchDouble = fetchFlowScalar!(ScalarStyle.doubleQuoted);
+
+ /// Add plain SCALAR token.
+ void fetchPlain() @safe
+ {
+ // A plain scalar could be a simple key
+ savePossibleSimpleKey();
+ // No simple keys after plain scalars. But note that scanPlain() will
+ // change this flag if the scan is finished at the beginning of the line.
+ allowSimpleKey_ = false;
+ auto plain = scanPlain();
+
+ // Scan and add SCALAR. May change allowSimpleKey_
+ tokens_.push(plain);
+ }
+
+ pure:
+
+ ///Check if the next token is DIRECTIVE: ^ '%' ...
+ bool checkDirective() @safe
+ {
+ return reader_.peekByte() == '%' && reader_.column == 0;
+ }
+
+ /// Check if the next token is DOCUMENT-START: ^ '---' (' '|'\n')
+ bool checkDocumentStart() @safe
+ {
+ // Check one char first, then all 3, to prevent reading outside the buffer.
+ return reader_.column == 0 &&
+ reader_.peekByte() == '-' &&
+ reader_.prefix(3) == "---" &&
+ reader_.peek(3).isWhiteSpace;
+ }
+
+ /// Check if the next token is DOCUMENT-END: ^ '...' (' '|'\n')
+ bool checkDocumentEnd() @safe
+ {
+ // Check one char first, then all 3, to prevent reading outside the buffer.
+ return reader_.column == 0 &&
+ reader_.peekByte() == '.' &&
+ reader_.prefix(3) == "..." &&
+ reader_.peek(3).isWhiteSpace;
+ }
+
+ /// Check if the next token is BLOCK-ENTRY: '-' (' '|'\n')
+ bool checkBlockEntry() @safe
+ {
+ return !!reader_.peek(1).isWhiteSpace;
+ }
+
+ /// Check if the next token is KEY(flow context): '?'
+ ///
+ /// or KEY(block context): '?' (' '|'\n')
+ bool checkKey() @safe
+ {
+ return (flowLevel_ > 0 || reader_.peek(1).isWhiteSpace);
+ }
+
+ /// Check if the next token is VALUE(flow context): ':'
+ ///
+ /// or VALUE(block context): ':' (' '|'\n')
+ bool checkValue() @safe
+ {
+ return flowLevel_ > 0 || reader_.peek(1).isWhiteSpace;
+ }
+
+ /// Check if the next token is a plain scalar.
+ ///
+ /// A plain scalar may start with any non-space character except:
+ /// '-', '?', ':', ',', '[', ']', '{', '}',
+ /// '#', '&', '*', '!', '|', '>', '\'', '\"',
+ /// '%', '@', '`'.
+ ///
+ /// It may also start with
+ /// '-', '?', ':'
+ /// if it is followed by a non-space character.
+ ///
+ /// Note that we limit the last rule to the block context (except the
+ /// '-' character) because we want the flow context to be space
+ /// independent.
+ bool checkPlain() @safe
+ {
+ const c = reader_.peek();
+ if(!c.isNonScalarStartCharacter)
+ {
+ return true;
+ }
+ return !reader_.peek(1).isWhiteSpace &&
+ (c == '-' || (flowLevel_ == 0 && (c == '?' || c == ':')));
+ }
+
+ /// Move to the next non-space character.
+ void findNextNonSpace() @safe
+ {
+ while(reader_.peekByte() == ' ') { reader_.forward(); }
+ }
+
+ /// Scan a string of alphanumeric or "-_" characters.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ void scanAlphaNumericToSlice(string name)(const Mark startMark)
+ {
+ size_t length;
+ dchar c = reader_.peek();
+ while(c.isAlphaNum || c.among!('-', '_')) { c = reader_.peek(++length); }
+
+ enforce(length > 0, new ScannerException("While scanning " ~ name,
+ startMark, expected("alphanumeric, '-' or '_'", c), reader_.mark));
+
+ reader_.sliceBuilder.write(reader_.get(length));
+ }
+
+ /// Scan and throw away all characters until next line break.
+ void scanToNextBreak() @safe
+ {
+ while(!reader_.peek().isBreak) { reader_.forward(); }
+ }
+
+ /// Scan all characters until next line break.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ void scanToNextBreakToSlice() @safe
+ {
+ uint length;
+ while(!reader_.peek(length).isBreak)
+ {
+ ++length;
+ }
+ reader_.sliceBuilder.write(reader_.get(length));
+ }
+
+
+ /// Move to next token in the file/stream.
+ ///
+ /// We ignore spaces, line breaks and comments.
+ /// If we find a line break in the block context, we set
+        /// `allowSimpleKey_` on.
+ ///
+ /// We do not yet support BOM inside the stream as the
+ /// specification requires. Any such mark will be considered as a part
+ /// of the document.
+ void scanToNextToken() @safe
+ {
+ // TODO(PyYAML): We need to make tab handling rules more sane. A good rule is:
+ // Tabs cannot precede tokens
+ // BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ // KEY(block), VALUE(block), BLOCK-ENTRY
+ // So the checking code is
+ // if <TAB>:
+ // allowSimpleKey_ = false
+ // We also need to add the check for `allowSimpleKey_ == true` to
+ // `unwindIndent` before issuing BLOCK-END.
+ // Scanners for block, flow, and plain scalars need to be modified.
+
+ for(;;)
+ {
+ //All whitespace in flow context is ignored, even whitespace
+ // not allowed in other contexts
+ if (flowLevel_ > 0)
+ {
+ while(reader_.peekByte().isNonLinebreakWhitespace) { reader_.forward(); }
+ }
+ else
+ {
+ findNextNonSpace();
+ }
+ if(reader_.peekByte() == '#') { scanToNextBreak(); }
+ if(scanLineBreak() != '\0')
+ {
+ if(flowLevel_ == 0) { allowSimpleKey_ = true; }
+ }
+ else
+ {
+ break;
+ }
+ }
+ }
+
+ /// Scan directive token.
+ Token scanDirective() @safe
+ {
+ Mark startMark = reader_.mark;
+ // Skip the '%'.
+ reader_.forward();
+
+ // Scan directive name
+ reader_.sliceBuilder.begin();
+ scanDirectiveNameToSlice(startMark);
+ const name = reader_.sliceBuilder.finish();
+
+ reader_.sliceBuilder.begin();
+
+ // Index where tag handle ends and suffix starts in a tag directive value.
+ uint tagHandleEnd = uint.max;
+ if(name == "YAML") { scanYAMLDirectiveValueToSlice(startMark); }
+ else if(name == "TAG") { tagHandleEnd = scanTagDirectiveValueToSlice(startMark); }
+ char[] value = reader_.sliceBuilder.finish();
+
+ Mark endMark = reader_.mark;
+
+ DirectiveType directive;
+ if(name == "YAML") { directive = DirectiveType.yaml; }
+ else if(name == "TAG") { directive = DirectiveType.tag; }
+ else
+ {
+ directive = DirectiveType.reserved;
+ scanToNextBreak();
+ }
+
+ scanDirectiveIgnoredLine(startMark);
+
+ return directiveToken(startMark, endMark, value, directive, tagHandleEnd);
+ }
+
+ /// Scan name of a directive token.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ void scanDirectiveNameToSlice(const Mark startMark) @safe
+ {
+ // Scan directive name.
+ scanAlphaNumericToSlice!"a directive"(startMark);
+
+ enforce(reader_.peek().among!(' ', '\0', '\n', '\r', '\u0085', '\u2028', '\u2029'),
+ new ScannerException("While scanning a directive", startMark,
+ expected("alphanumeric, '-' or '_'", reader_.peek()), reader_.mark));
+ }
+
+ /// Scan value of a YAML directive token. Returns major, minor version separated by '.'.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ void scanYAMLDirectiveValueToSlice(const Mark startMark) @safe
+ {
+ findNextNonSpace();
+
+ scanYAMLDirectiveNumberToSlice(startMark);
+
+ enforce(reader_.peekByte() == '.',
+ new ScannerException("While scanning a directive", startMark,
+ expected("digit or '.'", reader_.peek()), reader_.mark));
+ // Skip the '.'.
+ reader_.forward();
+
+ reader_.sliceBuilder.write('.');
+ scanYAMLDirectiveNumberToSlice(startMark);
+
+ enforce(reader_.peek().among!(' ', '\0', '\n', '\r', '\u0085', '\u2028', '\u2029'),
+ new ScannerException("While scanning a directive", startMark,
+ expected("digit or '.'", reader_.peek()), reader_.mark));
+ }
+
+ /// Scan a number from a YAML directive.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ void scanYAMLDirectiveNumberToSlice(const Mark startMark) @safe
+ {
+ enforce(isDigit(reader_.peek()),
+ new ScannerException("While scanning a directive", startMark,
+ expected("digit", reader_.peek()), reader_.mark));
+
+ // Already found the first digit in the enforce(), so set length to 1.
+ uint length = 1;
+ while(reader_.peek(length).isDigit) { ++length; }
+
+ reader_.sliceBuilder.write(reader_.get(length));
+ }
+
+ /// Scan value of a tag directive.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ ///
+ /// Returns: Length of tag handle (which is before tag prefix) in scanned data
+ uint scanTagDirectiveValueToSlice(const Mark startMark) @safe
+ {
+ findNextNonSpace();
+ const startLength = reader_.sliceBuilder.length;
+ scanTagDirectiveHandleToSlice(startMark);
+ const handleLength = cast(uint)(reader_.sliceBuilder.length - startLength);
+ findNextNonSpace();
+ scanTagDirectivePrefixToSlice(startMark);
+
+ return handleLength;
+ }
+
+ /// Scan handle of a tag directive.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ void scanTagDirectiveHandleToSlice(const Mark startMark) @safe
+ {
+ scanTagHandleToSlice!"directive"(startMark);
+ enforce(reader_.peekByte() == ' ',
+ new ScannerException("While scanning a directive handle", startMark,
+ expected("' '", reader_.peek()), reader_.mark));
+ }
+
+ /// Scan prefix of a tag directive.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ void scanTagDirectivePrefixToSlice(const Mark startMark) @safe
+ {
+ scanTagURIToSlice!"directive"(startMark);
+ enforce(reader_.peek().among!(' ', '\0', '\n', '\r', '\u0085', '\u2028', '\u2029'),
+ new ScannerException("While scanning a directive prefix", startMark,
+ expected("' '", reader_.peek()), reader_.mark));
+ }
+
+ /// Scan (and ignore) ignored line after a directive.
+ void scanDirectiveIgnoredLine(const Mark startMark) @safe
+ {
+ findNextNonSpace();
+ if(reader_.peekByte() == '#') { scanToNextBreak(); }
+ enforce(reader_.peek().isBreak,
+ new ScannerException("While scanning a directive", startMark,
+ expected("comment or a line break", reader_.peek()), reader_.mark));
+ scanLineBreak();
+ }
+
+
+ /// Scan an alias or an anchor.
+ ///
+ /// The specification does not restrict characters for anchors and
+ /// aliases. This may lead to problems, for instance, the document:
+ /// [ *alias, value ]
+        /// can be interpreted in two ways, as
+ /// [ "value" ]
+ /// and
+ /// [ *alias , "value" ]
+ /// Therefore we restrict aliases to ASCII alphanumeric characters.
+ Token scanAnchor(const TokenID id) @safe
+ {
+ const startMark = reader_.mark;
+ const dchar i = reader_.get();
+
+ reader_.sliceBuilder.begin();
+ if(i == '*') { scanAlphaNumericToSlice!"an alias"(startMark); }
+ else { scanAlphaNumericToSlice!"an anchor"(startMark); }
+ // On error, value is discarded as we return immediately
+ char[] value = reader_.sliceBuilder.finish();
+
+ enum anchorCtx = "While scanning an anchor";
+ enum aliasCtx = "While scanning an alias";
+ enforce(reader_.peek().isWhiteSpace ||
+ reader_.peekByte().among!('?', ':', ',', ']', '}', '%', '@'),
+ new ScannerException(i == '*' ? aliasCtx : anchorCtx, startMark,
+ expected("alphanumeric, '-' or '_'", reader_.peek()), reader_.mark));
+
+ if(id == TokenID.alias_)
+ {
+ return aliasToken(startMark, reader_.mark, value);
+ }
+ if(id == TokenID.anchor)
+ {
+ return anchorToken(startMark, reader_.mark, value);
+ }
+ assert(false, "This code should never be reached");
+ }
+
+ /// Scan a tag token.
+ Token scanTag() @safe
+ {
+ const startMark = reader_.mark;
+ dchar c = reader_.peek(1);
+
+ reader_.sliceBuilder.begin();
+ scope(failure) { reader_.sliceBuilder.finish(); }
+ // Index where tag handle ends and tag suffix starts in the tag value
+ // (slice) we will produce.
+ uint handleEnd;
+
+ if(c == '<')
+ {
+ reader_.forward(2);
+
+ handleEnd = 0;
+ scanTagURIToSlice!"tag"(startMark);
+ enforce(reader_.peekByte() == '>',
+ new ScannerException("While scanning a tag", startMark,
+ expected("'>'", reader_.peek()), reader_.mark));
+ reader_.forward();
+ }
+ else if(c.isWhiteSpace)
+ {
+ reader_.forward();
+ handleEnd = 0;
+ reader_.sliceBuilder.write('!');
+ }
+ else
+ {
+ uint length = 1;
+ bool useHandle;
+
+ while(!c.isBreakOrSpace)
+ {
+ if(c == '!')
+ {
+ useHandle = true;
+ break;
+ }
+ ++length;
+ c = reader_.peek(length);
+ }
+
+ if(useHandle)
+ {
+ scanTagHandleToSlice!"tag"(startMark);
+ handleEnd = cast(uint)reader_.sliceBuilder.length;
+ }
+ else
+ {
+ reader_.forward();
+ reader_.sliceBuilder.write('!');
+ handleEnd = cast(uint)reader_.sliceBuilder.length;
+ }
+
+ scanTagURIToSlice!"tag"(startMark);
+ }
+
+ enforce(reader_.peek().isBreakOrSpace,
+ new ScannerException("While scanning a tag", startMark, expected("' '", reader_.peek()),
+ reader_.mark));
+
+ char[] slice = reader_.sliceBuilder.finish();
+ return tagToken(startMark, reader_.mark, slice, handleEnd);
+ }
+
+ /// Scan a block scalar token with specified style.
+ Token scanBlockScalar(const ScalarStyle style) @safe
+ {
+ const startMark = reader_.mark;
+
+ // Scan the header.
+ reader_.forward();
+
+ const indicators = scanBlockScalarIndicators(startMark);
+
+ const chomping = indicators[0];
+ const increment = indicators[1];
+ scanBlockScalarIgnoredLine(startMark);
+
+ // Determine the indentation level and go to the first non-empty line.
+ Mark endMark;
+ uint indent = max(1, indent_ + 1);
+
+ reader_.sliceBuilder.begin();
+ alias Transaction = SliceBuilder.Transaction;
+ // Used to strip the last line breaks written to the slice at the end of the
+ // scalar, which may be needed based on chomping.
+ Transaction breaksTransaction = Transaction(&reader_.sliceBuilder);
+ // Read the first indentation/line breaks before the scalar.
+ size_t startLen = reader_.sliceBuilder.length;
+ if(increment == int.min)
+ {
+ auto indentation = scanBlockScalarIndentationToSlice();
+ endMark = indentation[1];
+ indent = max(indent, indentation[0]);
+ }
+ else
+ {
+ indent += increment - 1;
+ endMark = scanBlockScalarBreaksToSlice(indent);
+ }
+
+ // int.max means there's no line break (int.max is outside UTF-32).
+ dchar lineBreak = cast(dchar)int.max;
+
+ // Scan the inner part of the block scalar.
+ while(reader_.column == indent && reader_.peekByte() != '\0')
+ {
+ breaksTransaction.commit();
+ const bool leadingNonSpace = !reader_.peekByte().among!(' ', '\t');
+ // This is where the 'interesting' non-whitespace data gets read.
+ scanToNextBreakToSlice();
+ lineBreak = scanLineBreak();
+
+
+ // This transaction serves to rollback data read in the
+ // scanBlockScalarBreaksToSlice() call.
+ breaksTransaction = Transaction(&reader_.sliceBuilder);
+ startLen = reader_.sliceBuilder.length;
+ // The line breaks should actually be written _after_ the if() block
+ // below. We work around that by inserting
+ endMark = scanBlockScalarBreaksToSlice(indent);
+
+ // This will not run during the last iteration (see the if() vs the
+ // while()), hence breaksTransaction rollback (which happens after this
+ // loop) will never roll back data written in this if() block.
+ if(reader_.column == indent && reader_.peekByte() != '\0')
+ {
+ // Unfortunately, folding rules are ambiguous.
+
+ // This is the folding according to the specification:
+ if(style == ScalarStyle.folded && lineBreak == '\n' &&
+ leadingNonSpace && !reader_.peekByte().among!(' ', '\t'))
+ {
+ // No breaks were scanned; no need to insert the space in the
+ // middle of slice.
+ if(startLen == reader_.sliceBuilder.length)
+ {
+ reader_.sliceBuilder.write(' ');
+ }
+ }
+ else
+ {
+ // We need to insert in the middle of the slice in case any line
+ // breaks were scanned.
+ reader_.sliceBuilder.insert(lineBreak, startLen);
+ }
+
+ ////this is Clark Evans's interpretation (also in the spec
+ ////examples):
+ //
+ //if(style == ScalarStyle.folded && lineBreak == '\n')
+ //{
+ // if(startLen == endLen)
+ // {
+ // if(!" \t"d.canFind(reader_.peekByte()))
+ // {
+ // reader_.sliceBuilder.write(' ');
+ // }
+ // else
+ // {
+ // chunks ~= lineBreak;
+ // }
+ // }
+ //}
+ //else
+ //{
+ // reader_.sliceBuilder.insertBack(lineBreak, endLen - startLen);
+ //}
+ }
+ else
+ {
+ break;
+ }
+ }
+
+            // If chomping is Keep, we keep (commit) the last scanned line breaks
+            // (which are at the end of the scalar). Otherwise we remove them (end the
+ // transaction).
+ if(chomping == Chomping.keep) { breaksTransaction.commit(); }
+ else { breaksTransaction.end(); }
+ if(chomping != Chomping.strip && lineBreak != int.max)
+ {
+ // If chomping is Keep, we keep the line break but the first line break
+ // that isn't stripped (since chomping isn't Strip in this branch) must
+ // be inserted _before_ the other line breaks.
+ if(chomping == Chomping.keep)
+ {
+ reader_.sliceBuilder.insert(lineBreak, startLen);
+ }
+ // If chomping is not Keep, breaksTransaction was cancelled so we can
+ // directly write the first line break (as it isn't stripped - chomping
+ // is not Strip)
+ else
+ {
+ reader_.sliceBuilder.write(lineBreak);
+ }
+ }
+
+ char[] slice = reader_.sliceBuilder.finish();
+ return scalarToken(startMark, endMark, slice, style);
+ }
+
+ /// Scan chomping and indentation indicators of a scalar token.
+ Tuple!(Chomping, int) scanBlockScalarIndicators(const Mark startMark) @safe
+ {
+ auto chomping = Chomping.clip;
+ int increment = int.min;
+ dchar c = reader_.peek();
+
+ /// Indicators can be in any order.
+ if(getChomping(c, chomping))
+ {
+ getIncrement(c, increment, startMark);
+ }
+ else
+ {
+ const gotIncrement = getIncrement(c, increment, startMark);
+ if(gotIncrement) { getChomping(c, chomping); }
+ }
+
+ enforce(c.among!(' ', '\0', '\n', '\r', '\u0085', '\u2028', '\u2029'),
+ new ScannerException("While scanning a block scalar", startMark,
+ expected("chomping or indentation indicator", c), reader_.mark));
+
+ return tuple(chomping, increment);
+ }
+
+ /// Get chomping indicator, if detected. Return false otherwise.
+ ///
+ /// Used in scanBlockScalarIndicators.
+ ///
+ /// Params:
+ ///
+ /// c = The character that may be a chomping indicator.
+ /// chomping = Write the chomping value here, if detected.
+ bool getChomping(ref dchar c, ref Chomping chomping) @safe
+ {
+ if(!c.among!('+', '-')) { return false; }
+ chomping = c == '+' ? Chomping.keep : Chomping.strip;
+ reader_.forward();
+ c = reader_.peek();
+ return true;
+ }
+
+ /// Get increment indicator, if detected. Return false otherwise.
+ ///
+ /// Used in scanBlockScalarIndicators.
+ ///
+ /// Params:
+ ///
+ /// c = The character that may be an increment indicator.
+ /// If an increment indicator is detected, this will be updated to
+ /// the next character in the Reader.
+ /// increment = Write the increment value here, if detected.
+ /// startMark = Mark for error messages.
+ bool getIncrement(ref dchar c, ref int increment, const Mark startMark) @safe
+ {
+ if(!c.isDigit) { return false; }
+ // Convert a digit to integer.
+ increment = c - '0';
+ assert(increment < 10 && increment >= 0, "Digit has invalid value");
+
+ enforce(increment > 0,
+ new ScannerException("While scanning a block scalar", startMark,
+ expected("indentation indicator in range 1-9", "0"), reader_.mark));
+
+ reader_.forward();
+ c = reader_.peek();
+ return true;
+ }
+
+ /// Scan (and ignore) ignored line in a block scalar.
+ void scanBlockScalarIgnoredLine(const Mark startMark) @safe
+ {
+ findNextNonSpace();
+ if(reader_.peekByte()== '#') { scanToNextBreak(); }
+
+ enforce(reader_.peek().isBreak,
+ new ScannerException("While scanning a block scalar", startMark,
+ expected("comment or line break", reader_.peek()), reader_.mark));
+
+ scanLineBreak();
+ }
+
+ /// Scan indentation in a block scalar, returning line breaks, max indent and end mark.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ Tuple!(uint, Mark) scanBlockScalarIndentationToSlice() @safe
+ {
+ uint maxIndent;
+ Mark endMark = reader_.mark;
+
+ while(reader_.peek().among!(' ', '\n', '\r', '\u0085', '\u2028', '\u2029'))
+ {
+ if(reader_.peekByte() != ' ')
+ {
+ reader_.sliceBuilder.write(scanLineBreak());
+ endMark = reader_.mark;
+ continue;
+ }
+ reader_.forward();
+ maxIndent = max(reader_.column, maxIndent);
+ }
+
+ return tuple(maxIndent, endMark);
+ }
+
+ /// Scan line breaks at lower or specified indentation in a block scalar.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ Mark scanBlockScalarBreaksToSlice(const uint indent) @safe
+ {
+ Mark endMark = reader_.mark;
+
+ for(;;)
+ {
+ while(reader_.column < indent && reader_.peekByte() == ' ') { reader_.forward(); }
+ if(!reader_.peek().among!('\n', '\r', '\u0085', '\u2028', '\u2029')) { break; }
+ reader_.sliceBuilder.write(scanLineBreak());
+ endMark = reader_.mark;
+ }
+
+ return endMark;
+ }
+
+        /// Scan a quoted flow scalar token with specified quotes.
+ Token scanFlowScalar(const ScalarStyle quotes) @safe
+ {
+ const startMark = reader_.mark;
+ const quote = reader_.get();
+
+ reader_.sliceBuilder.begin();
+
+ scanFlowScalarNonSpacesToSlice(quotes, startMark);
+
+ while(reader_.peek() != quote)
+ {
+ scanFlowScalarSpacesToSlice(startMark);
+ scanFlowScalarNonSpacesToSlice(quotes, startMark);
+ }
+ reader_.forward();
+
+ auto slice = reader_.sliceBuilder.finish();
+ return scalarToken(startMark, reader_.mark, slice, quotes);
+ }
+
+ /// Scan nonspace characters in a flow scalar.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ void scanFlowScalarNonSpacesToSlice(const ScalarStyle quotes, const Mark startMark)
+     @safe
+ {
+     for(;;)
+     {
+         dchar c = reader_.peek();
+
+         // Copy the longest run of ordinary (non-break/space/quote/escape)
+         // characters with a single write instead of character by character.
+         size_t numCodePoints;
+         while(!reader_.peek(numCodePoints).isFlowScalarBreakSpace) { ++numCodePoints; }
+
+         if (numCodePoints > 0) { reader_.sliceBuilder.write(reader_.get(numCodePoints)); }
+
+         c = reader_.peek();
+         // '' inside a single quoted scalar is an escaped single quote.
+         if(quotes == ScalarStyle.singleQuoted && c == '\'' && reader_.peek(1) == '\'')
+         {
+             reader_.forward(2);
+             reader_.sliceBuilder.write('\'');
+         }
+         // Characters that are special only in the *other* quoting style are
+         // copied through verbatim.
+         else if((quotes == ScalarStyle.doubleQuoted && c == '\'') ||
+                 (quotes == ScalarStyle.singleQuoted && c.among!('"', '\\')))
+         {
+             reader_.forward();
+             reader_.sliceBuilder.write(c);
+         }
+         // Escape sequences are recognized in double quoted scalars only.
+         else if(quotes == ScalarStyle.doubleQuoted && c == '\\')
+         {
+             reader_.forward();
+             c = reader_.peek();
+             if(c.among!(escapes))
+             {
+                 reader_.forward();
+                 // Escaping has been moved to Parser as it can't be done in
+                 // place (in a slice) in case of '\P' and '\L' (very uncommon,
+                 // but we don't want to break the spec)
+                 char[2] escapeSequence = ['\\', cast(char)c];
+                 reader_.sliceBuilder.write(escapeSequence);
+             }
+             else if(c.among!(escapeHexCodeList))
+             {
+                 // Hex escape (\x, \u, \U style): validate that the expected
+                 // number of hex digits follows, then store the sequence
+                 // unexpanded (expansion happens in the Parser, see above).
+                 const hexLength = dyaml.escapes.escapeHexLength(c);
+                 reader_.forward();
+
+                 foreach(i; 0 .. hexLength) {
+                     enforce(reader_.peek(i).isHexDigit,
+                         new ScannerException("While scanning a double quoted scalar", startMark,
+                             expected("escape sequence of hexadecimal numbers",
+                                 reader_.peek(i)), reader_.mark));
+                 }
+                 char[] hex = reader_.get(hexLength);
+
+                 enforce((hex.length > 0) && (hex.length <= 8),
+                     new ScannerException("While scanning a double quoted scalar", startMark,
+                         "overflow when parsing an escape sequence of " ~
+                         "hexadecimal numbers.", reader_.mark));
+
+                 char[2] escapeStart = ['\\', cast(char) c];
+                 reader_.sliceBuilder.write(escapeStart);
+                 reader_.sliceBuilder.write(hex);
+
+             }
+             // An escaped line break folds: consume it plus any following
+             // breaks without emitting the escape itself.
+             else if(c.among!('\n', '\r', '\u0085', '\u2028', '\u2029'))
+             {
+                 scanLineBreak();
+                 scanFlowScalarBreaksToSlice(startMark);
+             }
+             else
+             {
+                 throw new ScannerException("While scanning a double quoted scalar", startMark,
+                     text("found unsupported escape character ", c),
+                     reader_.mark);
+             }
+         }
+         // Hit a space or line break: hand control back to the caller.
+         else { return; }
+     }
+ }
+
+ /// Scan space characters in a flow scalar.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// spaces into that slice.
+ void scanFlowScalarSpacesToSlice(const Mark startMark) @safe
+ {
+     // Increase length as long as we see whitespace.
+     size_t length;
+     while(reader_.peekByte(length).among!(' ', '\t')) { ++length; }
+     auto whitespaces = reader_.prefixBytes(length);
+
+     // Can check the last byte without striding because '\0' is ASCII
+     const c = reader_.peek(length);
+     // '\0' here means the buffer ended inside the quoted scalar.
+     enforce(c != '\0',
+         new ScannerException("While scanning a quoted scalar", startMark,
+             "found unexpected end of buffer", reader_.mark));
+
+     // Spaces not followed by a line break: keep them verbatim.
+     if(!c.among!('\n', '\r', '\u0085', '\u2028', '\u2029'))
+     {
+         reader_.forward(length);
+         reader_.sliceBuilder.write(whitespaces);
+         return;
+     }
+
+     // There's a line break after the spaces: the spaces are dropped and the
+     // break is folded per YAML flow-scalar folding rules.
+     reader_.forward(length);
+     const lineBreak = scanLineBreak();
+
+     // Non-'\n' breaks are preserved as-is.
+     if(lineBreak != '\n') { reader_.sliceBuilder.write(lineBreak); }
+
+     // If we have extra line breaks after the first, scan them into the
+     // slice.
+     const bool extraBreaks = scanFlowScalarBreaksToSlice(startMark);
+
+     // No extra breaks, one normal line break. Replace it with a space.
+     if(lineBreak == '\n' && !extraBreaks) { reader_.sliceBuilder.write(' '); }
+ }
+
+ /// Scan line breaks in a flow scalar.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// line breaks into that slice.
+ ///
+ /// Returns: true if at least one line break was scanned, false otherwise.
+ bool scanFlowScalarBreaksToSlice(const Mark startMark) @safe
+ {
+     // True if at least one line break was found.
+     bool anyBreaks;
+     for(;;)
+     {
+         // Instead of checking indentation, we check for document separators.
+         // "---"/"..." followed by whitespace would start a new document, which
+         // is illegal inside a quoted scalar.
+         const prefix = reader_.prefix(3);
+         enforce(!(prefix == "---" || prefix == "...") ||
+             !reader_.peek(3).isWhiteSpace,
+             new ScannerException("While scanning a quoted scalar", startMark,
+                 "found unexpected document separator", reader_.mark));
+
+         // Skip any whitespaces.
+         while(reader_.peekByte().among!(' ', '\t')) { reader_.forward(); }
+
+         // Encountered a non-whitespace non-linebreak character, so we're done.
+         if(!reader_.peek().among!(' ', '\n', '\r', '\u0085', '\u2028', '\u2029')) { break; }
+
+         const lineBreak = scanLineBreak();
+         anyBreaks = true;
+         reader_.sliceBuilder.write(lineBreak);
+     }
+     return anyBreaks;
+ }
+
+ /// Scan plain scalar token (no block, no quotes).
+ Token scanPlain() @safe
+ {
+     // We keep track of the allowSimpleKey_ flag here.
+     // Indentation rules are loosened for the flow context
+     const startMark = reader_.mark;
+     Mark endMark = startMark;
+     const indent = indent_ + 1;
+
+     // We allow zero indentation for scalars, but then we need to check for
+     // document separators at the beginning of the line.
+     // if(indent == 0) { indent = 1; }
+
+     reader_.sliceBuilder.begin();
+
+     alias Transaction = SliceBuilder.Transaction;
+     // Holds trailing whitespace scanned after a content run; only committed
+     // if more content follows (trailing whitespace is not part of the token).
+     Transaction spacesTransaction;
+     // Stop at a comment.
+     while(reader_.peekByte() != '#')
+     {
+         // Scan the entire plain scalar.
+         size_t length;
+         dchar c = reader_.peek(length);
+         for(;;)
+         {
+             const cNext = reader_.peek(length + 1);
+             // Content ends at whitespace, at ": " in block context, or at any
+             // flow indicator in flow context.
+             if(c.isWhiteSpace ||
+                (flowLevel_ == 0 && c == ':' && cNext.isWhiteSpace) ||
+                (flowLevel_ > 0 && c.among!(',', ':', '?', '[', ']', '{', '}')))
+             {
+                 break;
+             }
+             ++length;
+             c = cNext;
+         }
+
+         // It's not clear what we should do with ':' in the flow context.
+         enforce(flowLevel_ == 0 || c != ':' ||
+            reader_.peek(length + 1).isWhiteSpace ||
+            reader_.peek(length + 1).among!(',', '[', ']', '{', '}'),
+             new ScannerException("While scanning a plain scalar", startMark,
+                 "found unexpected ':' . Please check " ~
+                 "http://pyyaml.org/wiki/YAMLColonInFlowContext for details.",
+                 reader_.mark));
+
+         if(length == 0) { break; }
+
+         // A plain scalar can never be a simple key's *follower* on this line.
+         allowSimpleKey_ = false;
+
+         reader_.sliceBuilder.write(reader_.get(length));
+
+         endMark = reader_.mark;
+
+         spacesTransaction.commit();
+         spacesTransaction = Transaction(&reader_.sliceBuilder);
+
+         const startLength = reader_.sliceBuilder.length;
+         scanPlainSpacesToSlice();
+         // Done if no whitespace followed the content, or if the next line is
+         // dedented below the scalar's indentation (block context only).
+         if(startLength == reader_.sliceBuilder.length ||
+            (flowLevel_ == 0 && reader_.column < indent))
+         {
+             break;
+         }
+     }
+
+     // Roll back any uncommitted trailing whitespace.
+     spacesTransaction.end();
+     char[] slice = reader_.sliceBuilder.finish();
+
+     return scalarToken(startMark, endMark, slice, ScalarStyle.plain);
+ }
+
+ /// Scan spaces in a plain scalar.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the spaces
+ /// into that slice.
+ void scanPlainSpacesToSlice() @safe
+ {
+     // The specification is really confusing about tabs in plain scalars.
+     // We just forbid them completely. Do not use tabs in YAML!
+
+     // Get as many plain spaces as there are.
+     size_t length;
+     while(reader_.peekByte(length) == ' ') { ++length; }
+     char[] whitespaces = reader_.prefixBytes(length);
+     reader_.forward(length);
+
+     const dchar c = reader_.peek();
+     // isNSChar: space or line break here — the loop below handles exactly
+     // those two cases.
+     if(!c.isNSChar)
+     {
+         // We have spaces, but no newline.
+         if(whitespaces.length > 0) { reader_.sliceBuilder.write(whitespaces); }
+         return;
+     }
+
+     // Newline after the spaces (if any)
+     const lineBreak = scanLineBreak();
+     // A line break means the next token may again be a simple key.
+     allowSimpleKey_ = true;
+
+     // True if the reader is at a document separator ("---"/"...") followed
+     // by whitespace/EOF — that terminates the plain scalar.
+     static bool end(Reader reader_) @safe pure
+     {
+         const prefix = reader_.prefix(3);
+         return ("---" == prefix || "..." == prefix)
+                 && reader_.peek(3).among!(' ', '\t', '\0', '\n', '\r', '\u0085', '\u2028', '\u2029');
+     }
+
+     if(end(reader_)) { return; }
+
+     bool extraBreaks;
+
+     alias Transaction = SliceBuilder.Transaction;
+     // Collected breaks are discarded (transaction never committed) if a
+     // document separator cuts the scalar short.
+     auto transaction = Transaction(&reader_.sliceBuilder);
+     if(lineBreak != '\n') { reader_.sliceBuilder.write(lineBreak); }
+     while(reader_.peek().isNSChar)
+     {
+         if(reader_.peekByte() == ' ') { reader_.forward(); }
+         else
+         {
+             const lBreak = scanLineBreak();
+             extraBreaks  = true;
+             reader_.sliceBuilder.write(lBreak);
+
+             if(end(reader_)) { return; }
+         }
+     }
+     transaction.commit();
+
+     // No line breaks, only a space.
+     if(lineBreak == '\n' && !extraBreaks) { reader_.sliceBuilder.write(' '); }
+ }
+
+ /// Scan handle of a tag token.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ ///
+ /// A handle is '!' alone, or '!' + alphanumeric/'-'/'_' characters + '!'.
+ void scanTagHandleToSlice(string name)(const Mark startMark)
+ {
+     dchar c = reader_.peek();
+     enum contextMsg = "While scanning a " ~ name;
+     // A handle always starts with '!'.
+     enforce(c == '!',
+         new ScannerException(contextMsg, startMark, expected("'!'", c), reader_.mark));
+
+     uint length = 1;
+     c = reader_.peek(length);
+     if(c != ' ')
+     {
+         // Named handle: body characters must be alphanumeric, '-' or '_',
+         // and the handle must be terminated by a second '!'.
+         while(c.isAlphaNum || c.among!('-', '_'))
+         {
+             ++length;
+             c = reader_.peek(length);
+         }
+         enforce(c == '!',
+             new ScannerException(contextMsg, startMark, expected("'!'", c), reader_.mark));
+         ++length;
+     }
+
+     reader_.sliceBuilder.write(reader_.get(length));
+ }
+
+ /// Scan URI in a tag token.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ void scanTagURIToSlice(string name)(const Mark startMark)
+ {
+     // Note: we do not check if URI is well-formed.
+     dchar c = reader_.peek();
+     const startLen = reader_.sliceBuilder.length;
+     {
+         // Buffer plain URI characters in runs; flush the pending run before
+         // each '%'-escape so escapes can be decoded separately.
+         uint length;
+         while(c.isAlphaNum || c.isURIChar)
+         {
+             if(c == '%')
+             {
+                 auto chars = reader_.get(length);
+                 reader_.sliceBuilder.write(chars);
+                 length = 0;
+                 scanURIEscapesToSlice!name(startMark);
+             }
+             else { ++length; }
+             c = reader_.peek(length);
+         }
+         // Flush the trailing run, if any.
+         if(length > 0)
+         {
+             auto chars = reader_.get(length);
+             reader_.sliceBuilder.write(chars);
+             length = 0;
+         }
+     }
+     // OK if we scanned something, error otherwise.
+     enum contextMsg = "While parsing a " ~ name;
+     enforce(reader_.sliceBuilder.length > startLen,
+         new ScannerException(contextMsg, startMark, expected("URI", c), reader_.mark));
+ }
+
+ // Not @nogc yet because std.utf.decode is not @nogc
+ /// Scan URI escape sequences.
+ ///
+ /// Assumes that the caller is building a slice in Reader, and puts the scanned
+ /// characters into that slice.
+ ///
+ /// Consumes consecutive %XX sequences, decodes them as UTF-8 bytes, and
+ /// writes the resulting code points into the slice.
+ void scanURIEscapesToSlice(string name)(const Mark startMark)
+ {
+     import core.exception : UnicodeException;
+     // URI escapes encode a UTF-8 string. We store UTF-8 code units here for
+     // decoding into UTF-32.
+     Appender!string buffer;
+
+
+     enum contextMsg = "While scanning a " ~ name;
+     while(reader_.peekByte() == '%')
+     {
+         reader_.forward();
+         char[2] nextByte = [reader_.peekByte(), reader_.peekByte(1)];
+
+         enforce(nextByte[0].isHexDigit && nextByte[1].isHexDigit,
+             new ScannerException(contextMsg, startMark,
+                 expected("URI escape sequence of 2 hexadecimal " ~
+                     "numbers", nextByte), reader_.mark));
+
+         // Parse the two hex digits into one UTF-8 code unit.
+         buffer ~= nextByte[].to!ubyte(16);
+
+         reader_.forward(2);
+     }
+     try
+     {
+         // foreach over a string decodes UTF-8 to dchar, throwing on
+         // malformed sequences.
+         foreach (dchar chr; buffer.data)
+         {
+             reader_.sliceBuilder.write(chr);
+         }
+     }
+     catch (UnicodeException)
+     {
+         throw new ScannerException(contextMsg, startMark,
+                 "Invalid UTF-8 data encoded in URI escape sequence",
+                 reader_.mark);
+     }
+ }
+
+
+ /// Scan a line break, if any.
+ ///
+ /// Transforms:
+ /// '\r\n' : '\n'
+ /// '\r' : '\n'
+ /// '\n' : '\n'
+ /// '\u0085' : '\n'
+ /// '\u2028' : '\u2028'
+ /// '\u2029 : '\u2029'
+ /// no break : '\0'
+ dchar scanLineBreak() @safe
+ {
+ // Fast path for ASCII line breaks.
+ const b = reader_.peekByte();
+ if(b < 0x80)
+ {
+ if(b == '\n' || b == '\r')
+ {
+ if(reader_.prefix(2) == "\r\n") { reader_.forward(2); }
+ else { reader_.forward(); }
+ return '\n';
+ }
+ return '\0';
+ }
+
+ const c = reader_.peek();
+ if(c == '\x85')
+ {
+ reader_.forward();
+ return '\n';
+ }
+ if(c == '\u2028' || c == '\u2029')
+ {
+ reader_.forward();
+ return c;
+ }
+ return '\0';
+ }
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/serializer.d b/src/ext_depends/D-YAML/source/dyaml/serializer.d
new file mode 100644
index 0000000..4100cf3
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/serializer.d
@@ -0,0 +1,322 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/**
+ * YAML serializer.
+ * Code based on PyYAML: http://www.pyyaml.org
+ */
+module dyaml.serializer;
+
+
+import std.array;
+import std.format;
+import std.typecons;
+
+import dyaml.emitter;
+import dyaml.event;
+import dyaml.exception;
+import dyaml.node;
+import dyaml.resolver;
+import dyaml.tagdirective;
+import dyaml.token;
+
+
+package:
+
+///Serializes represented YAML nodes, generating events which are then emitted by Emitter.
+struct Serializer
+{
+    private:
+        ///Resolver used to determine which tags are automatically resolvable.
+        Resolver resolver_;
+
+        ///Do all document starts have to be specified explicitly?
+        Flag!"explicitStart" explicitStart_;
+        ///Do all document ends have to be specified explicitly?
+        Flag!"explicitEnd" explicitEnd_;
+        ///YAML version string.
+        string YAMLVersion_;
+
+        ///Tag directives to emit.
+        TagDirective[] tagDirectives_;
+
+        //TODO Use something with more deterministic memory usage.
+        ///Nodes with assigned anchors.
+        string[Node] anchors_;
+        ///Nodes with assigned anchors that are already serialized.
+        bool[Node] serializedNodes_;
+        ///ID of the last anchor generated.
+        uint lastAnchorID_ = 0;
+
+    public:
+        /**
+         * Construct a Serializer.
+         *
+         * Params:
+         *     resolver      = Resolver used to determine which tags are automatically resolvable.
+         *     explicitStart = Do all document starts have to be specified explicitly?
+         *     explicitEnd   = Do all document ends have to be specified explicitly?
+         *     YAMLVersion   = YAML version string.
+         *     tagDirectives = Tag directives to emit.
+         */
+        this(Resolver resolver,
+             const Flag!"explicitStart" explicitStart,
+             const Flag!"explicitEnd" explicitEnd, string YAMLVersion,
+             TagDirective[] tagDirectives) @safe
+        {
+            resolver_       = resolver;
+            explicitStart_  = explicitStart;
+            explicitEnd_    = explicitEnd;
+            YAMLVersion_    = YAMLVersion;
+            tagDirectives_  = tagDirectives;
+        }
+
+        ///Begin the stream.
+        void startStream(EmitterT)(ref EmitterT emitter) @safe
+        {
+            emitter.emit(streamStartEvent(Mark(), Mark()));
+        }
+
+        ///End the stream.
+        void endStream(EmitterT)(ref EmitterT emitter) @safe
+        {
+            emitter.emit(streamEndEvent(Mark(), Mark()));
+        }
+
+        ///Serialize a node, emitting it in the process.
+        ///
+        ///Emits one complete document, then resets all per-document anchor
+        ///state so the Serializer can be reused for the next document.
+        void serialize(EmitterT)(ref EmitterT emitter, ref Node node) @safe
+        {
+            emitter.emit(documentStartEvent(Mark(), Mark(), explicitStart_,
+                                            YAMLVersion_, tagDirectives_));
+            anchorNode(node);
+            serializeNode(emitter, node);
+            emitter.emit(documentEndEvent(Mark(), Mark(), explicitEnd_));
+            // Reset per-document state.
+            serializedNodes_.destroy();
+            anchors_.destroy();
+            string[Node] emptyAnchors;
+            anchors_ = emptyAnchors;
+            lastAnchorID_ = 0;
+        }
+
+    private:
+        /**
+         * Determine if it's a good idea to add an anchor to a node.
+         *
+         * Used to prevent associating every single repeating scalar with an
+         * anchor/alias - only nodes long enough can use anchors.
+         *
+         * Params:  node = Node to check for anchorability.
+         *
+         * Returns: True if the node is anchorable, false otherwise.
+         */
+        static bool anchorable(ref Node node) @safe
+        {
+            if(node.nodeID == NodeID.scalar)
+            {
+                // Only string/binary scalars longer than 64 units qualify.
+                return (node.type == NodeType.string) ? node.as!string.length > 64 :
+                       (node.type == NodeType.binary) ? node.as!(ubyte[]).length > 64 :
+                                                        false;
+            }
+            // Collections qualify when they hold more than 2 items.
+            return node.length > 2;
+        }
+
+        @safe unittest
+        {
+            import std.string : representation;
+            auto shortString = "not much";
+            auto longString = "A fairly long string that would be a good idea to add an anchor to";
+            auto node1 = Node(shortString);
+            auto node2 = Node(shortString.representation.dup);
+            auto node3 = Node(longString);
+            auto node4 = Node(longString.representation.dup);
+            auto node5 = Node([node1]);
+            auto node6 = Node([node1, node2, node3, node4]);
+            assert(!anchorable(node1));
+            assert(!anchorable(node2));
+            assert(anchorable(node3));
+            assert(anchorable(node4));
+            assert(!anchorable(node5));
+            assert(anchorable(node6));
+        }
+
+        ///Add an anchor to the node if it's anchorable and not anchored yet.
+        void anchorNode(ref Node node) @safe
+        {
+            if(!anchorable(node)){return;}
+
+            // Seen before: promote its (null) entry to a generated anchor name.
+            if((node in anchors_) !is null)
+            {
+                if(anchors_[node] is null)
+                {
+                    anchors_[node] = generateAnchor();
+                }
+                return;
+            }
+
+            // NOTE(review): at this point node is known NOT to be in anchors_
+            // (the branch above returns otherwise), so this remove looks like
+            // a no-op and repeated nodes would never get a null entry to
+            // promote above. Confirm intent against upstream history — this
+            // may be deliberate (cf. the Issue #244 test below).
+            anchors_.remove(node);
+            final switch (node.nodeID)
+            {
+                case NodeID.mapping:
+                    foreach(ref Node key, ref Node value; node)
+                    {
+                        anchorNode(key);
+                        anchorNode(value);
+                    }
+                    break;
+                case NodeID.sequence:
+                    foreach(ref Node item; node)
+                    {
+                        anchorNode(item);
+                    }
+                    break;
+                case NodeID.invalid:
+                    assert(0);
+                case NodeID.scalar:
+            }
+        }
+
+        ///Generate and return a new anchor (e.g. "id001", "id002", ...).
+        string generateAnchor() @safe
+        {
+            ++lastAnchorID_;
+            auto appender = appender!string();
+            formattedWrite(appender, "id%03d", lastAnchorID_);
+            return appender.data;
+        }
+
+        ///Serialize a node and all its subnodes.
+        void serializeNode(EmitterT)(ref EmitterT emitter, ref Node node) @safe
+        {
+            //If the node has an anchor, emit an anchor (as aliasEvent) on the
+            //first occurrence, save it in serializedNodes_, and emit an alias
+            //if it reappears.
+            string aliased;
+            if(anchorable(node) && (node in anchors_) !is null)
+            {
+                aliased = anchors_[node];
+                if((node in serializedNodes_) !is null)
+                {
+                    emitter.emit(aliasEvent(Mark(), Mark(), aliased));
+                    return;
+                }
+                serializedNodes_[node] = true;
+            }
+            final switch (node.nodeID)
+            {
+                case NodeID.mapping:
+                    // The tag is implicit if it matches the resolver's default.
+                    const defaultTag = resolver_.defaultMappingTag;
+                    const implicit = node.tag_ == defaultTag;
+                    emitter.emit(mappingStartEvent(Mark(), Mark(), aliased, node.tag_,
+                                                   implicit, node.collectionStyle));
+                    foreach(ref Node key, ref Node value; node)
+                    {
+                        serializeNode(emitter, key);
+                        serializeNode(emitter, value);
+                    }
+                    emitter.emit(mappingEndEvent(Mark(), Mark()));
+                    return;
+                case NodeID.sequence:
+                    const defaultTag = resolver_.defaultSequenceTag;
+                    const implicit = node.tag_ == defaultTag;
+                    emitter.emit(sequenceStartEvent(Mark(), Mark(), aliased, node.tag_,
+                                                    implicit, node.collectionStyle));
+                    foreach(ref Node item; node)
+                    {
+                        serializeNode(emitter, item);
+                    }
+                    emitter.emit(sequenceEndEvent(Mark(), Mark()));
+                    return;
+                case NodeID.scalar:
+                    assert(node.type == NodeType.string, "Scalar node type must be string before serialized");
+                    auto value = node.as!string;
+                    // The tag is implicit if plain-style resolution would
+                    // detect the same tag from the value alone.
+                    const detectedTag = resolver_.resolve(NodeID.scalar, null, value, true);
+                    const bool isDetected = node.tag_ == detectedTag;
+
+                    emitter.emit(scalarEvent(Mark(), Mark(), aliased, node.tag_,
+                                 isDetected, value, node.scalarStyle));
+                    return;
+                case NodeID.invalid:
+                    assert(0);
+            }
+        }
+}
+
+// Issue #244
+@safe unittest
+{
+    // Regression test: dumping this deeply nested structure of short/empty
+    // scalars must complete without error.
+    import dyaml.dumper : dumper;
+    auto node = Node([
+        Node.Pair(
+            Node(""),
+            Node([
+                Node([
+                    Node.Pair(
+                        Node("d"),
+                        Node([
+                            Node([
+                                Node.Pair(
+                                    Node("c"),
+                                    Node("")
+                                ),
+                                Node.Pair(
+                                    Node("b"),
+                                    Node("")
+                                ),
+                                Node.Pair(
+                                    Node(""),
+                                    Node("")
+                                )
+                            ])
+                        ])
+                    ),
+                ]),
+                Node([
+                    Node.Pair(
+                        Node("d"),
+                        Node([
+                            Node(""),
+                            Node(""),
+                            Node([
+                                Node.Pair(
+                                    Node("c"),
+                                    Node("")
+                                ),
+                                Node.Pair(
+                                    Node("b"),
+                                    Node("")
+                                ),
+                                Node.Pair(
+                                    Node(""),
+                                    Node("")
+                                )
+                            ])
+                        ])
+                    ),
+                    Node.Pair(
+                        Node("z"),
+                        Node("")
+                    ),
+                    Node.Pair(
+                        Node(""),
+                        Node("")
+                    )
+                ]),
+                Node("")
+            ])
+        ),
+        Node.Pair(
+            Node("g"),
+            Node("")
+        ),
+        Node.Pair(
+            Node("h"),
+            Node("")
+        ),
+    ]);
+
+    auto stream = appender!string();
+    dumper().dump(stream, node);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/style.d b/src/ext_depends/D-YAML/source/dyaml/style.d
new file mode 100644
index 0000000..319592c
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/style.d
@@ -0,0 +1,37 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+///YAML node formatting styles.
+module dyaml.style;
+
+
+/// Styles a YAML scalar can be written in.
+enum ScalarStyle : ubyte
+{
+    /// Invalid (uninitialized) style — also used to mean "let the dumper choose".
+    invalid = 0,
+    /// `|` (Literal block style)
+    literal,
+    /// `>` (Folded block style)
+    folded,
+    /// Plain scalar
+    plain,
+    /// Single quoted scalar
+    singleQuoted,
+    /// Double quoted scalar
+    doubleQuoted
+}
+
+/// Styles a YAML collection (sequence/mapping) can be written in.
+enum CollectionStyle : ubyte
+{
+    /// Invalid (uninitialized) style — also used to mean "let the dumper choose".
+    invalid = 0,
+    /// Block style.
+    block,
+    /// Flow style.
+    flow
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/tagdirective.d b/src/ext_depends/D-YAML/source/dyaml/tagdirective.d
new file mode 100644
index 0000000..54687fe
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/tagdirective.d
@@ -0,0 +1,15 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+///Tag directives.
+module dyaml.tagdirective;
+
+///Single tag directive. handle is the shortcut, prefix is the prefix that replaces it.
+struct TagDirective
+{
+    /// Tag shorthand (the handle), as written in the %TAG directive.
+    string handle;
+    /// Prefix the handle is replaced with when tags are resolved.
+    string prefix;
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/test/common.d b/src/ext_depends/D-YAML/source/dyaml/test/common.d
new file mode 100644
index 0000000..a6bafa9
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/test/common.d
@@ -0,0 +1,223 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.test.common;
+
+version(unittest)
+{
+
+import dyaml.node;
+import dyaml.event;
+
+import core.exception;
+import std.algorithm;
+import std.array;
+import std.conv;
+import std.file;
+import std.range;
+import std.path;
+import std.traits;
+import std.typecons;
+
+package:
+
+/**
+Run a test.
+
+Finds test data files, selects the base names that have all of unittestExt
+and none of skipExt, and invokes testFunction once per matching base with
+the corresponding filenames.
+
+Params:
+    testFunction = Unittest function.
+    unittestExt  = Extensions of data files needed for the unittest.
+    skipExt      = Extensions that must not be used for the unittest.
+ */
+void run(D)(D testFunction, string[] unittestExt, string[] skipExt = [])
+{
+    immutable string dataDir = __FILE_FULL_PATH__.dirName ~ "/../../../test/data";
+    auto testFilenames = findTestFilenames(dataDir);
+
+    if (unittestExt.length > 0)
+    {
+        // A base is only used if it provides every required extension and
+        // none of the skipped ones.
+        outer: foreach (base, extensions; testFilenames)
+        {
+            string[] filenames;
+            foreach (ext; unittestExt)
+            {
+                if (!extensions.canFind(ext))
+                {
+                    continue outer;
+                }
+                filenames ~= base ~ '.' ~ ext;
+            }
+            foreach (ext; skipExt)
+            {
+                if (extensions.canFind(ext))
+                {
+                    continue outer;
+                }
+            }
+
+            execute(testFunction, filenames);
+        }
+    }
+    else
+    {
+        // No data files required: call the test function once with no input.
+        execute(testFunction, string[].init);
+    }
+}
+
+// TODO: remove when a @safe ubyte[] file read can be done.
+/**
+Reads a file as an array of bytes.
+
+Params:  filename = Full path to file to read.
+
+Returns: The file's data.
+*/
+ubyte[] readData(string filename) @trusted
+{
+    import std.file : read;
+    // @trusted: std.file.read returns void[]; the cast to ubyte[] is safe.
+    return cast(ubyte[])read(filename);
+}
+/// Assert that two nodes are equal, printing both in debug form on failure.
+void assertNodesEqual(const scope Node gotNode, const scope Node expectedNode) @safe
+{
+    import std.format : format;
+    assert(gotNode == expectedNode, format!"got %s, expected %s"(gotNode.debugString, expectedNode.debugString));
+}
+
+/**
+Determine if events in events1 are equivalent to events in events2.
+
+Two events are equivalent when they have the same event ID, the same anchor
+(where applicable), the same collection tag (where applicable), and — for
+scalars — the same value, plus the same tag unless either side is implicit.
+
+Params:
+    events1 = A range of events to compare with.
+    events2 = A second range of events to compare.
+
+Returns: true if the events are equivalent, false otherwise.
+*/
+bool compareEvents(T, U)(T events1, U events2)
+if (isInputRange!T && isInputRange!U && is(ElementType!T == Event) && is(ElementType!U == Event))
+{
+    // Walk both ranges in lockstep. The previous zip()-based implementation
+    // stopped at the end of the shorter range, so a strict prefix of an event
+    // stream compared as "equivalent"; equivalence requires both streams to
+    // end together.
+    while (!events1.empty && !events2.empty)
+    {
+        auto e1 = events1.front;
+        auto e2 = events2.front;
+        //Different event types.
+        if (e1.id != e2.id)
+        {
+            return false;
+        }
+        //Different anchor (if applicable).
+        if (e1.id.among!(EventID.sequenceStart, EventID.mappingStart, EventID.alias_, EventID.scalar)
+            && e1.anchor != e2.anchor)
+        {
+            return false;
+        }
+        //Different collection tag (if applicable).
+        if (e1.id.among!(EventID.sequenceStart, EventID.mappingStart) && e1.tag != e2.tag)
+        {
+            return false;
+        }
+        if (e1.id == EventID.scalar)
+        {
+            //Different scalar tag (if applicable).
+            if (!(e1.implicit || e2.implicit) && e1.tag != e2.tag)
+            {
+                return false;
+            }
+            //Different scalar value.
+            if (e1.value != e2.value)
+            {
+                return false;
+            }
+        }
+        events1.popFront();
+        events2.popFront();
+    }
+    // Equivalent only if both ranges were exhausted together.
+    return events1.empty && events2.empty;
+}
+/**
+Throw an Error if events in events1 aren't equivalent to events in events2.
+
+Params:
+    events1 = First event array to compare.
+    events2 = Second event array to compare.
+*/
+void assertEventsEqual(T, U)(T events1, U events2)
+if (isInputRange!T && isInputRange!U && is(ElementType!T == Event) && is(ElementType!U == Event))
+{
+    // Materialize both ranges so they can appear in the assertion message
+    // after being consumed by compareEvents.
+    auto events1Copy = events1.array;
+    auto events2Copy = events2.array;
+    assert(compareEvents(events1Copy, events2Copy), text("Got '", events1Copy, "', expected '", events2Copy, "'"));
+}
+
+private:
+
+/**
+Find unittest input filenames.
+
+Params: dir = Directory to look in.
+
+Returns: Test input base filenames and their extensions.
+*/
+ //@trusted due to dirEntries
+string[][string] findTestFilenames(const string dir) @trusted
+{
+    //Groups of extensions indexed by base names.
+    string[][string] names;
+    foreach (string name; dirEntries(dir, SpanMode.shallow))
+    {
+        if (isFile(name))
+        {
+            string base = name.stripExtension();
+            // extension() yields null for a file with no extension; normalize
+            // to "" so the AA value below is well-defined.
+            string ext  = name.extension();
+            if (ext is null)
+            {
+                ext = "";
+            }
+            // Strip the leading dot. Guard on length: indexing ext[0] on an
+            // extensionless file was a RangeError in the previous version.
+            if (ext.length > 0 && ext[0] == '.')
+            {
+                ext = ext[1 .. $];
+            }
+
+            //If the base name doesn't exist yet, add it; otherwise add new extension.
+            names[base] = ((base in names) is null) ? [ext] : names[base] ~ ext;
+        }
+    }
+    return names;
+}
+
+/**
+Recursively copy an array of strings to a tuple to use for unittest function input.
+
+Copies strings[index], then recurses with index - 1 until index reaches 0.
+
+Params:
+    index   = Current index in the array/tuple.
+    tuple   = Tuple to copy to.
+    strings = Strings to copy.
+*/
+void stringsToTuple(uint index, F ...)(ref F tuple, const string[] strings)
+in(F.length == strings.length)
+do
+{
+    tuple[index] = strings[index];
+    // Compile-time recursion over the remaining tuple slots.
+    static if (index > 0)
+    {
+        stringsToTuple!(index - 1, F)(tuple, strings);
+    }
+}
+
+/**
+Execute an unittest on specified files.
+
+Params:
+    testFunction = Unittest function.
+    filenames    = Names of input files to test with; must match the number
+                   of parameters testFunction takes.
+ */
+void execute(D)(D testFunction, string[] filenames)
+{
+    //Convert filenames to parameters tuple and call the test function.
+    alias F = Parameters!D[0..$];
+    F parameters;
+    stringsToTuple!(F.length - 1, F)(parameters, filenames);
+    testFunction(parameters);
+}
+
+} // version(unittest)
diff --git a/src/ext_depends/D-YAML/source/dyaml/test/compare.d b/src/ext_depends/D-YAML/source/dyaml/test/compare.d
new file mode 100644
index 0000000..5a37fd0
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/test/compare.d
@@ -0,0 +1,51 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.test.compare;
+
+@safe unittest
+{
+    import dyaml : Loader;
+    import dyaml.test.common : assertNodesEqual, compareEvents, run;
+
+    /**
+    Test parser by comparing output from parsing two equivalent YAML files.
+
+    Params:
+        dataFilename      = YAML file to parse.
+        canonicalFilename = Another file to parse, in canonical YAML format.
+    */
+    static void testParser(string dataFilename, string canonicalFilename) @safe
+    {
+        auto dataEvents = Loader.fromFile(dataFilename).parse();
+        auto canonicalEvents = Loader.fromFile(canonicalFilename).parse();
+
+        //BUG: the return value isn't checked! This test currently fails...
+        compareEvents(dataEvents, canonicalEvents);
+    }
+
+    /**
+    Test loader by comparing output from loading two equivalent YAML files.
+
+    Params:
+        dataFilename      = YAML file to load.
+        canonicalFilename = Another file to load, in canonical YAML format.
+    */
+    static void testLoader(string dataFilename, string canonicalFilename) @safe
+    {
+        import std.array : array;
+        // Load all documents from both files and compare them pairwise.
+        auto data = Loader.fromFile(dataFilename).array;
+        auto canonical = Loader.fromFile(canonicalFilename).array;
+
+        assert(data.length == canonical.length, "Unequal node count");
+        foreach (n; 0 .. data.length)
+        {
+            assertNodesEqual(data[n], canonical[n]);
+        }
+    }
+    run(&testParser, ["data", "canonical"]);
+    run(&testLoader, ["data", "canonical"], ["test_loader_skip"]);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/test/constructor.d b/src/ext_depends/D-YAML/source/dyaml/test/constructor.d
new file mode 100644
index 0000000..aeb8653
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/test/constructor.d
@@ -0,0 +1,957 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.test.constructor;
+
+
+version(unittest)
+{
+
+import std.conv;
+import std.datetime;
+import std.exception;
+import std.path;
+import std.string;
+import std.typecons;
+
+import dyaml : Loader, Node, YAMLNull;
+
+///Expected results of loading test inputs.
+Node[][string] expected;
+
+///Initialize expected.
+static this() @safe
+{
+ expected["aliases-cdumper-bug"] = constructAliasesCDumperBug();
+ expected["construct-binary"] = constructBinary();
+ expected["construct-bool"] = constructBool();
+ expected["construct-custom"] = constructCustom();
+ expected["construct-float"] = constructFloat();
+ expected["construct-int"] = constructInt();
+ expected["construct-map"] = constructMap();
+ expected["construct-merge"] = constructMerge();
+ expected["construct-null"] = constructNull();
+ expected["construct-omap"] = constructOMap();
+ expected["construct-pairs"] = constructPairs();
+ expected["construct-seq"] = constructSeq();
+ expected["construct-set"] = constructSet();
+ expected["construct-str-ascii"] = constructStrASCII();
+ expected["construct-str"] = constructStr();
+ expected["construct-str-utf8"] = constructStrUTF8();
+ expected["construct-timestamp"] = constructTimestamp();
+ expected["construct-value"] = constructValue();
+ expected["duplicate-merge-key"] = duplicateMergeKey();
+ expected["float-representer-2.3-bug"] = floatRepresenterBug();
+ expected["invalid-single-quote-bug"] = invalidSingleQuoteBug();
+ expected["more-floats"] = moreFloats();
+ expected["negative-float-bug"] = negativeFloatBug();
+ expected["single-dot-is-not-float-bug"] = singleDotFloatBug();
+ expected["timestamp-bugs"] = timestampBugs();
+ expected["utf16be"] = utf16be();
+ expected["utf16le"] = utf16le();
+ expected["utf8"] = utf8();
+ expected["utf8-implicit"] = utf8implicit();
+}
+
+///Construct a pair of nodes with specified values.
+Node.Pair pair(A, B)(A a, B b)
+{
+ return Node.Pair(a,b);
+}
+
+///Test cases:
+
+Node[] constructAliasesCDumperBug() @safe
+{
+ return [
+ Node(
+ [
+ Node("today", "tag:yaml.org,2002:str"),
+ Node("today", "tag:yaml.org,2002:str")
+ ],
+ "tag:yaml.org,2002:seq")
+ ];
+}
+
+Node[] constructBinary() @safe
+{
+ auto canonical = "GIF89a\x0c\x00\x0c\x00\x84\x00\x00\xff\xff\xf7\xf5\xf5\xee\xe9\xe9\xe5fff\x00\x00\x00\xe7\xe7\xe7^^^\xf3\xf3\xed\x8e\x8e\x8e\xe0\xe0\xe0\x9f\x9f\x9f\x93\x93\x93\xa7\xa7\xa7\x9e\x9e\x9eiiiccc\xa3\xa3\xa3\x84\x84\x84\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9!\xfe\x0eMade with GIMP\x00,\x00\x00\x00\x00\x0c\x00\x0c\x00\x00\x05, \x8e\x810\x9e\xe3@\x14\xe8i\x10\xc4\xd1\x8a\x08\x1c\xcf\x80M$z\xef\xff0\x85p\xb8\xb01f\r\x1b\xce\x01\xc3\x01\x1e\x10' \x82\n\x01\x00;".representation.dup;
+ auto generic = "GIF89a\x0c\x00\x0c\x00\x84\x00\x00\xff\xff\xf7\xf5\xf5\xee\xe9\xe9\xe5fff\x00\x00\x00\xe7\xe7\xe7^^^\xf3\xf3\xed\x8e\x8e\x8e\xe0\xe0\xe0\x9f\x9f\x9f\x93\x93\x93\xa7\xa7\xa7\x9e\x9e\x9eiiiccc\xa3\xa3\xa3\x84\x84\x84\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9!\xfe\x0eMade with GIMP\x00,\x00\x00\x00\x00\x0c\x00\x0c\x00\x00\x05, \x8e\x810\x9e\xe3@\x14\xe8i\x10\xc4\xd1\x8a\x08\x1c\xcf\x80M$z\xef\xff0\x85p\xb8\xb01f\r\x1b\xce\x01\xc3\x01\x1e\x10' \x82\n\x01\x00;".representation.dup;
+ auto description = "The binary value above is a tiny arrow encoded as a gif image.";
+
+ return [
+ Node(
+ [
+ pair(
+ Node("canonical", "tag:yaml.org,2002:str"),
+ Node(canonical, "tag:yaml.org,2002:binary")
+ ),
+ pair(
+ Node("generic", "tag:yaml.org,2002:str"),
+ Node(generic, "tag:yaml.org,2002:binary")
+ ),
+ pair(
+ Node("description", "tag:yaml.org,2002:str"),
+ Node(description, "tag:yaml.org,2002:str")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructBool() @safe
+{
+ const(bool) a = true;
+ immutable(bool) b = true;
+ const bool aa = true;
+ immutable bool bb = true;
+ return [
+ Node(
+ [
+ pair(
+ Node("canonical", "tag:yaml.org,2002:str"),
+ Node(true, "tag:yaml.org,2002:bool")
+ ),
+ pair(
+ Node("answer", "tag:yaml.org,2002:str"),
+ Node(false, "tag:yaml.org,2002:bool")
+ ),
+ pair(
+ Node("logical", "tag:yaml.org,2002:str"),
+ Node(true, "tag:yaml.org,2002:bool")
+ ),
+ pair(
+ Node("option", "tag:yaml.org,2002:str"),
+ Node(true, "tag:yaml.org,2002:bool")
+ ),
+ pair(
+ Node("constbool", "tag:yaml.org,2002:str"),
+ Node(a, "tag:yaml.org,2002:bool")
+ ),
+ pair(
+ Node("imutbool", "tag:yaml.org,2002:str"),
+ Node(b, "tag:yaml.org,2002:bool")
+ ),
+ pair(
+ Node("const_bool", "tag:yaml.org,2002:str"),
+ Node(aa, "tag:yaml.org,2002:bool")
+ ),
+ pair(
+ Node("imut_bool", "tag:yaml.org,2002:str"),
+ Node(bb, "tag:yaml.org,2002:bool")
+ ),
+ pair(
+ Node("but", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ pair(
+ Node("y", "tag:yaml.org,2002:str"),
+ Node("is a string", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("n", "tag:yaml.org,2002:str"),
+ Node("is a string", "tag:yaml.org,2002:str")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructCustom() @safe
+{
+ return [
+ Node(
+ [
+ Node(new TestClass(1, 2, 3)),
+ Node(TestStruct(10))
+ ],
+ "tag:yaml.org,2002:seq")
+ ];
+}
+
+Node[] constructFloat() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node("canonical", "tag:yaml.org,2002:str"),
+ Node(685230.15L, "tag:yaml.org,2002:float")
+ ),
+ pair(
+ Node("exponential", "tag:yaml.org,2002:str"),
+ Node(685230.15L, "tag:yaml.org,2002:float")
+ ),
+ pair(
+ Node("fixed", "tag:yaml.org,2002:str"),
+ Node(685230.15L, "tag:yaml.org,2002:float")
+ ),
+ pair(
+ Node("sexagesimal", "tag:yaml.org,2002:str"),
+ Node(685230.15L, "tag:yaml.org,2002:float")
+ ),
+ pair(
+ Node("negative infinity", "tag:yaml.org,2002:str"),
+ Node(-real.infinity, "tag:yaml.org,2002:float")
+ ),
+ pair(
+ Node("not a number", "tag:yaml.org,2002:str"),
+ Node(real.nan, "tag:yaml.org,2002:float")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructInt() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node("canonical", "tag:yaml.org,2002:str"),
+ Node(685230L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("decimal", "tag:yaml.org,2002:str"),
+ Node(685230L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("octal", "tag:yaml.org,2002:str"),
+ Node(685230L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("hexadecimal", "tag:yaml.org,2002:str"),
+ Node(685230L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("binary", "tag:yaml.org,2002:str"),
+ Node(685230L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("sexagesimal", "tag:yaml.org,2002:str"),
+ Node(685230L, "tag:yaml.org,2002:int")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructMap() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node("Block style", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ pair(
+ Node("Clark", "tag:yaml.org,2002:str"),
+ Node("Evans", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("Brian", "tag:yaml.org,2002:str"),
+ Node("Ingerson", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("Oren", "tag:yaml.org,2002:str"),
+ Node("Ben-Kiki", "tag:yaml.org,2002:str")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ),
+ pair(
+ Node("Flow style", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ pair(
+ Node("Clark", "tag:yaml.org,2002:str"),
+ Node("Evans", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("Brian", "tag:yaml.org,2002:str"),
+ Node("Ingerson", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("Oren", "tag:yaml.org,2002:str"),
+ Node("Ben-Kiki", "tag:yaml.org,2002:str")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructMerge() @safe
+{
+ return [
+ Node(
+ [
+ Node(
+ [
+ pair(
+ Node("x", "tag:yaml.org,2002:str"),
+ Node(1L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("y", "tag:yaml.org,2002:str"),
+ Node(2L, "tag:yaml.org,2002:int")
+ )
+ ],
+ "tag:yaml.org,2002:map"),
+ Node(
+ [
+ pair(
+ Node("x", "tag:yaml.org,2002:str"),
+ Node(0L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("y", "tag:yaml.org,2002:str"),
+ Node(2L, "tag:yaml.org,2002:int")
+ )
+ ],
+ "tag:yaml.org,2002:map"),
+ Node(
+ [
+ pair(
+ Node("r", "tag:yaml.org,2002:str"),
+ Node(10L, "tag:yaml.org,2002:int")
+ )
+ ],
+ "tag:yaml.org,2002:map"),
+ Node(
+ [
+ pair(
+ Node("r", "tag:yaml.org,2002:str"),
+ Node(1L, "tag:yaml.org,2002:int")
+ )
+ ],
+ "tag:yaml.org,2002:map"),
+ Node(
+ [
+ pair(
+ Node("x", "tag:yaml.org,2002:str"),
+ Node(1L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("y", "tag:yaml.org,2002:str"),
+ Node(2L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("r", "tag:yaml.org,2002:str"),
+ Node(10L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("label", "tag:yaml.org,2002:str"),
+ Node("center/big", "tag:yaml.org,2002:str")
+ )
+ ],
+ "tag:yaml.org,2002:map"),
+ Node(
+ [
+ pair(
+ Node("r", "tag:yaml.org,2002:str"),
+ Node(10L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("label", "tag:yaml.org,2002:str"),
+ Node("center/big", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("x", "tag:yaml.org,2002:str"),
+ Node(1L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("y", "tag:yaml.org,2002:str"),
+ Node(2L, "tag:yaml.org,2002:int")
+ )
+ ],
+ "tag:yaml.org,2002:map"),
+ Node(
+ [
+ pair(
+ Node("label", "tag:yaml.org,2002:str"),
+ Node("center/big", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("x", "tag:yaml.org,2002:str"),
+ Node(1L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("y", "tag:yaml.org,2002:str"),
+ Node(2L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("r", "tag:yaml.org,2002:str"),
+ Node(10L, "tag:yaml.org,2002:int")
+ )
+ ],
+ "tag:yaml.org,2002:map"),
+ Node(
+ [
+ pair(
+ Node("x", "tag:yaml.org,2002:str"),
+ Node(1L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("label", "tag:yaml.org,2002:str"),
+ Node("center/big", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("r", "tag:yaml.org,2002:str"),
+ Node(10L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("y", "tag:yaml.org,2002:str"),
+ Node(2L, "tag:yaml.org,2002:int")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ],
+ "tag:yaml.org,2002:seq")
+ ];
+}
+
+Node[] constructNull() @safe
+{
+ return [
+ Node(YAMLNull(), "tag:yaml.org,2002:null"),
+ Node(
+ [
+ pair(
+ Node("empty", "tag:yaml.org,2002:str"),
+ Node(YAMLNull(), "tag:yaml.org,2002:null")
+ ),
+ pair(
+ Node("canonical", "tag:yaml.org,2002:str"),
+ Node(YAMLNull(), "tag:yaml.org,2002:null")
+ ),
+ pair(
+ Node("english", "tag:yaml.org,2002:str"),
+ Node(YAMLNull(), "tag:yaml.org,2002:null")
+ ),
+ pair(
+ Node(YAMLNull(), "tag:yaml.org,2002:null"),
+ Node("null key", "tag:yaml.org,2002:str")
+ )
+ ],
+ "tag:yaml.org,2002:map"),
+ Node(
+ [
+ pair(
+ Node("sparse", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ Node(YAMLNull(), "tag:yaml.org,2002:null"),
+ Node("2nd entry", "tag:yaml.org,2002:str"),
+ Node(YAMLNull(), "tag:yaml.org,2002:null"),
+ Node("4th entry", "tag:yaml.org,2002:str"),
+ Node(YAMLNull(), "tag:yaml.org,2002:null")
+ ],
+ "tag:yaml.org,2002:seq")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructOMap() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node("Bestiary", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ pair(
+ Node("aardvark", "tag:yaml.org,2002:str"),
+ Node("African pig-like ant eater. Ugly.", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("anteater", "tag:yaml.org,2002:str"),
+ Node("South-American ant eater. Two species.", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("anaconda", "tag:yaml.org,2002:str"),
+ Node("South-American constrictor snake. Scaly.", "tag:yaml.org,2002:str")
+ )
+ ],
+ "tag:yaml.org,2002:omap")
+ ),
+ pair(
+ Node("Numbers", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ pair(
+ Node("one", "tag:yaml.org,2002:str"),
+ Node(1L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("two", "tag:yaml.org,2002:str"),
+ Node(2L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("three", "tag:yaml.org,2002:str"),
+ Node(3L, "tag:yaml.org,2002:int")
+ )
+ ],
+ "tag:yaml.org,2002:omap")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructPairs() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node("Block tasks", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ pair(Node("meeting", "tag:yaml.org,2002:str"), Node("with team.", "tag:yaml.org,2002:str")),
+ pair(Node("meeting", "tag:yaml.org,2002:str"), Node("with boss.", "tag:yaml.org,2002:str")),
+ pair(Node("break", "tag:yaml.org,2002:str"), Node("lunch.", "tag:yaml.org,2002:str")),
+ pair(Node("meeting", "tag:yaml.org,2002:str"), Node("with client.", "tag:yaml.org,2002:str"))
+ ],
+ "tag:yaml.org,2002:pairs")
+ ),
+ pair(
+ Node("Flow tasks", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ pair(Node("meeting", "tag:yaml.org,2002:str"), Node("with team", "tag:yaml.org,2002:str")),
+ pair(Node("meeting", "tag:yaml.org,2002:str"), Node("with boss", "tag:yaml.org,2002:str"))
+ ],
+ "tag:yaml.org,2002:pairs")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructSeq() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node("Block style", "tag:yaml.org,2002:str"),
+ Node([
+ Node("Mercury", "tag:yaml.org,2002:str"),
+ Node("Venus", "tag:yaml.org,2002:str"),
+ Node("Earth", "tag:yaml.org,2002:str"),
+ Node("Mars", "tag:yaml.org,2002:str"),
+ Node("Jupiter", "tag:yaml.org,2002:str"),
+ Node("Saturn", "tag:yaml.org,2002:str"),
+ Node("Uranus", "tag:yaml.org,2002:str"),
+ Node("Neptune", "tag:yaml.org,2002:str"),
+ Node("Pluto", "tag:yaml.org,2002:str")
+ ], "tag:yaml.org,2002:seq")
+ ),
+ pair(
+ Node("Flow style", "tag:yaml.org,2002:str"),
+ Node([
+ Node("Mercury", "tag:yaml.org,2002:str"),
+ Node("Venus", "tag:yaml.org,2002:str"),
+ Node("Earth", "tag:yaml.org,2002:str"),
+ Node("Mars", "tag:yaml.org,2002:str"),
+ Node("Jupiter", "tag:yaml.org,2002:str"),
+ Node("Saturn", "tag:yaml.org,2002:str"),
+ Node("Uranus", "tag:yaml.org,2002:str"),
+ Node("Neptune", "tag:yaml.org,2002:str"),
+ Node("Pluto", "tag:yaml.org,2002:str")
+ ], "tag:yaml.org,2002:seq")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructSet() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node("baseball players", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ Node("Mark McGwire", "tag:yaml.org,2002:str"),
+ Node("Sammy Sosa", "tag:yaml.org,2002:str"),
+ Node("Ken Griffey", "tag:yaml.org,2002:str")
+ ],
+ "tag:yaml.org,2002:set")
+ ),
+ pair(
+ Node("baseball teams", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ Node("Boston Red Sox", "tag:yaml.org,2002:str"),
+ Node("Detroit Tigers", "tag:yaml.org,2002:str"),
+ Node("New York Yankees", "tag:yaml.org,2002:str")
+ ],
+ "tag:yaml.org,2002:set")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructStrASCII() @safe
+{
+ return [
+ Node("ascii string", "tag:yaml.org,2002:str")
+ ];
+}
+
+Node[] constructStr() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node("string", "tag:yaml.org,2002:str"),
+ Node("abcd", "tag:yaml.org,2002:str")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructStrUTF8() @safe
+{
+ return [
+ Node("\u042d\u0442\u043e \u0443\u043d\u0438\u043a\u043e\u0434\u043d\u0430\u044f \u0441\u0442\u0440\u043e\u043a\u0430", "tag:yaml.org,2002:str")
+ ];
+}
+
+Node[] constructTimestamp() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node("canonical", "tag:yaml.org,2002:str"),
+ Node(SysTime(DateTime(2001, 12, 15, 2, 59, 43), 1000000.dur!"hnsecs", UTC()), "tag:yaml.org,2002:timestamp")
+ ),
+ pair(
+ Node("valid iso8601", "tag:yaml.org,2002:str"),
+ Node(SysTime(DateTime(2001, 12, 15, 2, 59, 43), 1000000.dur!"hnsecs", UTC()), "tag:yaml.org,2002:timestamp")
+ ),
+ pair(
+ Node("space separated", "tag:yaml.org,2002:str"),
+ Node(SysTime(DateTime(2001, 12, 15, 2, 59, 43), 1000000.dur!"hnsecs", UTC()), "tag:yaml.org,2002:timestamp")
+ ),
+ pair(
+ Node("no time zone (Z)", "tag:yaml.org,2002:str"),
+ Node(SysTime(DateTime(2001, 12, 15, 2, 59, 43), 1000000.dur!"hnsecs", UTC()), "tag:yaml.org,2002:timestamp")
+ ),
+ pair(
+ Node("date (00:00:00Z)", "tag:yaml.org,2002:str"),
+ Node(SysTime(DateTime(2002, 12, 14), UTC()), "tag:yaml.org,2002:timestamp")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] constructValue() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node("link with", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ Node("library1.dll", "tag:yaml.org,2002:str"),
+ Node("library2.dll", "tag:yaml.org,2002:str")
+ ],
+ "tag:yaml.org,2002:seq")
+ )
+ ],
+ "tag:yaml.org,2002:map"),
+ Node(
+ [
+ pair(
+ Node("link with", "tag:yaml.org,2002:str"),
+ Node(
+ [
+ Node(
+ [
+ pair(
+ Node("=", "tag:yaml.org,2002:value"),
+ Node("library1.dll", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("version", "tag:yaml.org,2002:str"),
+ Node(1.2L, "tag:yaml.org,2002:float")
+ )
+ ],
+ "tag:yaml.org,2002:map"),
+ Node(
+ [
+ pair(
+ Node("=", "tag:yaml.org,2002:value"),
+ Node("library2.dll", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("version", "tag:yaml.org,2002:str"),
+ Node(2.3L, "tag:yaml.org,2002:float")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ],
+ "tag:yaml.org,2002:seq")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] duplicateMergeKey() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node("foo", "tag:yaml.org,2002:str"),
+ Node("bar", "tag:yaml.org,2002:str")
+ ),
+ pair(
+ Node("x", "tag:yaml.org,2002:str"),
+ Node(1L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("y", "tag:yaml.org,2002:str"),
+ Node(2L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("z", "tag:yaml.org,2002:str"),
+ Node(3L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node("t", "tag:yaml.org,2002:str"),
+ Node(4L, "tag:yaml.org,2002:int")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] floatRepresenterBug() @safe
+{
+ return [
+ Node(
+ [
+ pair(
+ Node(1.0L, "tag:yaml.org,2002:float"),
+ Node(1L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node(real.infinity, "tag:yaml.org,2002:float"),
+ Node(10L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node(-real.infinity, "tag:yaml.org,2002:float"),
+ Node(-10L, "tag:yaml.org,2002:int")
+ ),
+ pair(
+ Node(real.nan, "tag:yaml.org,2002:float"),
+ Node(100L, "tag:yaml.org,2002:int")
+ )
+ ],
+ "tag:yaml.org,2002:map")
+ ];
+}
+
+Node[] invalidSingleQuoteBug() @safe
+{
+ return [
+ Node(
+ [
+ Node("foo \'bar\'", "tag:yaml.org,2002:str"),
+ Node("foo\n\'bar\'", "tag:yaml.org,2002:str")
+ ],
+ "tag:yaml.org,2002:seq")
+ ];
+}
+
+Node[] moreFloats() @safe
+{
+ return [
+ Node(
+ [
+ Node(0.0L, "tag:yaml.org,2002:float"),
+ Node(1.0L, "tag:yaml.org,2002:float"),
+ Node(-1.0L, "tag:yaml.org,2002:float"),
+ Node(real.infinity, "tag:yaml.org,2002:float"),
+ Node(-real.infinity, "tag:yaml.org,2002:float"),
+ Node(real.nan, "tag:yaml.org,2002:float"),
+ Node(real.nan, "tag:yaml.org,2002:float")
+ ],
+ "tag:yaml.org,2002:seq")
+ ];
+}
+
+Node[] negativeFloatBug() @safe
+{
+ return [
+ Node(-1.0L, "tag:yaml.org,2002:float")
+ ];
+}
+
+Node[] singleDotFloatBug() @safe
+{
+ return [
+ Node(".", "tag:yaml.org,2002:str")
+ ];
+}
+
+Node[] timestampBugs() @safe
+{
+ return [
+ Node(
+ [
+ Node(SysTime(DateTime(2001, 12, 15, 3, 29, 43), 1000000.dur!"hnsecs", UTC()), "tag:yaml.org,2002:timestamp"),
+ Node(SysTime(DateTime(2001, 12, 14, 16, 29, 43), 1000000.dur!"hnsecs", UTC()), "tag:yaml.org,2002:timestamp"),
+ Node(SysTime(DateTime(2001, 12, 14, 21, 59, 43), 10100.dur!"hnsecs", UTC()), "tag:yaml.org,2002:timestamp"),
+ Node(SysTime(DateTime(2001, 12, 14, 21, 59, 43), new immutable SimpleTimeZone(60.dur!"minutes")), "tag:yaml.org,2002:timestamp"),
+ Node(SysTime(DateTime(2001, 12, 14, 21, 59, 43), new immutable SimpleTimeZone(-90.dur!"minutes")), "tag:yaml.org,2002:timestamp"),
+ Node(SysTime(DateTime(2005, 7, 8, 17, 35, 4), 5176000.dur!"hnsecs", UTC()), "tag:yaml.org,2002:timestamp")
+ ],
+ "tag:yaml.org,2002:seq")
+ ];
+}
+
+Node[] utf16be() @safe
+{
+ return [
+ Node("UTF-16-BE", "tag:yaml.org,2002:str")
+ ];
+}
+
+Node[] utf16le() @safe
+{
+ return [
+ Node("UTF-16-LE", "tag:yaml.org,2002:str")
+ ];
+}
+
+Node[] utf8() @safe
+{
+ return [
+ Node("UTF-8", "tag:yaml.org,2002:str")
+ ];
+}
+
+Node[] utf8implicit() @safe
+{
+ return [
+ Node("implicit UTF-8", "tag:yaml.org,2002:str")
+ ];
+}
+
+///Testing custom YAML class type.
+class TestClass
+{
+ int x, y, z;
+
+ this(int x, int y, int z) @safe
+ {
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+
+ Node opCast(T: Node)() @safe
+ {
+ return Node(
+ [
+ Node.Pair(
+ Node("x", "tag:yaml.org,2002:str"),
+ Node(x, "tag:yaml.org,2002:int")
+ ),
+ Node.Pair(
+ Node("y", "tag:yaml.org,2002:str"),
+ Node(y, "tag:yaml.org,2002:int")
+ ),
+ Node.Pair(
+ Node("z", "tag:yaml.org,2002:str"),
+ Node(z, "tag:yaml.org,2002:int")
+ )
+ ],
+ "!tag1");
+ }
+}
+
+///Testing custom YAML struct type.
+struct TestStruct
+{
+ int value;
+
+ this (int x) @safe
+ {
+ value = x;
+ }
+
+ ///Constructor function for TestStruct.
+ this(ref Node node) @safe
+ {
+ value = node.as!string.to!int;
+ }
+
+ ///Representer function for TestStruct.
+ Node opCast(T: Node)() @safe
+ {
+ return Node(value.to!string, "!tag2");
+ }
+}
+
+} // version(unittest)
+
+
+@safe unittest
+{
+ import dyaml.test.common : assertNodesEqual, run;
+ /**
+ Constructor unittest.
+
+ Params:
+ dataFilename = File name to read from.
+ codeDummy = Dummy .code filename, used to determine that
+ .data file with the same name should be used in this test.
+ */
+ static void testConstructor(string dataFilename, string codeDummy) @safe
+ {
+ string base = dataFilename.baseName.stripExtension;
+ assert((base in expected) !is null, "Unimplemented constructor test: " ~ base);
+
+ auto loader = Loader.fromFile(dataFilename);
+
+ Node[] exp = expected[base];
+
+ //Compare with expected results document by document.
+ size_t i;
+ foreach (node; loader)
+ {
+ assertNodesEqual(node, exp[i]);
+ ++i;
+ }
+ assert(i == exp.length);
+ }
+ run(&testConstructor, ["data", "code"]);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/test/emitter.d b/src/ext_depends/D-YAML/source/dyaml/test/emitter.d
new file mode 100644
index 0000000..293f236
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/test/emitter.d
@@ -0,0 +1,132 @@
+
+// Copyright Ferdinand Majerech 2011-2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.test.emitter;
+
+@safe unittest
+{
+ import std.array : Appender;
+ import std.range : ElementType, isInputRange;
+
+ import dyaml : CollectionStyle, LineBreak, Loader, Mark, ScalarStyle;
+ import dyaml.emitter : Emitter;
+ import dyaml.event : Event, EventID, mappingStartEvent, scalarEvent, sequenceStartEvent;
+ import dyaml.test.common : assertEventsEqual, run;
+
+ // Try to emit an event range.
+ static void emitTestCommon(T)(ref Appender!string emitStream, T events, bool canonical = false) @safe
+ if (isInputRange!T && is(ElementType!T == Event))
+ {
+ auto emitter = Emitter!(typeof(emitStream), char)(emitStream, canonical, 2, 80, LineBreak.unix);
+ foreach (ref event; events)
+ {
+ emitter.emit(event);
+ }
+ }
+ /**
+ Test emitter by getting events from parsing a file, emitting them, parsing
+ the emitted result and comparing events from parsing the emitted result with
+ originally parsed events.
+
+ Params:
+ dataFilename = YAML file to parse.
+ canonicalFilename = Canonical YAML file used as dummy to determine
+ which data files to load.
+ */
+ static void testEmitterOnData(string dataFilename, string canonicalFilename) @safe
+ {
+ //Must exist due to Anchor, Tags reference counts.
+ auto loader = Loader.fromFile(dataFilename);
+ auto events = loader.parse();
+ auto emitStream = Appender!string();
+ emitTestCommon(emitStream, events);
+
+ auto loader2 = Loader.fromString(emitStream.data);
+ loader2.name = "TEST";
+ auto newEvents = loader2.parse();
+ assertEventsEqual(events, newEvents);
+ }
+ /**
+ Test emitter by getting events from parsing a canonical YAML file, emitting
+ them both in canonical and normal format, parsing the emitted results and
+ comparing events from parsing the emitted result with originally parsed events.
+
+ Params: canonicalFilename = Canonical YAML file to parse.
+ */
+ static void testEmitterOnCanonical(string canonicalFilename) @safe
+ {
+ //Must exist due to Anchor, Tags reference counts.
+ auto loader = Loader.fromFile(canonicalFilename);
+ auto events = loader.parse();
+ foreach (canonical; [false, true])
+ {
+ auto emitStream = Appender!string();
+ emitTestCommon(emitStream, events, canonical);
+
+ auto loader2 = Loader.fromString(emitStream.data);
+ loader2.name = "TEST";
+ auto newEvents = loader2.parse();
+ assertEventsEqual(events, newEvents);
+ }
+ }
+ /**
+ Test emitter by getting events from parsing a file, emitting them with all
+ possible scalar and collection styles, parsing the emitted results and
+ comparing events from parsing the emitted result with originally parsed events.
+
+ Params:
+ dataFilename = YAML file to parse.
+ canonicalFilename = Canonical YAML file used as dummy to determine
+ which data files to load.
+ */
+ static void testEmitterStyles(string dataFilename, string canonicalFilename) @safe
+ {
+ foreach (filename; [dataFilename, canonicalFilename])
+ {
+ //must exist due to Anchor, Tags reference counts
+ auto loader = Loader.fromFile(canonicalFilename);
+ auto events = loader.parse();
+ foreach (flowStyle; [CollectionStyle.block, CollectionStyle.flow])
+ {
+ foreach (style; [ScalarStyle.literal, ScalarStyle.folded,
+ ScalarStyle.doubleQuoted, ScalarStyle.singleQuoted,
+ ScalarStyle.plain])
+ {
+ Event[] styledEvents;
+ foreach (event; events)
+ {
+ if (event.id == EventID.scalar)
+ {
+ event = scalarEvent(Mark(), Mark(), event.anchor, event.tag,
+ event.implicit,
+ event.value, style);
+ }
+ else if (event.id == EventID.sequenceStart)
+ {
+ event = sequenceStartEvent(Mark(), Mark(), event.anchor,
+ event.tag, event.implicit, flowStyle);
+ }
+ else if (event.id == EventID.mappingStart)
+ {
+ event = mappingStartEvent(Mark(), Mark(), event.anchor,
+ event.tag, event.implicit, flowStyle);
+ }
+ styledEvents ~= event;
+ }
+ auto emitStream = Appender!string();
+ emitTestCommon(emitStream, styledEvents);
+ auto loader2 = Loader.fromString(emitStream.data);
+ loader2.name = "TEST";
+ auto newEvents = loader2.parse();
+ assertEventsEqual(events, newEvents);
+ }
+ }
+ }
+ }
+ run(&testEmitterOnData, ["data", "canonical"]);
+ run(&testEmitterOnCanonical, ["canonical"]);
+ run(&testEmitterStyles, ["data", "canonical"]);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/test/errors.d b/src/ext_depends/D-YAML/source/dyaml/test/errors.d
new file mode 100644
index 0000000..43b019c
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/test/errors.d
@@ -0,0 +1,64 @@
+
+// Copyright Ferdinand Majerech 2011-2014
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.test.errors;
+
+@safe unittest
+{
+ import std.array : array;
+ import std.exception : assertThrown;
+
+ import dyaml : Loader;
+ import dyaml.test.common : run;
+
+ /**
+ Loader error unittest from file stream.
+
+ Params: errorFilename = File name to read from.
+ */
+ static void testLoaderError(string errorFilename) @safe
+ {
+ assertThrown(Loader.fromFile(errorFilename).array,
+ __FUNCTION__ ~ "(" ~ errorFilename ~ ") Expected an exception");
+ }
+
+ /**
+ Loader error unittest from string.
+
+ Params: errorFilename = File name to read from.
+ */
+ static void testLoaderErrorString(string errorFilename) @safe
+ {
+ assertThrown(Loader.fromFile(errorFilename).array,
+ __FUNCTION__ ~ "(" ~ errorFilename ~ ") Expected an exception");
+ }
+
+ /**
+ Loader error unittest from filename.
+
+ Params: errorFilename = File name to read from.
+ */
+ static void testLoaderErrorFilename(string errorFilename) @safe
+ {
+ assertThrown(Loader.fromFile(errorFilename).array,
+ __FUNCTION__ ~ "(" ~ errorFilename ~ ") Expected an exception");
+ }
+
+ /**
+ Loader error unittest loading a single document from a file.
+
+ Params: errorFilename = File name to read from.
+ */
+ static void testLoaderErrorSingle(string errorFilename) @safe
+ {
+ assertThrown(Loader.fromFile(errorFilename).load(),
+ __FUNCTION__ ~ "(" ~ errorFilename ~ ") Expected an exception");
+ }
+ run(&testLoaderError, ["loader-error"]);
+ run(&testLoaderErrorString, ["loader-error"]);
+ run(&testLoaderErrorFilename, ["loader-error"]);
+ run(&testLoaderErrorSingle, ["single-loader-error"]);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/test/inputoutput.d b/src/ext_depends/D-YAML/source/dyaml/test/inputoutput.d
new file mode 100644
index 0000000..758def8
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/test/inputoutput.d
@@ -0,0 +1,92 @@
+
+// Copyright Ferdinand Majerech 2011-2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.test.inputoutput;
+
+@safe unittest
+{
+ import std.array : join, split;
+ import std.conv : to;
+ import std.exception : assertThrown;
+ import std.file : readText;
+ import std.system : endian, Endian;
+
+ import dyaml : Loader, Node, YAMLException;
+ import dyaml.test.common : run;
+
+ /**
+ Get an UTF-16 byte order mark.
+
+ Params: wrong = Get the incorrect BOM for this system.
+
+ Returns: UTF-16 byte order mark.
+ */
+ static wchar bom16(bool wrong = false) pure @safe
+ {
+ wchar little = '\uFEFF';
+ wchar big = '\uFFFE';
+ if (!wrong)
+ {
+ return endian == Endian.littleEndian ? little : big;
+ }
+ return endian == Endian.littleEndian ? big : little;
+ }
+ /**
+ Get an UTF-32 byte order mark.
+
+ Params: wrong = Get the incorrect BOM for this system.
+
+ Returns: UTF-32 byte order mark.
+ */
+ static dchar bom32(bool wrong = false) pure @safe
+ {
+ dchar little = '\uFEFF';
+ dchar big = '\uFFFE';
+ if (!wrong)
+ {
+ return endian == Endian.littleEndian ? little : big;
+ }
+ return endian == Endian.littleEndian ? big : little;
+ }
+ /**
+ Unicode input unittest. Tests various encodings.
+
+ Params: unicodeFilename = File name to read from.
+ */
+ static void testUnicodeInput(string unicodeFilename) @safe
+ {
+ string data = readText(unicodeFilename);
+ string expected = data.split().join(" ");
+
+ Node output = Loader.fromString(data).load();
+ assert(output.as!string == expected);
+
+ foreach (buffer; [cast(ubyte[]) (bom16() ~ data.to!(wchar[])),
+ cast(ubyte[]) (bom32() ~ data.to!(dchar[]))])
+ {
+ output = Loader.fromBuffer(buffer).load();
+ assert(output.as!string == expected);
+ }
+ }
+ /**
+ Unicode input error unittest. Tests various encodings with incorrect BOMs.
+
+ Params: unicodeFilename = File name to read from.
+ */
+ static void testUnicodeInputErrors(string unicodeFilename) @safe
+ {
+ string data = readText(unicodeFilename);
+ foreach (buffer; [cast(ubyte[]) (data.to!(wchar[])),
+ cast(ubyte[]) (data.to!(dchar[])),
+ cast(ubyte[]) (bom16(true) ~ data.to!(wchar[])),
+ cast(ubyte[]) (bom32(true) ~ data.to!(dchar[]))])
+ {
+ assertThrown(Loader.fromBuffer(buffer).load());
+ }
+ }
+ run(&testUnicodeInput, ["unicode"]);
+ run(&testUnicodeInputErrors, ["unicode"]);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/test/reader.d b/src/ext_depends/D-YAML/source/dyaml/test/reader.d
new file mode 100644
index 0000000..c20df6f
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/test/reader.d
@@ -0,0 +1,37 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.test.reader;
+
+@safe unittest
+{
+ import std.exception :assertThrown;
+
+ import dyaml.test.common : readData, run;
+ import dyaml.reader : Reader, ReaderException;
+
+ /**
+ Try reading entire file through Reader, expecting an error (the file is invalid).
+
+ Params: data = Stream to read.
+ */
+ static void runReader(ubyte[] fileData) @safe
+ {
+ auto reader = new Reader(fileData);
+ while(reader.peek() != '\0') { reader.forward(); }
+ }
+
+ /**
+ Stream error unittest. Tries to read invalid input files, expecting errors.
+
+ Params: errorFilename = File name to read from.
+ */
+ static void testStreamError(string errorFilename) @safe
+ {
+ assertThrown!ReaderException(runReader(readData(errorFilename)));
+ }
+ run(&testStreamError, ["stream-error"]);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/test/representer.d b/src/ext_depends/D-YAML/source/dyaml/test/representer.d
new file mode 100644
index 0000000..4a1ae67
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/test/representer.d
@@ -0,0 +1,54 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.test.representer;
+
+@safe unittest
+{
+ import std.array : Appender, array;
+ import std.meta : AliasSeq;
+ import std.path : baseName, stripExtension;
+ import std.utf : toUTF8;
+
+ import dyaml : dumper, Loader, Node;
+ import dyaml.test.common : assertNodesEqual, run;
+ import dyaml.test.constructor : expected;
+
+ /**
+ Representer unittest. Dumps nodes, then loads them again.
+
+ Params:
+ baseName = Nodes in dyaml.test.constructor.expected for roundtripping.
+ */
+ static void testRepresenterTypes(string baseName) @safe
+ {
+ assert((baseName in expected) !is null, "Unimplemented representer test: " ~ baseName);
+
+ Node[] expectedNodes = expected[baseName];
+ foreach (encoding; AliasSeq!(char, wchar, dchar))
+ {
+ auto emitStream = new Appender!(immutable(encoding)[]);
+ auto dumper = dumper();
+ dumper.dump!encoding(emitStream, expectedNodes);
+
+ immutable output = emitStream.data;
+
+ auto loader = Loader.fromString(emitStream.data.toUTF8);
+ loader.name = "TEST";
+ const readNodes = loader.array;
+
+ assert(expectedNodes.length == readNodes.length);
+ foreach (n; 0 .. expectedNodes.length)
+ {
+ assertNodesEqual(expectedNodes[n], readNodes[n]);
+ }
+ }
+ }
+ foreach (key, _; expected)
+ {
+ testRepresenterTypes(key);
+ }
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/test/resolver.d b/src/ext_depends/D-YAML/source/dyaml/test/resolver.d
new file mode 100644
index 0000000..ea93720
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/test/resolver.d
@@ -0,0 +1,39 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.test.resolver;
+
+@safe unittest
+{
+ import std.conv : text;
+ import std.file : readText;
+ import std.string : strip;
+
+ import dyaml : Loader, Node, NodeID;
+ import dyaml.test.common : run;
+
+
+ /**
+ Implicit tag resolution unittest.
+
+ Params:
+ dataFilename = File with unittest data.
+ detectFilename = Dummy filename used to specify which data filenames to use.
+ */
+ static void testImplicitResolver(string dataFilename, string detectFilename) @safe
+ {
+ const correctTag = readText(detectFilename).strip();
+
+ auto node = Loader.fromFile(dataFilename).load();
+ assert(node.nodeID == NodeID.sequence, text("Expected sequence when reading '", dataFilename, "', got ", node.nodeID));
+ foreach (Node scalar; node)
+ {
+ assert(scalar.nodeID == NodeID.scalar, text("Expected sequence of scalars when reading '", dataFilename, "', got sequence of ", scalar.nodeID));
+ assert(scalar.tag == correctTag, text("Expected tag '", correctTag, "' when reading '", dataFilename, "', got '", scalar.tag, "'"));
+ }
+ }
+ run(&testImplicitResolver, ["data", "detect"]);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/test/tokens.d b/src/ext_depends/D-YAML/source/dyaml/test/tokens.d
new file mode 100644
index 0000000..c099647
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/test/tokens.d
@@ -0,0 +1,93 @@
+
+// Copyright Ferdinand Majerech 2011.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+module dyaml.test.tokens;
+
+@safe unittest
+{
+ import std.array : split;
+ import std.conv : text;
+ import std.file : readText;
+
+ import dyaml.test.common : run;
+ import dyaml.reader : Reader;
+ import dyaml.scanner : Scanner;
+ import dyaml.token : TokenID;
+
+ // Read and scan a YAML doc, returning a range of tokens.
+ static auto scanTestCommon(string filename) @safe
+ {
+ ubyte[] yamlData = cast(ubyte[])readText(filename).dup;
+ return Scanner(new Reader(yamlData));
+ }
+
+ /**
+ Test tokens output by scanner.
+
+ Params:
+ dataFilename = File to scan.
+ tokensFilename = File containing expected tokens.
+ */
+ static void testTokens(string dataFilename, string tokensFilename) @safe
+ {
+ //representations of YAML tokens in tokens file.
+ auto replace = [
+ TokenID.directive: "%",
+ TokenID.documentStart: "---",
+ TokenID.documentEnd: "...",
+ TokenID.alias_: "*",
+ TokenID.anchor: "&",
+ TokenID.tag: "!",
+ TokenID.scalar: "_",
+ TokenID.blockSequenceStart: "[[",
+ TokenID.blockMappingStart: "{{",
+ TokenID.blockEnd: "]}",
+ TokenID.flowSequenceStart: "[",
+ TokenID.flowSequenceEnd: "]",
+ TokenID.flowMappingStart: "{",
+ TokenID.flowMappingEnd: "}",
+ TokenID.blockEntry: ",",
+ TokenID.flowEntry: ",",
+ TokenID.key: "?",
+ TokenID.value: ":"
+ ];
+
+ string[] tokens;
+ string[] expectedTokens = readText(tokensFilename).split();
+
+ foreach (token; scanTestCommon(dataFilename))
+ {
+ if (token.id != TokenID.streamStart && token.id != TokenID.streamEnd)
+ {
+ tokens ~= replace[token.id];
+ }
+ }
+
+ assert(tokens == expectedTokens,
+ text("In token test for '", tokensFilename, "', expected '", expectedTokens, "', got '", tokens, "'"));
+ }
+
+ /**
+ Test scanner by scanning a file, expecting no errors.
+
+ Params:
+ dataFilename = File to scan.
+ canonicalFilename = Another file to scan, in canonical YAML format.
+ */
+ static void testScanner(string dataFilename, string canonicalFilename) @safe
+ {
+ foreach (filename; [dataFilename, canonicalFilename])
+ {
+ string[] tokens;
+ foreach (token; scanTestCommon(filename))
+ {
+ tokens ~= token.id.text;
+ }
+ }
+ }
+ run(&testTokens, ["data", "tokens"]);
+ run(&testScanner, ["data", "canonical"]);
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/tinyendian.d b/src/ext_depends/D-YAML/source/dyaml/tinyendian.d
new file mode 100644
index 0000000..731b048
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/tinyendian.d
@@ -0,0 +1,213 @@
+// Copyright Ferdinand Majerech 2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/// A minimal library providing functionality for changing the endianness of data.
+module tinyendian;
+
+import std.system : Endian, endian;
+
+/// Unicode UTF encodings.
+enum UTFEncoding : ubyte
+{
+ UTF_8,
+ UTF_16,
+ UTF_32
+}
+///
+@safe unittest
+{
+ const ints = [314, -101];
+ int[2] intsSwapBuffer = ints;
+ swapByteOrder(intsSwapBuffer[]);
+ swapByteOrder(intsSwapBuffer[]);
+ assert(ints == intsSwapBuffer, "Lost information when swapping byte order");
+
+ const floats = [3.14f, 10.1f];
+ float[2] floatsSwapBuffer = floats;
+ swapByteOrder(floatsSwapBuffer[]);
+ swapByteOrder(floatsSwapBuffer[]);
+ assert(floats == floatsSwapBuffer, "Lost information when swapping byte order");
+}
+
+/** Swap byte order of items in an array in place.
+ *
+ * Params:
+ *
+ * T = Item type. Must be either 2 or 4 bytes long.
+ * array = Buffer with values to fix byte order of.
+ */
+void swapByteOrder(T)(T[] array) @trusted @nogc pure nothrow
+if (T.sizeof == 2 || T.sizeof == 4)
+{
+ // Swap the byte order of all read characters.
+ foreach (ref item; array)
+ {
+ static if (T.sizeof == 2)
+ {
+ import std.algorithm.mutation : swap;
+ swap(*cast(ubyte*)&item, *(cast(ubyte*)&item + 1));
+ }
+ else static if (T.sizeof == 4)
+ {
+ import core.bitop : bswap;
+ const swapped = bswap(*cast(uint*)&item);
+ item = *cast(const(T)*)&swapped;
+ }
+ else static assert(false, "Unsupported T: " ~ T.stringof);
+ }
+}
+
+/// See fixUTFByteOrder.
+struct FixUTFByteOrderResult
+{
+ ubyte[] array;
+ UTFEncoding encoding;
+ Endian endian;
+ uint bytesStripped = 0;
+}
+
+/** Convert byte order of an array encoded in UTF(8/16/32) to system endianness in place.
+ *
+ * Uses the UTF byte-order-mark (BOM) to determine UTF encoding. If there is no BOM
+ * at the beginning of array, UTF-8 is assumed (this is compatible with ASCII). The
+ * BOM, if any, will be removed from the buffer.
+ *
+ * If the encoding is determined to be UTF-16 or UTF-32 and there aren't enough bytes
+ * for the last code unit (i.e. if array.length is odd for UTF-16 or not divisible by
+ * 4 for UTF-32), the extra bytes (1 for UTF-16, 1-3 for UTF-32) are stripped.
+ *
+ * Note that this function does $(B not) check if the array is a valid UTF string. It
+ * only works with the BOM and 1,2 or 4-byte items.
+ *
+ * Params:
+ *
+ * array = The array with UTF-data.
+ *
+ * Returns:
+ *
+ * A struct with the following members:
+ *
+ * $(D ubyte[] array) A slice of the input array containing data in correct
+ * byte order, without BOM and in case of UTF-16/UTF-32,
+ * without stripped bytes, if any.
+ * $(D UTFEncoding encoding) Encoding of the result (UTF-8, UTF-16 or UTF-32)
+ * $(D std.system.Endian endian) Endianness of the original array.
+ * $(D uint bytesStripped) Number of bytes stripped from a UTF-16/UTF-32 array, if
+ * any. This is non-zero only if array.length was not
+ * divisible by 2 or 4 for UTF-16 and UTF-32, respectively.
+ *
+ * Complexity: (BIGOH array.length)
+ */
+auto fixUTFByteOrder(ubyte[] array) @safe @nogc pure nothrow
+{
+ // Enumerates UTF BOMs, matching indices to byteOrderMarks/bomEndian.
+ enum BOM: ubyte
+ {
+ UTF_8 = 0,
+ UTF_16_LE = 1,
+ UTF_16_BE = 2,
+ UTF_32_LE = 3,
+ UTF_32_BE = 4,
+ None = ubyte.max
+ }
+
+ // These 2 are from std.stream
+ static immutable ubyte[][5] byteOrderMarks = [ [0xEF, 0xBB, 0xBF],
+ [0xFF, 0xFE],
+ [0xFE, 0xFF],
+ [0xFF, 0xFE, 0x00, 0x00],
+ [0x00, 0x00, 0xFE, 0xFF] ];
+ static immutable Endian[5] bomEndian = [ endian,
+ Endian.littleEndian,
+ Endian.bigEndian,
+ Endian.littleEndian,
+ Endian.bigEndian ];
+
+ // Documented in function ddoc.
+
+ FixUTFByteOrderResult result;
+
+ // Detect BOM, if any, in the bytes we've read. -1 means no BOM.
+ // Need the last match: First 2 bytes of UTF-32LE BOM match the UTF-16LE BOM. If we
+ // used the first match, UTF-16LE would be detected when we have a UTF-32LE BOM.
+ import std.algorithm.searching : startsWith;
+ BOM bomId = BOM.None;
+ foreach (i, bom; byteOrderMarks)
+ if (array.startsWith(bom))
+ bomId = cast(BOM)i;
+
+ result.endian = (bomId != BOM.None) ? bomEndian[bomId] : Endian.init;
+
+ // Start of UTF data (after BOM, if any)
+ size_t start = 0;
+ // If we've read more than just the BOM, put the rest into the array.
+ with(BOM) final switch(bomId)
+ {
+ case None: result.encoding = UTFEncoding.UTF_8; break;
+ case UTF_8:
+ start = 3;
+ result.encoding = UTFEncoding.UTF_8;
+ break;
+ case UTF_16_LE, UTF_16_BE:
+ result.bytesStripped = array.length % 2;
+ start = 2;
+ result.encoding = UTFEncoding.UTF_16;
+ break;
+ case UTF_32_LE, UTF_32_BE:
+ result.bytesStripped = array.length % 4;
+ start = 4;
+ result.encoding = UTFEncoding.UTF_32;
+ break;
+ }
+
+ // If there's a BOM, we need to move data back to ensure it starts at array[0]
+ if (start != 0)
+ {
+ array = array[start .. $ - result.bytesStripped];
+ }
+
+ // We enforce above that array.length is divisible by 2/4 for UTF-16/32
+ if (endian != result.endian)
+ {
+ if (result.encoding == UTFEncoding.UTF_16)
+ swapByteOrder(cast(wchar[])array);
+ else if (result.encoding == UTFEncoding.UTF_32)
+ swapByteOrder(cast(dchar[])array);
+ }
+
+ result.array = array;
+ return result;
+}
+///
+@safe unittest
+{
+ {
+ ubyte[] s = [0xEF, 0xBB, 0xBF, 'a'];
+ FixUTFByteOrderResult r = fixUTFByteOrder(s);
+ assert(r.encoding == UTFEncoding.UTF_8);
+ assert(r.array.length == 1);
+ assert(r.array == ['a']);
+ assert(r.endian == Endian.littleEndian);
+ }
+
+ {
+ ubyte[] s = ['a'];
+ FixUTFByteOrderResult r = fixUTFByteOrder(s);
+ assert(r.encoding == UTFEncoding.UTF_8);
+ assert(r.array.length == 1);
+ assert(r.array == ['a']);
+ assert(r.endian == Endian.bigEndian);
+ }
+
+ {
+ // strip 'a' b/c not complete unit
+ ubyte[] s = [0xFE, 0xFF, 'a'];
+ FixUTFByteOrderResult r = fixUTFByteOrder(s);
+ assert(r.encoding == UTFEncoding.UTF_16);
+ assert(r.array.length == 0);
+ assert(r.endian == Endian.bigEndian);
+ }
+
+}
diff --git a/src/ext_depends/D-YAML/source/dyaml/token.d b/src/ext_depends/D-YAML/source/dyaml/token.d
new file mode 100644
index 0000000..5400a3f
--- /dev/null
+++ b/src/ext_depends/D-YAML/source/dyaml/token.d
@@ -0,0 +1,172 @@
+
+// Copyright Ferdinand Majerech 2011-2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/// YAML tokens.
+/// Code based on PyYAML: http://www.pyyaml.org
+module dyaml.token;
+
+
+import std.conv;
+
+import dyaml.encoding;
+import dyaml.exception;
+import dyaml.reader;
+import dyaml.style;
+
+
+package:
+
+/// Token types.
+enum TokenID : ubyte
+{
+ // Invalid (uninitialized) token
+ invalid = 0,
+ directive,
+ documentStart,
+ documentEnd,
+ streamStart,
+ streamEnd,
+ blockSequenceStart,
+ blockMappingStart,
+ blockEnd,
+ flowSequenceStart,
+ flowMappingStart,
+ flowSequenceEnd,
+ flowMappingEnd,
+ key,
+ value,
+ blockEntry,
+ flowEntry,
+ alias_,
+ anchor,
+ tag,
+ scalar
+}
+
+/// Specifies the type of a tag directive token.
+enum DirectiveType : ubyte
+{
+ // YAML version directive.
+ yaml,
+ // Tag directive.
+ tag,
+ // Any other directive is "reserved" for future YAML versions.
+ reserved
+}
+
+/// Token produced by scanner.
+///
+/// 32 bytes on 64-bit.
+struct Token
+{
+ @disable int opCmp(ref Token);
+
+ // 16B
+ /// Value of the token, if any.
+ ///
+ /// Values are char[] instead of string, as Parser may still change them in a few
+ /// cases. Parser casts values to strings when producing Events.
+ char[] value;
+ // 4B
+ /// Start position of the token in file/stream.
+ Mark startMark;
+ // 4B
+ /// End position of the token in file/stream.
+ Mark endMark;
+ // 1B
+ /// Token type.
+ TokenID id;
+ // 1B
+ /// Style of scalar token, if this is a scalar token.
+ ScalarStyle style;
+ // 1B
+ /// Encoding, if this is a stream start token.
+ Encoding encoding;
+ // 1B
+ /// Type of directive for directiveToken.
+ DirectiveType directive;
+ // 4B
+ /// Used to split value into 2 substrings for tokens that need 2 values (tagToken)
+ uint valueDivider;
+
+ /// Get string representation of the token ID.
+ @property string idString() @safe pure const {return id.to!string;}
+}
+
+/// Construct a directive token.
+///
+/// Params: start = Start position of the token.
+/// end = End position of the token.
+/// value = Value of the token.
+/// directive = Directive type (YAML or TAG in YAML 1.1).
+/// nameEnd = Position of the end of the name
+Token directiveToken(const Mark start, const Mark end, char[] value,
+ DirectiveType directive, const uint nameEnd) @safe pure nothrow @nogc
+{
+ return Token(value, start, end, TokenID.directive, ScalarStyle.init, Encoding.init,
+ directive, nameEnd);
+}
+
+/// Construct a simple (no value) token with specified type.
+///
+/// Params: id = Type of the token.
+/// start = Start position of the token.
+/// end = End position of the token.
+Token simpleToken(TokenID id)(const Mark start, const Mark end)
+{
+ return Token(null, start, end, id);
+}
+
+/// Construct a stream start token.
+///
+/// Params: start = Start position of the token.
+/// end = End position of the token.
+/// encoding = Encoding of the stream.
+Token streamStartToken(const Mark start, const Mark end, const Encoding encoding) @safe pure nothrow @nogc
+{
+ return Token(null, start, end, TokenID.streamStart, ScalarStyle.invalid, encoding);
+}
+
+/// Aliases for construction of simple token types.
+alias streamEndToken = simpleToken!(TokenID.streamEnd);
+alias blockSequenceStartToken = simpleToken!(TokenID.blockSequenceStart);
+alias blockMappingStartToken = simpleToken!(TokenID.blockMappingStart);
+alias blockEndToken = simpleToken!(TokenID.blockEnd);
+alias keyToken = simpleToken!(TokenID.key);
+alias valueToken = simpleToken!(TokenID.value);
+alias blockEntryToken = simpleToken!(TokenID.blockEntry);
+alias flowEntryToken = simpleToken!(TokenID.flowEntry);
+
+/// Construct a simple token with value with specified type.
+///
+/// Params: id = Type of the token.
+/// start = Start position of the token.
+/// end = End position of the token.
+/// value = Value of the token.
+/// valueDivider = A hack for TagToken to store 2 values in value; the first
+/// value goes up to valueDivider, the second after it.
+Token simpleValueToken(TokenID id)(const Mark start, const Mark end, char[] value,
+ const uint valueDivider = uint.max)
+{
+ return Token(value, start, end, id, ScalarStyle.invalid, Encoding.init,
+ DirectiveType.init, valueDivider);
+}
+
+/// Alias for construction of tag token.
+alias tagToken = simpleValueToken!(TokenID.tag);
+alias aliasToken = simpleValueToken!(TokenID.alias_);
+alias anchorToken = simpleValueToken!(TokenID.anchor);
+
+/// Construct a scalar token.
+///
+/// Params: start = Start position of the token.
+/// end = End position of the token.
+/// value = Value of the token.
+/// style = Style of the token.
+Token scalarToken(const Mark start, const Mark end, char[] value, const ScalarStyle style) @safe pure nothrow @nogc
+{
+ return Token(value, start, end, TokenID.scalar, style);
+}