Commit f56d4530, authored by beder.

Commit message: Added API sketch.

(no parent commits listed)
#ifndef REGEX_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define REGEX_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include <vector>
#include <string>
namespace YAML
{
class Stream;
// Node types for the regex expression tree: a RegEx is one op plus,
// for composite ops, sub-expressions stored in m_params.
enum REGEX_OP { REGEX_EMPTY, REGEX_MATCH, REGEX_RANGE, REGEX_OR, REGEX_AND, REGEX_NOT, REGEX_SEQ };
// simplified regular expressions
// . Only straightforward matches (no repeated characters)
// . Only matches from start of string
class RegEx
{
public:
	RegEx();                 // presumably builds an empty (REGEX_EMPTY) expression -- confirm in regex.cpp
	RegEx(char ch);          // match a single exact character
	RegEx(char a, char z);   // match a single character in a range
	RegEx(const std::string& str, REGEX_OP op = REGEX_SEQ); // build from a string, combined with 'op'
	~RegEx() {}
	// combinators: not / or / and / sequence
	friend RegEx operator ! (const RegEx& ex);
	friend RegEx operator || (const RegEx& ex1, const RegEx& ex2);
	friend RegEx operator && (const RegEx& ex1, const RegEx& ex2);
	friend RegEx operator + (const RegEx& ex1, const RegEx& ex2);
	// Matches: true iff the corresponding Match() returns >= 0
	bool Matches(char ch) const;
	bool Matches(const std::string& str) const;
	bool Matches(const Stream& in) const;
	template <typename Source> bool Matches(const Source& source) const;
	// Match: number of characters matched at the START of the input,
	// or -1 on failure (0 is a valid success, e.g. for an empty regex)
	int Match(const std::string& str) const;
	int Match(const Stream& in) const;
	template <typename Source> int Match(const Source& source) const;
private:
	RegEx(REGEX_OP op);
	template <typename Source> bool IsValidSource(const Source& source) const;
	template <typename Source> int MatchUnchecked(const Source& source) const;
	// one MatchOp* per REGEX_OP; implementations live in regeximpl.h
	template <typename Source> int MatchOpEmpty(const Source& source) const;
	template <typename Source> int MatchOpMatch(const Source& source) const;
	template <typename Source> int MatchOpRange(const Source& source) const;
	template <typename Source> int MatchOpOr(const Source& source) const;
	template <typename Source> int MatchOpAnd(const Source& source) const;
	template <typename Source> int MatchOpNot(const Source& source) const;
	template <typename Source> int MatchOpSeq(const Source& source) const;
private:
	REGEX_OP m_op;
	char m_a, m_z;                 // operands: exact char (m_a) or range [m_a, m_z]
	std::vector <RegEx> m_params;  // sub-expressions for composite ops
};
}
#include "regeximpl.h"
#endif // REGEX_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#ifndef REGEXIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define REGEXIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include "stream.h"
#include "stringsource.h"
#include "streamcharsource.h"
namespace YAML
{
// Query matches
// . Each overload simply forwards to the corresponding Match() and
//   interprets a non-negative match length as success.
inline bool RegEx::Matches(char ch) const {
	return Matches(std::string(1, ch));
}
inline bool RegEx::Matches(const std::string& str) const {
	const int matched = Match(str);
	return matched >= 0;
}
inline bool RegEx::Matches(const Stream& in) const {
	const int matched = Match(in);
	return matched >= 0;
}
template <typename Source>
inline bool RegEx::Matches(const Source& source) const {
	const int matched = Match(source);
	return matched >= 0;
}
// Match
// . Matches the given string against this regular expression.
// . Returns the number of characters matched.
// . Returns -1 if no characters were matched (the reason for
// not returning zero is that we may have an empty regex
// which is ALWAYS successful at matching zero characters).
// . REMEMBER that we only match from the start of the buffer!
inline int RegEx::Match(const std::string& str) const
{
StringCharSource source(str.c_str(), str.size());
return Match(source);
}
inline int RegEx::Match(const Stream& in) const
{
StreamCharSource source(in);
return Match(source);
}
// IsValidSource
// . A generic source is valid whenever it still has input available.
template <typename Source>
inline bool RegEx::IsValidSource(const Source& source) const
{
	return static_cast<bool>(source);
}
// . A string source only needs remaining input for the ops that consume
//   a character (exact match / range); every other op may run on an
//   exhausted source.
template<>
inline bool RegEx::IsValidSource<StringCharSource>(const StringCharSource& source) const
{
	if(m_op == REGEX_MATCH || m_op == REGEX_RANGE)
		return source;
	return true;
}
// Match
// . Validates the source, then dispatches to the op-specific matcher.
template <typename Source>
inline int RegEx::Match(const Source& source) const
{
	if(!IsValidSource(source))
		return -1;
	return MatchUnchecked(source);
}
// MatchUnchecked
// . Dispatch on m_op; callers guarantee IsValidSource(source).
template <typename Source>
inline int RegEx::MatchUnchecked(const Source& source) const
{
	switch(m_op) {
		case REGEX_EMPTY: return MatchOpEmpty(source);
		case REGEX_MATCH: return MatchOpMatch(source);
		case REGEX_RANGE: return MatchOpRange(source);
		case REGEX_OR:    return MatchOpOr(source);
		case REGEX_AND:   return MatchOpAnd(source);
		case REGEX_NOT:   return MatchOpNot(source);
		case REGEX_SEQ:   return MatchOpSeq(source);
	}
	// unreachable with a well-formed m_op
	return -1;
}
//////////////////////////////////////////////////////////////////////////////
// Operators
// Note: the convention MatchOp*<Source> is that we can assume IsSourceValid(source).
// So we do all our checks *before* we call these functions
// EmptyOperator
// . The empty regex matches only at end of input.
template <typename Source>
inline int RegEx::MatchOpEmpty(const Source& source) const {
	if(source[0] == Stream::eof())
		return 0;
	return -1;
}
// . For a string source, end of input simply means the source is exhausted.
template <>
inline int RegEx::MatchOpEmpty<StringCharSource>(const StringCharSource& source) const {
	if(source)
		return -1;
	return 0; // the empty regex only is successful on the empty string
}
// MatchOperator
// . Matches exactly one character, m_a.
template <typename Source>
inline int RegEx::MatchOpMatch(const Source& source) const {
	return (source[0] == m_a) ? 1 : -1;
}
// RangeOperator
// . Matches one character in the inclusive range [m_a, m_z].
template <typename Source>
inline int RegEx::MatchOpRange(const Source& source) const {
	return (m_a <= source[0] && source[0] <= m_z) ? 1 : -1;
}
// OrOperator
template <typename Source>
inline int RegEx::MatchOpOr(const Source& source) const {
for(std::size_t i=0;i<m_params.size();i++) {
int n = m_params[i].MatchUnchecked(source);
if(n >= 0)
return n;
}
return -1;
}
// AndOperator
// Note: 'AND' is a little funny, since we may be required to match things
//       of different lengths. If we find a match, we return the length of
//       the FIRST entry on the list.
template <typename Source>
inline int RegEx::MatchOpAnd(const Source& source) const {
	if(m_params.empty())
		return -1;
	// the first operand supplies the result length
	const int firstLength = m_params[0].MatchUnchecked(source);
	if(firstLength == -1)
		return -1;
	// every remaining operand must also match (length ignored)
	for(std::size_t idx = 1; idx < m_params.size(); ++idx) {
		if(m_params[idx].MatchUnchecked(source) == -1)
			return -1;
	}
	return firstLength;
}
// NotOperator
// . Matches one character iff the (single) operand does NOT match;
//   an operand-less NOT always fails.
template <typename Source>
inline int RegEx::MatchOpNot(const Source& source) const {
	if(m_params.empty())
		return -1;
	return (m_params[0].MatchUnchecked(source) >= 0) ? -1 : 1;
}
// SeqOperator
template <typename Source>
inline int RegEx::MatchOpSeq(const Source& source) const {
int offset = 0;
for(std::size_t i=0;i<m_params.size();i++) {
int n = m_params[i].Match(source + offset); // note Match, not MatchUnchecked because we need to check validity after the offset
if(n == -1)
return -1;
offset += n;
}
return offset;
}
}
#endif // REGEXIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#include "scanner.h"
#include "token.h"
#include "yaml-cpp/exceptions.h"
#include "exp.h"
#include <cassert>
#include <memory>
namespace YAML
{
// Scanner
// . Construction only records initial state; tokenization is lazy,
//   driven by EnsureTokensInQueue() from empty()/pop()/peek().
Scanner::Scanner(std::istream& in)
	: INPUT(in), m_startedStream(false), m_endedStream(false), m_simpleKeyAllowed(false), m_canBeJSONFlow(false)
{
}
Scanner::~Scanner()
{
}
// empty
// . Returns true if there are no more tokens to be read.
bool Scanner::empty()
{
	EnsureTokensInQueue();
	return m_tokens.empty();
}
// pop
// . Removes the next token from the queue, if there is one.
void Scanner::pop()
{
	EnsureTokensInQueue();
	if(m_tokens.empty())
		return;
	m_tokens.pop();
}
// peek
// . Returns (but does not remove) the next token on the queue.
// . Precondition: callers are expected to check empty() first; we assert
//   rather than throw on a violation.
Token& Scanner::peek()
{
	EnsureTokensInQueue();
	assert(!m_tokens.empty()); // should we be asserting here? I mean, we really just be checking
	                           // if it's empty before peeking.
	return m_tokens.front();
}
// EnsureTokensInQueue
// . Scan until there's a valid token at the front of the queue,
//   or we're sure the queue is empty.
void Scanner::EnsureTokensInQueue()
{
	for(;;) {
		if(!m_tokens.empty()) {
			Token& token = m_tokens.front();
			// a valid token up front means we're done
			if(token.status == Token::VALID)
				return;
			// impossible tokens get dropped; anything else still
			// up front is unverified, so keep scanning
			if(token.status == Token::INVALID) {
				m_tokens.pop();
				continue;
			}
		}
		// nothing usable queued -- maybe the stream is simply finished
		if(m_endedStream)
			return;
		ScanNextToken();
	}
}
// ScanNextToken
// . The main scanning function; here we branch out and
//   scan whatever the next token should be.
// . NOTE: the order of these checks matters -- directives and document
//   markers are only recognized at column 0, and plain scalars are the
//   fall-through case just before giving up entirely.
void Scanner::ScanNextToken()
{
	if(m_endedStream)
		return;
	// first call: initialize stream state before scanning anything
	if(!m_startedStream)
		return StartStream();
	// get rid of whitespace, etc. (in between tokens it should be irrelevent)
	ScanToNextToken();
	// maybe need to end some blocks
	PopIndentToHere();
	// *****
	// And now branch based on the next few characters!
	// *****
	// end of stream
	if(!INPUT)
		return EndStream();
	if(INPUT.column() == 0 && INPUT.peek() == Keys::Directive)
		return ScanDirective();
	// document token
	if(INPUT.column() == 0 && Exp::DocStart().Matches(INPUT))
		return ScanDocStart();
	if(INPUT.column() == 0 && Exp::DocEnd().Matches(INPUT))
		return ScanDocEnd();
	// flow start/end/entry
	if(INPUT.peek() == Keys::FlowSeqStart || INPUT.peek() == Keys::FlowMapStart)
		return ScanFlowStart();
	if(INPUT.peek() == Keys::FlowSeqEnd || INPUT.peek() == Keys::FlowMapEnd)
		return ScanFlowEnd();
	if(INPUT.peek() == Keys::FlowEntry)
		return ScanFlowEntry();
	// block/map stuff
	if(Exp::BlockEntry().Matches(INPUT))
		return ScanBlockEntry();
	// keys/values use different regexes in block vs. flow context
	if((InBlockContext() ? Exp::Key() : Exp::KeyInFlow()).Matches(INPUT))
		return ScanKey();
	if(GetValueRegex().Matches(INPUT))
		return ScanValue();
	// alias/anchor
	if(INPUT.peek() == Keys::Alias || INPUT.peek() == Keys::Anchor)
		return ScanAnchorOrAlias();
	// tag
	if(INPUT.peek() == Keys::Tag)
		return ScanTag();
	// special scalars (literal/folded block scalars exist only in block context)
	if(InBlockContext() && (INPUT.peek() == Keys::LiteralScalar || INPUT.peek() == Keys::FoldedScalar))
		return ScanBlockScalar();
	if(INPUT.peek() == '\'' || INPUT.peek() == '\"')
		return ScanQuotedScalar();
	// plain scalars
	if((InBlockContext() ? Exp::PlainScalar() : Exp::PlainScalarInFlow()).Matches(INPUT))
		return ScanPlainScalar();
	// don't know what it is!
	throw ParserException(INPUT.mark(), ErrorMsg::UNKNOWN_TOKEN);
}
// ScanToNextToken
// . Eats input until we reach the next token-like thing.
// . Also maintains m_simpleKeyAllowed across whitespace/comments/newlines.
void Scanner::ScanToNextToken()
{
	while(1) {
		// first eat whitespace
		while(INPUT && IsWhitespaceToBeEaten(INPUT.peek())) {
			// a tab in block context means no simple key can start here
			// (see the IsWhitespaceToBeEaten commentary below)
			if(InBlockContext() && Exp::Tab().Matches(INPUT))
				m_simpleKeyAllowed = false;
			INPUT.eat(1);
		}
		// then eat a comment
		if(Exp::Comment().Matches(INPUT)) {
			// eat until line break
			while(INPUT && !Exp::Break().Matches(INPUT))
				INPUT.eat(1);
		}
		// if it's NOT a line break, then we're done!
		if(!Exp::Break().Matches(INPUT))
			break;
		// otherwise, let's eat the line break and keep going
		int n = Exp::Break().Match(INPUT);
		INPUT.eat(n);
		// oh yeah, and let's get rid of that simple key
		InvalidateSimpleKey();
		// new line - we may be able to accept a simple key now
		if(InBlockContext())
			m_simpleKeyAllowed = true;
	}
}
///////////////////////////////////////////////////////////////////////
// Misc. helpers
// IsWhitespaceToBeEaten
// . We can eat whitespace if it's a space or tab
// . Note: originally tabs in block context couldn't be eaten
//   "where a simple key could be allowed
//   (i.e., not at the beginning of a line, or following '-', '?', or ':')"
//   I think this is wrong, since tabs can be non-content whitespace; it's just
//   that they can't contribute to indentation, so once you've seen a tab in a
//   line, you can't start a simple key
bool Scanner::IsWhitespaceToBeEaten(char ch)
{
	return ch == ' ' || ch == '\t';
}
// GetValueRegex
// . Get the appropriate regex to check if it's a value token:
//   block context, JSON-style flow, or plain flow.
const RegEx& Scanner::GetValueRegex() const
{
	if(InBlockContext())
		return Exp::Value();
	if(m_canBeJSONFlow)
		return Exp::ValueInJSONFlow();
	return Exp::ValueInFlow();
}
// StartStream
// . Set the initial conditions for starting a stream.
// . Seeds the indent stack with a base marker (column -1, type NONE) so
//   every real indentation compares as deeper.
void Scanner::StartStream()
{
	m_startedStream = true;
	m_simpleKeyAllowed = true;
	// ownership: push_back takes the marker from the auto_ptr into
	// m_indentRefs (the "garbage collection" vector -- see scanner.h);
	// m_indents then holds a non-owning pointer to it
	std::auto_ptr<IndentMarker> pIndent(new IndentMarker(-1, IndentMarker::NONE));
	m_indentRefs.push_back(pIndent);
	m_indents.push(&m_indentRefs.back());
}
// EndStream
// . Close out the stream, finish up, etc.
// . Flushes all pending indents and simple keys so their end tokens /
//   key tokens are emitted before we mark the stream as ended.
void Scanner::EndStream()
{
	// force newline
	if(INPUT.column() > 0)
		INPUT.ResetColumn();
	PopAllIndents();
	PopAllSimpleKeys();
	m_simpleKeyAllowed = false;
	m_endedStream = true;
}
// PushToken
// . Enqueues a token of the given type at the current input mark and
//   returns a pointer to it (valid until it's popped from the queue).
Token *Scanner::PushToken(Token::TYPE type)
{
	m_tokens.push(Token(type, INPUT.mark()));
	return &m_tokens.back();
}
// GetStartTokenFor
// . Maps an indent type to the block-start token it should emit.
Token::TYPE Scanner::GetStartTokenFor(IndentMarker::INDENT_TYPE type) const
{
	if(type == IndentMarker::SEQ)
		return Token::BLOCK_SEQ_START;
	if(type == IndentMarker::MAP)
		return Token::BLOCK_MAP_START;
	// NONE (or a corrupt value) should never reach here
	assert(false);
	throw std::runtime_error("yaml-cpp: internal error, invalid indent type");
}
// PushIndentTo
// . Pushes an indentation onto the stack, and enqueues the
//   proper token (sequence start or mapping start).
// . Returns the indent marker it generates (if any).
Scanner::IndentMarker *Scanner::PushIndentTo(int column, IndentMarker::INDENT_TYPE type)
{
	// are we in flow? (indentation is irrelevant there)
	if(InFlowContext())
		return 0;
	std::auto_ptr<IndentMarker> pIndent(new IndentMarker(column, type));
	IndentMarker& indent = *pIndent;
	const IndentMarker& lastIndent = *m_indents.top();
	// is this actually an indentation?
	// (equal columns are allowed only for a SEQ starting at the same
	//  column as an enclosing MAP)
	if(indent.column < lastIndent.column)
		return 0;
	if(indent.column == lastIndent.column && !(indent.type == IndentMarker::SEQ && lastIndent.type == IndentMarker::MAP))
		return 0;
	// push a start token
	indent.pStartToken = PushToken(GetStartTokenFor(type));
	// and then the indent
	// (ownership passes from the auto_ptr to m_indentRefs; m_indents and
	//  the return value are non-owning. On the early returns above the
	//  auto_ptr simply frees the marker.)
	m_indents.push(&indent);
	m_indentRefs.push_back(pIndent);
	return &m_indentRefs.back();
}
// PopIndentToHere
// . Pops indentations off the stack until we reach the current indentation level,
//   and enqueues the proper token each time.
// . Then pops all invalid indentations off.
void Scanner::PopIndentToHere()
{
	// are we in flow?
	if(InFlowContext())
		return;
	// now pop away
	while(!m_indents.empty()) {
		const IndentMarker& indent = *m_indents.top();
		// anything shallower than the current column stays
		if(indent.column < INPUT.column())
			break;
		// at the same column the indent survives, UNLESS it's a sequence
		// whose next token is not another block entry ('-')
		if(indent.column == INPUT.column() && !(indent.type == IndentMarker::SEQ && !Exp::BlockEntry().Matches(INPUT)))
			break;
		PopIndent();
	}
	// finally, clear out any markers that were invalidated along the way
	while(!m_indents.empty() && m_indents.top()->status == IndentMarker::INVALID)
		PopIndent();
}
// PopAllIndents
// . Pops all indentations (except for the base empty one) off the stack,
//   and enqueues the proper token each time.
void Scanner::PopAllIndents()
{
	// indents don't exist in flow context
	if(InFlowContext())
		return;
	// pop everything down to (but not including) the base NONE marker
	while(!m_indents.empty() && m_indents.top()->type != IndentMarker::NONE)
		PopIndent();
}
// PopIndent
// . Pops a single indent, pushing the proper token
void Scanner::PopIndent()
{
const IndentMarker& indent = *m_indents.top();
m_indents.pop();
if(indent.status != IndentMarker::VALID) {
InvalidateSimpleKey();
return;
}
if(indent.type == IndentMarker::SEQ)
m_tokens.push(Token(Token::BLOCK_SEQ_END, INPUT.mark()));
else if(indent.type == IndentMarker::MAP)
m_tokens.push(Token(Token::BLOCK_MAP_END, INPUT.mark()));
}
// GetTopIndent
// . Column of the innermost indent, or 0 when the stack is empty.
int Scanner::GetTopIndent() const
{
	return m_indents.empty() ? 0 : m_indents.top()->column;
}
// ThrowParserException
// . Throws a ParserException with the current token location
//   (falling back to a null mark when no token is queued).
// . Does not parse any more tokens.
void Scanner::ThrowParserException(const std::string& msg) const
{
	Mark mark = Mark::null();
	if(!m_tokens.empty())
		mark = m_tokens.front().mark;
	throw ParserException(mark, msg);
}
}
#ifndef SCANNER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define SCANNER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include <ios>
#include <string>
#include <queue>
#include <stack>
#include <set>
#include <map>
#include "ptr_vector.h"
#include "stream.h"
#include "token.h"
namespace YAML
{
class Node;
class RegEx;
// Scanner
// . Lazily tokenizes a YAML input stream; clients drive it through the
//   queue-like interface (empty/pop/peek), which scans on demand.
class Scanner
{
public:
	Scanner(std::istream& in);
	~Scanner();
	// token queue management (hopefully this looks kinda stl-ish)
	bool empty();
	void pop();
	Token& peek();
private:
	// One entry in the block-indentation stack.
	struct IndentMarker {
		enum INDENT_TYPE { MAP, SEQ, NONE };
		enum STATUS { VALID, INVALID, UNKNOWN };
		IndentMarker(int column_, INDENT_TYPE type_): column(column_), type(type_), status(VALID), pStartToken(0) {}
		int column;          // column where this indent begins
		INDENT_TYPE type;    // which block construct opened it
		STATUS status;       // markers can be invalidated after the fact
		Token *pStartToken;  // the BLOCK_*_START token emitted for it (if any)
	};
	enum FLOW_MARKER { FLOW_MAP, FLOW_SEQ };
private:
	// scanning
	void EnsureTokensInQueue();
	void ScanNextToken();
	void ScanToNextToken();
	void StartStream();
	void EndStream();
	Token *PushToken(Token::TYPE type);
	// block vs. flow context is determined solely by the flow stack
	bool InFlowContext() const { return !m_flows.empty(); }
	bool InBlockContext() const { return m_flows.empty(); }
	int GetFlowLevel() const { return m_flows.size(); }
	Token::TYPE GetStartTokenFor(IndentMarker::INDENT_TYPE type) const;
	IndentMarker *PushIndentTo(int column, IndentMarker::INDENT_TYPE type);
	void PopIndentToHere();
	void PopAllIndents();
	void PopIndent();
	int GetTopIndent() const;
	// checking input
	bool CanInsertPotentialSimpleKey() const;
	bool ExistsActiveSimpleKey() const;
	void InsertPotentialSimpleKey();
	void InvalidateSimpleKey();
	bool VerifySimpleKey();
	void PopAllSimpleKeys();
	void ThrowParserException(const std::string& msg) const;
	bool IsWhitespaceToBeEaten(char ch);
	const RegEx& GetValueRegex() const;
	// A possible start of a simple (single-token) key, recorded so the
	// KEY token can be inserted retroactively once a value indicator is seen.
	struct SimpleKey {
		SimpleKey(const Mark& mark_, int flowLevel_);
		void Validate();
		void Invalidate();
		Mark mark;
		int flowLevel;
		IndentMarker *pIndent;
		Token *pMapStart, *pKey;
	};
	// and the tokens
	void ScanDirective();
	void ScanDocStart();
	void ScanDocEnd();
	void ScanBlockSeqStart();
	void ScanBlockMapSTart(); // NOTE(review): "STart" capitalization looks like a typo -- confirm against the definition before renaming
	void ScanBlockEnd();
	void ScanBlockEntry();
	void ScanFlowStart();
	void ScanFlowEnd();
	void ScanFlowEntry();
	void ScanKey();
	void ScanValue();
	void ScanAnchorOrAlias();
	void ScanTag();
	void ScanPlainScalar();
	void ScanQuotedScalar();
	void ScanBlockScalar();
private:
	// the stream
	Stream INPUT;
	// the output (tokens)
	std::queue<Token> m_tokens;
	// state info
	bool m_startedStream, m_endedStream;
	bool m_simpleKeyAllowed;
	bool m_canBeJSONFlow;
	std::stack<SimpleKey> m_simpleKeys;
	std::stack<IndentMarker *> m_indents;       // non-owning view of active indents
	ptr_vector<IndentMarker> m_indentRefs; // for "garbage collection"
	std::stack<FLOW_MARKER> m_flows;
};
}
#endif // SCANNER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#include "scanscalar.h"
#include "scanner.h"
#include "exp.h"
#include "yaml-cpp/exceptions.h"
#include "token.h"
namespace YAML
{
// ScanScalar
// . This is where the scalar magic happens.
//
// . We do the scanning in three phases:
//   1. Scan until newline
//   2. Eat newline
//   3. Scan leading blanks.
//
// . Depending on the parameters given, we store or stop
//   and different places in the above flow.
//
// . Returns the scanned (and folded/chomped) scalar text; also sets
//   params.leadingSpaces when scanning stopped because of a dedent.
std::string ScanScalar(Stream& INPUT, ScanScalarParams& params)
{
	bool foundNonEmptyLine = false;
	bool pastOpeningBreak = (params.fold == FOLD_FLOW);
	bool emptyLine = false, moreIndented = false;
	int foldedNewlineCount = 0;
	bool foldedNewlineStartedMoreIndented = false;
	std::string scalar;
	params.leadingSpaces = false;
	while(INPUT) {
		// ********************************
		// Phase #1: scan until line ending
		std::size_t lastNonWhitespaceChar = scalar.size();
		bool escapedNewline = false;
		while(!params.end.Matches(INPUT) && !Exp::Break().Matches(INPUT)) {
			if(!INPUT)
				break;
			// document indicator?
			if(INPUT.column() == 0 && Exp::DocIndicator().Matches(INPUT)) {
				if(params.onDocIndicator == BREAK)
					break;
				else if(params.onDocIndicator == THROW)
					throw ParserException(INPUT.mark(), ErrorMsg::DOC_IN_SCALAR);
			}
			foundNonEmptyLine = true;
			pastOpeningBreak = true;
			// escaped newline? (only if we're escaping on slash)
			if(params.escape == '\\' && Exp::EscBreak().Matches(INPUT)) {
				// eat escape character and get out (but preserve trailing whitespace!)
				INPUT.get();
				lastNonWhitespaceChar = scalar.size();
				escapedNewline = true;
				break;
			}
			// escape this?
			if(INPUT.peek() == params.escape) {
				scalar += Exp::Escape(INPUT);
				lastNonWhitespaceChar = scalar.size();
				continue;
			}
			// otherwise, just add the damn character
			char ch = INPUT.get();
			scalar += ch;
			if(ch != ' ' && ch != '\t')
				lastNonWhitespaceChar = scalar.size();
		}
		// eof? if we're looking to eat something, then we throw
		if(!INPUT) {
			if(params.eatEnd)
				throw ParserException(INPUT.mark(), ErrorMsg::EOF_IN_SCALAR);
			break;
		}
		// doc indicator?
		if(params.onDocIndicator == BREAK && INPUT.column() == 0 && Exp::DocIndicator().Matches(INPUT))
			break;
		// are we done via character match?
		int n = params.end.Match(INPUT);
		if(n >= 0) {
			if(params.eatEnd)
				INPUT.eat(n);
			break;
		}
		// do we remove trailing whitespace?
		if(params.fold == FOLD_FLOW)
			scalar.erase(lastNonWhitespaceChar);
		// ********************************
		// Phase #2: eat line ending
		n = Exp::Break().Match(INPUT);
		INPUT.eat(n);
		// ********************************
		// Phase #3: scan initial spaces
		// first the required indentation
		while(INPUT.peek() == ' ' && (INPUT.column() < params.indent || (params.detectIndent && !foundNonEmptyLine)))
			INPUT.eat(1);
		// update indent if we're auto-detecting
		if(params.detectIndent && !foundNonEmptyLine)
			params.indent = std::max(params.indent, INPUT.column());
		// and then the rest of the whitespace
		while(Exp::Blank().Matches(INPUT)) {
			// we check for tabs that masquerade as indentation
			if(INPUT.peek() == '\t' && INPUT.column() < params.indent && params.onTabInIndentation == THROW)
				throw ParserException(INPUT.mark(), ErrorMsg::TAB_IN_INDENTATION);
			if(!params.eatLeadingWhitespace)
				break;
			INPUT.eat(1);
		}
		// was this an empty line?
		bool nextEmptyLine = Exp::Break().Matches(INPUT);
		bool nextMoreIndented = Exp::Blank().Matches(INPUT);
		if(params.fold == FOLD_BLOCK && foldedNewlineCount == 0 && nextEmptyLine)
			foldedNewlineStartedMoreIndented = moreIndented;
		// for block scalars, we always start with a newline, so we should ignore it (not fold or keep)
		if(pastOpeningBreak) {
			switch(params.fold) {
				case DONT_FOLD:
					scalar += "\n";
					break;
				case FOLD_BLOCK:
					if(!emptyLine && !nextEmptyLine && !moreIndented && !nextMoreIndented && INPUT.column() >= params.indent)
						scalar += " ";
					else if(nextEmptyLine)
						foldedNewlineCount++;
					else
						scalar += "\n";
					if(!nextEmptyLine && foldedNewlineCount > 0) {
						scalar += std::string(foldedNewlineCount - 1, '\n');
						// BUG FIX: was `nextMoreIndented | !foundNonEmptyLine` -- a
						// bitwise or of bools that only worked by accident; use
						// short-circuiting logical or like the rest of this condition
						if(foldedNewlineStartedMoreIndented || nextMoreIndented || !foundNonEmptyLine)
							scalar += "\n";
						foldedNewlineCount = 0;
					}
					break;
				case FOLD_FLOW:
					if(nextEmptyLine)
						scalar += "\n";
					else if(!emptyLine && !nextEmptyLine && !escapedNewline)
						scalar += " ";
					break;
			}
		}
		emptyLine = nextEmptyLine;
		moreIndented = nextMoreIndented;
		pastOpeningBreak = true;
		// are we done via indentation?
		if(!emptyLine && INPUT.column() < params.indent) {
			params.leadingSpaces = true;
			break;
		}
	}
	// post-processing
	if(params.trimTrailingSpaces) {
		std::size_t pos = scalar.find_last_not_of(' ');
		if(pos < scalar.size())
			scalar.erase(pos + 1);
	}
	switch(params.chomp) {
		case CLIP: {
			// keep at most one trailing newline
			const std::size_t pos = scalar.find_last_not_of('\n');
			if(pos == std::string::npos)
				scalar.erase();
			else if(pos + 1 < scalar.size())
				scalar.erase(pos + 2);
		} break;
		case STRIP: {
			// remove all trailing newlines
			const std::size_t pos = scalar.find_last_not_of('\n');
			if(pos == std::string::npos)
				scalar.erase();
			else if(pos < scalar.size())
				scalar.erase(pos + 1);
		} break;
		default:
			break;
	}
	return scalar;
}
}
#ifndef SCANSCALAR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define SCANSCALAR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include <string>
#include "regex.h"
#include "stream.h"
namespace YAML
{
// Trailing-newline policy (applied at the very end of the scalar):
// STRIP removes all, CLIP keeps at most one, KEEP keeps all.
enum CHOMP { STRIP = -1, CLIP, KEEP };
// Generic "what do we do when X happens": ignore, stop scanning, or throw.
enum ACTION { NONE, BREAK, THROW };
// Line-end folding policy used by ScanScalar.
enum FOLD { DONT_FOLD, FOLD_BLOCK, FOLD_FLOW };
// Parameter bundle for ScanScalar; callers set the inputs, ScanScalar
// writes the output field.
struct ScanScalarParams {
	ScanScalarParams(): eatEnd(false), indent(0), detectIndent(false), eatLeadingWhitespace(0), escape(0), fold(DONT_FOLD),
		trimTrailingSpaces(0), chomp(CLIP), onDocIndicator(NONE), onTabInIndentation(NONE), leadingSpaces(false) {}
	// input:
	RegEx end; // what condition ends this scalar?
	bool eatEnd; // should we eat that condition when we see it?
	int indent; // what level of indentation should be eaten and ignored?
	bool detectIndent; // should we try to autodetect the indent?
	bool eatLeadingWhitespace; // should we continue eating this delicious indentation after 'indent' spaces?
	char escape; // what character do we escape on (i.e., slash or single quote) (0 for none)
	FOLD fold; // how do we fold line ends?
	bool trimTrailingSpaces; // do we remove all trailing spaces (at the very end)
	CHOMP chomp; // do we strip, clip, or keep trailing newlines (at the very end)
	// Note: strip means kill all, clip means keep at most one, keep means keep all
	ACTION onDocIndicator; // what do we do if we see a document indicator?
	ACTION onTabInIndentation; // what do we do if we see a tab where we should be seeing indentation spaces
	// output:
	bool leadingSpaces; // set by ScanScalar: true iff scanning stopped on a dedented line
};
std::string ScanScalar(Stream& INPUT, ScanScalarParams& info);
}
#endif // SCANSCALAR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#include "scanner.h"
#include "regex.h"
#include "exp.h"
#include "yaml-cpp/exceptions.h"
namespace YAML
{
// ScanVerbatimTag
// . Scans the contents of a verbatim tag: consumes the opening indicator,
//   accumulates URI characters, and consumes the closing indicator.
// . Throws if the tag is not properly closed (or a non-URI character
//   appears before the close).
const std::string ScanVerbatimTag(Stream& INPUT)
{
	std::string tag;
	// consume the opening indicator
	INPUT.get();
	for(;;) {
		if(!INPUT)
			break;
		if(INPUT.peek() == Keys::VerbatimTagEnd) {
			// consume the closing indicator and we're done
			INPUT.get();
			return tag;
		}
		const int matched = Exp::URI().Match(INPUT);
		if(matched <= 0)
			break;
		tag += INPUT.get(matched);
	}
	throw ParserException(INPUT.mark(), ErrorMsg::END_OF_VERBATIM_TAG);
}
// ScanTagHandle
// . Scans a tag handle up to (not including) the tag indicator (Keys::Tag).
// . canBeHandle starts true and is cleared permanently the first time a
//   non-word character is seen; once cleared, scanning continues with the
//   looser tag-character set instead.
// . Throws only if a tag indicator is reached AFTER a non-word character
//   appeared (the error is reported at that first offending character).
const std::string ScanTagHandle(Stream& INPUT, bool& canBeHandle)
{
	std::string tag;
	canBeHandle = true;
	Mark firstNonWordChar;
	while(INPUT) {
		if(INPUT.peek() == Keys::Tag) {
			// a real handle must be all word characters; seeing the tag
			// indicator now proves this wasn't a handle after all
			if(!canBeHandle)
				throw ParserException(firstNonWordChar, ErrorMsg::CHAR_IN_TAG_HANDLE);
			break;
		}
		int n = 0;
		if(canBeHandle) {
			n = Exp::Word().Match(INPUT);
			if(n <= 0) {
				// first non-word character: downgrade from handle to plain
				// tag text, remembering where for the error message above
				canBeHandle = false;
				firstNonWordChar = INPUT.mark();
			}
		}
		if(!canBeHandle)
			n = Exp::Tag().Match(INPUT);
		if(n <= 0)
			break;
		tag += INPUT.get(n);
	}
	return tag;
}
// ScanTagSuffix
// . Scans the suffix portion of a tag (a run of tag characters);
//   an empty suffix is an error.
const std::string ScanTagSuffix(Stream& INPUT)
{
	std::string tag;
	while(INPUT) {
		const int matched = Exp::Tag().Match(INPUT);
		if(matched <= 0)
			break;
		tag += INPUT.get(matched);
	}
	if(tag.empty())
		throw ParserException(INPUT.mark(), ErrorMsg::TAG_WITH_NO_SUFFIX);
	return tag;
}
}
#ifndef SCANTAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define SCANTAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include <string>
#include "stream.h"
namespace YAML
{
// Scans the contents of a verbatim tag; throws if it isn't closed.
const std::string ScanVerbatimTag(Stream& INPUT);
// Scans a tag handle; clears canBeHandle once a non-word character is seen.
const std::string ScanTagHandle(Stream& INPUT, bool& canBeHandle);
// Scans the suffix following a tag handle; throws if the suffix is empty.
const std::string ScanTagSuffix(Stream& INPUT);
}
#endif // SCANTAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#include "scanner.h"
#include "token.h"
#include "yaml-cpp/exceptions.h"
#include "exp.h"
#include "scanscalar.h"
#include "scantag.h"
#include "tag.h"
#include <sstream>
namespace YAML
{
///////////////////////////////////////////////////////////////////////
// Specialization for scanning specific tokens
// Directive
// . Note: no semantic checking is done here (that's for the parser to do)
// . Scans a "%NAME param param ..." line into a DIRECTIVE token: the name
//   goes into token.value, each whitespace-separated parameter into
//   token.params.
void Scanner::ScanDirective()
{
	// a directive resets any open block/key state
	PopAllIndents();
	PopAllSimpleKeys();
	m_simpleKeyAllowed = false;
	m_canBeJSONFlow = false;
	// store pos and eat indicator
	// (FIX: removed unused locals `name` and `params` -- the token's own
	//  fields are what actually accumulate the data)
	Token token(Token::DIRECTIVE, INPUT.mark());
	INPUT.eat(1);
	// read name
	while(INPUT && !Exp::BlankOrBreak().Matches(INPUT))
		token.value += INPUT.get();
	// read parameters
	while(1) {
		// first get rid of whitespace
		while(Exp::Blank().Matches(INPUT))
			INPUT.eat(1);
		// break on newline or comment
		if(!INPUT || Exp::Break().Matches(INPUT) || Exp::Comment().Matches(INPUT))
			break;
		// now read parameter
		std::string param;
		while(INPUT && !Exp::BlankOrBreak().Matches(INPUT))
			param += INPUT.get();
		token.params.push_back(param);
	}
	m_tokens.push(token);
}
// DocStart
void Scanner::ScanDocStart()
{
PopAllIndents();
PopAllSimpleKeys();
m_simpleKeyAllowed = false;
m_canBeJSONFlow = false;
// eat
Mark mark = INPUT.mark();
INPUT.eat(3);
m_tokens.push(Token(Token::DOC_START, mark));
}
// DocEnd
void Scanner::ScanDocEnd()
{
PopAllIndents();
PopAllSimpleKeys();
m_simpleKeyAllowed = false;
m_canBeJSONFlow = false;
// eat
Mark mark = INPUT.mark();
INPUT.eat(3);
m_tokens.push(Token(Token::DOC_END, mark));
}
// FlowStart
void Scanner::ScanFlowStart()
{
// flows can be simple keys
InsertPotentialSimpleKey();
m_simpleKeyAllowed = true;
m_canBeJSONFlow = false;
// eat
Mark mark = INPUT.mark();
char ch = INPUT.get();
FLOW_MARKER flowType = (ch == Keys::FlowSeqStart ? FLOW_SEQ : FLOW_MAP);
m_flows.push(flowType);
Token::TYPE type = (flowType == FLOW_SEQ ? Token::FLOW_SEQ_START : Token::FLOW_MAP_START);
m_tokens.push(Token(type, mark));
}
// FlowEnd
// . Leaves a flow sequence or mapping, emitting the matching end token;
//   throws if the closing indicator doesn't match the open flow (or if
//   we aren't in a flow at all).
void Scanner::ScanFlowEnd()
{
	if(InBlockContext())
		throw ParserException(INPUT.mark(), ErrorMsg::FLOW_END);
	// we might have a solo entry in the flow context
	if(InFlowContext()) {
		if(m_flows.top() == FLOW_MAP && VerifySimpleKey())
			m_tokens.push(Token(Token::VALUE, INPUT.mark()));
		else if(m_flows.top() == FLOW_SEQ)
			InvalidateSimpleKey();
	}
	m_simpleKeyAllowed = false;
	m_canBeJSONFlow = true;
	// eat
	Mark mark = INPUT.mark();
	char ch = INPUT.get();
	// check that it matches the start
	FLOW_MARKER flowType = (ch == Keys::FlowSeqEnd ? FLOW_SEQ : FLOW_MAP);
	if(m_flows.top() != flowType)
		throw ParserException(mark, ErrorMsg::FLOW_END);
	m_flows.pop();
	// FIX: was `(flowType ? ...)`, which tested the enum's numeric value and
	// only worked because FLOW_MAP happens to be zero; compare explicitly
	Token::TYPE type = (flowType == FLOW_SEQ ? Token::FLOW_SEQ_END : Token::FLOW_MAP_END);
	m_tokens.push(Token(type, mark));
}
// FlowEntry
void Scanner::ScanFlowEntry()
{
// we might have a solo entry in the flow context
if(InFlowContext()) {
if(m_flows.top() == FLOW_MAP && VerifySimpleKey())
m_tokens.push(Token(Token::VALUE, INPUT.mark()));
else if(m_flows.top() == FLOW_SEQ)
InvalidateSimpleKey();
}
m_simpleKeyAllowed = true;
m_canBeJSONFlow = false;
// eat
Mark mark = INPUT.mark();
INPUT.eat(1);
m_tokens.push(Token(Token::FLOW_ENTRY, mark));
}
// BlockEntry
void Scanner::ScanBlockEntry()
{
// we better be in the block context!
if(InFlowContext())
throw ParserException(INPUT.mark(), ErrorMsg::BLOCK_ENTRY);
// can we put it here?
if(!m_simpleKeyAllowed)
throw ParserException(INPUT.mark(), ErrorMsg::BLOCK_ENTRY);
PushIndentTo(INPUT.column(), IndentMarker::SEQ);
m_simpleKeyAllowed = true;
m_canBeJSONFlow = false;
// eat
Mark mark = INPUT.mark();
INPUT.eat(1);
m_tokens.push(Token(Token::BLOCK_ENTRY, mark));
}
// Key
void Scanner::ScanKey()
{
// handle keys diffently in the block context (and manage indents)
if(InBlockContext()) {
if(!m_simpleKeyAllowed)
throw ParserException(INPUT.mark(), ErrorMsg::MAP_KEY);
PushIndentTo(INPUT.column(), IndentMarker::MAP);
}
// can only put a simple key here if we're in block context
m_simpleKeyAllowed = InBlockContext();
// eat
Mark mark = INPUT.mark();
INPUT.eat(1);
m_tokens.push(Token(Token::KEY, mark));
}
// Value
void Scanner::ScanValue()
{
// and check that simple key
bool isSimpleKey = VerifySimpleKey();
m_canBeJSONFlow = false;
if(isSimpleKey) {
// can't follow a simple key with another simple key (dunno why, though - it seems fine)
m_simpleKeyAllowed = false;
} else {
// handle values diffently in the block context (and manage indents)
if(InBlockContext()) {
if(!m_simpleKeyAllowed)
throw ParserException(INPUT.mark(), ErrorMsg::MAP_VALUE);
PushIndentTo(INPUT.column(), IndentMarker::MAP);
}
// can only put a simple key here if we're in block context
m_simpleKeyAllowed = InBlockContext();
}
// eat
Mark mark = INPUT.mark();
INPUT.eat(1);
m_tokens.push(Token(Token::VALUE, mark));
}
// AnchorOrAlias
// . Scans an '&' anchor or '*' alias indicator plus its name, emitting an
//   ANCHOR or ALIAS token whose value is the name.
// . Throws if the name is empty or followed by an illegal character.
void Scanner::ScanAnchorOrAlias()
{
bool alias;
std::string name;
// insert a potential simple key (an anchor/alias can itself be a map key)
InsertPotentialSimpleKey();
m_simpleKeyAllowed = false;
m_canBeJSONFlow = false;
// eat the indicator; '*' means alias, '&' means anchor
Mark mark = INPUT.mark();
char indicator = INPUT.get();
alias = (indicator == Keys::Alias);
// now eat the content
while(INPUT && Exp::Anchor().Matches(INPUT))
name += INPUT.get();
// we need to have read SOMETHING!
if(name.empty())
throw ParserException(INPUT.mark(), alias ? ErrorMsg::ALIAS_NOT_FOUND : ErrorMsg::ANCHOR_NOT_FOUND);
// and needs to end correctly
if(INPUT && !Exp::AnchorEnd().Matches(INPUT))
throw ParserException(INPUT.mark(), alias ? ErrorMsg::CHAR_IN_ALIAS : ErrorMsg::CHAR_IN_ANCHOR);
// and we're done
Token token(alias ? Token::ALIAS : Token::ANCHOR, mark);
token.value = name;
m_tokens.push(token);
}
// Tag
// . Scans a '!' tag and emits a TAG token.
// . Distinguishes verbatim tags ('!<...>'), non-specific ('!'), primary
//   ('!suffix'), secondary ('!!suffix') and named handles ('!handle!suffix'),
//   recording the kind in token.data.
void Scanner::ScanTag()
{
// insert a potential simple key (a tagged node can be a map key)
InsertPotentialSimpleKey();
m_simpleKeyAllowed = false;
m_canBeJSONFlow = false;
Token token(Token::TAG, INPUT.mark());
// eat the indicator
INPUT.get();
if(INPUT && INPUT.peek() == Keys::VerbatimTagStart){
// '!<...>': take the tag text exactly as written
std::string tag = ScanVerbatimTag(INPUT);
token.value = tag;
token.data = Tag::VERBATIM;
} else {
// canBeHandle is set false if the text can't be a handle (e.g. '!!')
bool canBeHandle;
token.value = ScanTagHandle(INPUT, canBeHandle);
if(!canBeHandle && token.value.empty())
token.data = Tag::NON_SPECIFIC;
else if(token.value.empty())
token.data = Tag::SECONDARY_HANDLE;
else
token.data = Tag::PRIMARY_HANDLE;
// is there a suffix? ('!handle!suffix' form)
if(canBeHandle && INPUT.peek() == Keys::Tag) {
// eat the indicator
INPUT.get();
token.params.push_back(ScanTagSuffix(INPUT));
token.data = Tag::NAMED_HANDLE;
}
}
m_tokens.push(token);
}
// PlainScalar
// . Scans an unquoted scalar and emits a PLAIN_SCALAR token.
// . The end condition and indentation rules differ between flow and
//   block context.
void Scanner::ScanPlainScalar()
{
std::string scalar;
// set up the scanning parameters
ScanScalarParams params;
// the scalar ends at a context-dependent terminator, or at blank + comment
params.end = (InFlowContext() ? Exp::EndScalarInFlow() : Exp::EndScalar()) || (Exp::BlankOrBreak() + Exp::Comment());
params.eatEnd = false;
// in block context the scalar must be indented past the enclosing level
params.indent = (InFlowContext() ? 0 : GetTopIndent() + 1);
params.fold = FOLD_FLOW;
params.eatLeadingWhitespace = true;
params.trimTrailingSpaces = true;
params.chomp = STRIP;
params.onDocIndicator = BREAK;
params.onTabInIndentation = THROW;
// insert a potential simple key (a plain scalar is the most common map key)
InsertPotentialSimpleKey();
Mark mark = INPUT.mark();
scalar = ScanScalar(INPUT, params);
// can have a simple key only if we ended the scalar by starting a new line
m_simpleKeyAllowed = params.leadingSpaces;
m_canBeJSONFlow = false;
// finally, check and see if we ended on an illegal character
//if(Exp::IllegalCharInScalar.Matches(INPUT))
// throw ParserException(INPUT.mark(), ErrorMsg::CHAR_IN_SCALAR);
Token token(Token::PLAIN_SCALAR, mark);
token.value = scalar;
m_tokens.push(token);
}
// QuotedScalar
// . Scans a single- or double-quoted scalar and emits a NON_PLAIN_SCALAR
//   token. Single quotes escape themselves (''), double quotes use '\'.
void Scanner::ScanQuotedScalar()
{
std::string scalar;
// peek at single or double quote (don't eat because we need to preserve (for the time being) the input position)
char quote = INPUT.peek();
bool single = (quote == '\'');
// setup the scanning parameters
ScanScalarParams params;
// the closing quote ends the scalar; for single quotes, '' is an escaped
// quote, not a terminator
params.end = (single ? RegEx(quote) && !Exp::EscSingleQuote() : RegEx(quote));
params.eatEnd = true;
params.escape = (single ? '\'' : '\\');
params.indent = 0;
params.fold = FOLD_FLOW;
params.eatLeadingWhitespace = true;
params.trimTrailingSpaces = false;
params.chomp = CLIP;
params.onDocIndicator = THROW;
// insert a potential simple key (a quoted scalar can be a map key)
InsertPotentialSimpleKey();
Mark mark = INPUT.mark();
// now eat that opening quote
INPUT.get();
// and scan
scalar = ScanScalar(INPUT, params);
m_simpleKeyAllowed = false;
// a quoted scalar is valid JSON, so a JSON-style flow may follow
m_canBeJSONFlow = true;
Token token(Token::NON_PLAIN_SCALAR, mark);
token.value = scalar;
m_tokens.push(token);
}
// BlockScalarToken
// . These need a little extra processing beforehand.
// . We need to scan the line where the indicator is (this doesn't count as part of the scalar),
//   and then we need to figure out what level of indentation we'll be using.
// . Handles '|' (literal) and '>' (folded) scalars, with optional chomping
//   ('+' keep / '-' strip) and explicit indentation ('1'-'9') indicators.
void Scanner::ScanBlockScalar()
{
std::string scalar;
ScanScalarParams params;
// default: indentation is auto-detected from the first non-empty line
params.indent = 1;
params.detectIndent = true;
// eat block indicator ('|' or '>')
Mark mark = INPUT.mark();
char indicator = INPUT.get();
params.fold = (indicator == Keys::FoldedScalar ? FOLD_BLOCK : DONT_FOLD);
// eat chomping/indentation indicators (at most one of each, in either order)
params.chomp = CLIP;
int n = Exp::Chomp().Match(INPUT);
for(int i=0;i<n;i++) {
char ch = INPUT.get();
if(ch == '+')
params.chomp = KEEP;
else if(ch == '-')
params.chomp = STRIP;
else if(Exp::Digit().Matches(ch)) {
// an explicit indentation indicator must be 1-9
if(ch == '0')
throw ParserException(INPUT.mark(), ErrorMsg::ZERO_INDENT_IN_BLOCK);
params.indent = ch - '0';
params.detectIndent = false;
}
}
// now eat whitespace
while(Exp::Blank().Matches(INPUT))
INPUT.eat(1);
// and comments to the end of the line
if(Exp::Comment().Matches(INPUT))
while(INPUT && !Exp::Break().Matches(INPUT))
INPUT.eat(1);
// if it's not a line break, then we ran into a bad character inline
if(INPUT && !Exp::Break().Matches(INPUT))
throw ParserException(INPUT.mark(), ErrorMsg::CHAR_IN_BLOCK);
// set the initial indentation: the indicator is relative to the parent level
if(GetTopIndent() >= 0)
params.indent += GetTopIndent();
params.eatLeadingWhitespace = false;
params.trimTrailingSpaces = false;
params.onTabInIndentation = THROW;
scalar = ScanScalar(INPUT, params);
// simple keys always ok after block scalars (since we're gonna start a new line anyways)
m_simpleKeyAllowed = true;
m_canBeJSONFlow = false;
Token token(Token::NON_PLAIN_SCALAR, mark);
token.value = scalar;
m_tokens.push(token);
}
}
#ifndef SETTING_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define SETTING_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include <memory>
#include <vector>
#include "yaml-cpp/noncopyable.h"
namespace YAML
{
class SettingChangeBase;
// Setting
// . A single configuration value with undo support: set() returns a change
//   object that can later restore the previous value (see SettingChange).
template <typename T>
class Setting
{
public:
Setting(): m_value() {}
// returns a copy of the current value
const T get() const { return m_value; }
// assigns 'value'; the returned change object undoes the assignment on pop()
std::auto_ptr <SettingChangeBase> set(const T& value);
// resets this setting to the value held by 'oldSetting'
void restore(const Setting<T>& oldSetting) {
m_value = oldSetting.get();
}
private:
T m_value;
};
// SettingChangeBase
// . Type-erased interface for an undoable setting change; pop() reverts
//   the associated setting to its saved value.
class SettingChangeBase
{
public:
virtual ~SettingChangeBase() {}
virtual void pop() = 0;
};
// SettingChange
// . Concrete undo record for a Setting<T>: snapshots the setting's state at
//   construction and restores it on pop().
// . Holds a non-owning pointer to the live setting; the setting must outlive
//   this change object.
template <typename T>
class SettingChange: public SettingChangeBase
{
public:
SettingChange(Setting<T> *pSetting): m_pCurSetting(pSetting) {
// copy old setting to save its state
m_oldSetting = *pSetting;
}
virtual void pop() {
m_pCurSetting->restore(m_oldSetting);
}
private:
Setting<T> *m_pCurSetting;
Setting<T> m_oldSetting;
};
// Setting<T>::set
// . Snapshots the current value into an undo record *before* assigning the
//   new one, so popping the returned change restores the old value.
template <typename T>
inline std::auto_ptr <SettingChangeBase> Setting<T>::set(const T& value) {
std::auto_ptr <SettingChangeBase> pChange(new SettingChange<T> (this));
m_value = value;
return pChange;
}
// SettingChanges
// . Owns a group of setting-change records; on clear() (and destruction) it
//   first restores all the settings, then frees the records.
class SettingChanges: private noncopyable
{
public:
SettingChanges() {}
~SettingChanges() { clear(); }
// undo all recorded changes, then release the records
void clear() {
restore();
for(setting_changes::const_iterator it=m_settingChanges.begin();it!=m_settingChanges.end();++it)
delete *it;
m_settingChanges.clear();
}
// undo all recorded changes without releasing the records
void restore() {
for(setting_changes::const_iterator it=m_settingChanges.begin();it!=m_settingChanges.end();++it)
(*it)->pop();
}
// takes ownership of 'pSettingChange'
void push(std::auto_ptr <SettingChangeBase> pSettingChange) {
m_settingChanges.push_back(pSettingChange.release());
}
// like std::auto_ptr - assignment is transfer of ownership
// (clears our own records first, then steals rhs's)
SettingChanges& operator = (SettingChanges& rhs) {
if(this == &rhs)
return *this;
clear();
m_settingChanges = rhs.m_settingChanges;
rhs.m_settingChanges.clear();
return *this;
}
private:
typedef std::vector <SettingChangeBase *> setting_changes;
setting_changes m_settingChanges;
};
}
#endif // SETTING_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#include "scanner.h"
#include "token.h"
#include "yaml-cpp/exceptions.h"
#include "exp.h"
namespace YAML
{
// SimpleKey
// . Records where a potential simple key begins (mark, flow level) plus
//   non-owning pointers to the indent marker and tokens it may validate.
Scanner::SimpleKey::SimpleKey(const Mark& mark_, int flowLevel_)
: mark(mark_), flowLevel(flowLevel_), pIndent(0), pMapStart(0), pKey(0)
{
}
// Validate
// . Marks the associated indent, map-start token and key token as valid,
//   so the queued tokens will actually be emitted.
void Scanner::SimpleKey::Validate()
{
// Note: pIndent will *not* be garbage here;
// we "garbage collect" them so we can
// always refer to them
if(pIndent)
pIndent->status = IndentMarker::VALID;
if(pMapStart)
pMapStart->status = Token::VALID;
if(pKey)
pKey->status = Token::VALID;
}
// Invalidate
// . Marks the associated indent, map-start token and key token as invalid,
//   so the queued tokens will be discarded instead of emitted.
void Scanner::SimpleKey::Invalidate()
{
if(pIndent)
pIndent->status = IndentMarker::INVALID;
if(pMapStart)
pMapStart->status = Token::INVALID;
if(pKey)
pKey->status = Token::INVALID;
}
// CanInsertPotentialSimpleKey
// . A potential simple key may be inserted only where one is currently
//   allowed and no other is already pending at this flow level.
bool Scanner::CanInsertPotentialSimpleKey() const
{
return m_simpleKeyAllowed && !ExistsActiveSimpleKey();
}
// ExistsActiveSimpleKey
// . Returns true if there's a potential simple key at our flow level
//   (there's allowed at most one per flow level, i.e., at the start of the flow start token)
bool Scanner::ExistsActiveSimpleKey() const
{
// only the top of the stack could be at our flow level
if(m_simpleKeys.empty())
return false;
return m_simpleKeys.top().flowLevel == GetFlowLevel();
}
// InsertPotentialSimpleKey
// . If we can, add a potential simple key to the queue,
//   and save it on a stack.
// . The queued KEY token (and possibly a map start) stays UNVERIFIED until a
//   later ':' validates it or something invalidates it.
void Scanner::InsertPotentialSimpleKey()
{
if(!CanInsertPotentialSimpleKey())
return;
SimpleKey key(INPUT.mark(), GetFlowLevel());
// first add a map start, if necessary
if(InBlockContext()) {
key.pIndent = PushIndentTo(INPUT.column(), IndentMarker::MAP);
if(key.pIndent) {
// the indent (and its map-start token) is only tentative for now
key.pIndent->status = IndentMarker::UNKNOWN;
key.pMapStart = key.pIndent->pStartToken;
key.pMapStart->status = Token::UNVERIFIED;
}
}
// then add the (now unverified) key
m_tokens.push(Token(Token::KEY, INPUT.mark()));
key.pKey = &m_tokens.back();
key.pKey->status = Token::UNVERIFIED;
m_simpleKeys.push(key);
}
// InvalidateSimpleKey
// . Automatically invalidate the simple key in our flow level
//   (no-op if the pending key belongs to a different flow level).
void Scanner::InvalidateSimpleKey()
{
if(m_simpleKeys.empty())
return;
// grab top key
SimpleKey& key = m_simpleKeys.top();
if(key.flowLevel != GetFlowLevel())
return;
key.Invalidate();
m_simpleKeys.pop();
}
// VerifySimpleKey
// . Determines whether the latest simple key to be added is valid,
//   and if so, makes it valid.
// . Returns whether a valid simple key was confirmed at this position.
bool Scanner::VerifySimpleKey()
{
if(m_simpleKeys.empty())
return false;
// grab top key (by value: we pop it below regardless of the outcome)
SimpleKey key = m_simpleKeys.top();
// only validate if we're in the correct flow level
if(key.flowLevel != GetFlowLevel())
return false;
m_simpleKeys.pop();
bool isValid = true;
// needs to be less than 1024 characters and inline (per the YAML spec)
if(INPUT.line() != key.mark.line || INPUT.pos() - key.mark.pos > 1024)
isValid = false;
// validate or invalidate the queued tokens accordingly
if(isValid)
key.Validate();
else
key.Invalidate();
return isValid;
}
// PopAllSimpleKeys
// . Drops all pending simple keys without validating or invalidating them
//   (their tokens keep whatever status they already have).
void Scanner::PopAllSimpleKeys()
{
while(!m_simpleKeys.empty())
m_simpleKeys.pop();
}
}
#include "singledocparser.h"
#include "collectionstack.h"
#include "directives.h"
#include "yaml-cpp/eventhandler.h"
#include "yaml-cpp/exceptions.h"
#include "scanner.h"
#include "tag.h"
#include "token.h"
#include <sstream>
#include <cstdio>
#include <algorithm>
namespace YAML
{
// SingleDocParser
// . Parses a single YAML document from 'scanner', under the directives in
//   effect; anchors are numbered per document starting from 1.
SingleDocParser::SingleDocParser(Scanner& scanner, const Directives& directives): m_scanner(scanner), m_directives(directives), m_pCollectionStack(new CollectionStack), m_curAnchor(0)
{
}
// Out-of-line destructor: required so the auto_ptr<CollectionStack> member
// is destroyed where CollectionStack is a complete type.
SingleDocParser::~SingleDocParser()
{
}
// HandleDocument
// . Handles the next document
// . Throws a ParserException on error.
// . Emits OnDocumentStart/OnDocumentEnd around the document's root node.
void SingleDocParser::HandleDocument(EventHandler& eventHandler)
{
assert(!m_scanner.empty()); // guaranteed that there are tokens
assert(!m_curAnchor);
eventHandler.OnDocumentStart(m_scanner.peek().mark);
// eat doc start ('---'), if present
if(m_scanner.peek().type == Token::DOC_START)
m_scanner.pop();
// recurse!
HandleNode(eventHandler);
eventHandler.OnDocumentEnd();
// and finally eat any doc ends ('...') we see
while(!m_scanner.empty() && m_scanner.peek().type == Token::DOC_END)
m_scanner.pop();
}
// HandleNode
// . Emits the events for the next node: reads its tag/anchor properties,
//   then dispatches on the following token (scalar, sequence, map, or the
//   special null/compact-map cases).
void SingleDocParser::HandleNode(EventHandler& eventHandler)
{
// an empty node *is* a possibility
if(m_scanner.empty()) {
eventHandler.OnNull(Mark::null(), NullAnchor);
return;
}
// save location
Mark mark = m_scanner.peek().mark;
// special case: a value node by itself must be a map, with no header
if(m_scanner.peek().type == Token::VALUE) {
eventHandler.OnMapStart(mark, "", NullAnchor);
HandleMap(eventHandler);
eventHandler.OnMapEnd();
return;
}
// special case: an alias node (no properties allowed on an alias)
if(m_scanner.peek().type == Token::ALIAS) {
eventHandler.OnAlias(mark, LookupAnchor(mark, m_scanner.peek().value));
m_scanner.pop();
return;
}
std::string tag;
anchor_t anchor;
ParseProperties(tag, anchor);
const Token& token = m_scanner.peek();
// add non-specific tags ("!" for quoted scalars, "?" otherwise)
if(tag.empty())
tag = (token.type == Token::NON_PLAIN_SCALAR ? "!" : "?");
// now split based on what kind of node we should be
switch(token.type) {
case Token::PLAIN_SCALAR:
case Token::NON_PLAIN_SCALAR:
eventHandler.OnScalar(mark, tag, anchor, token.value);
m_scanner.pop();
return;
case Token::FLOW_SEQ_START:
case Token::BLOCK_SEQ_START:
eventHandler.OnSequenceStart(mark, tag, anchor);
HandleSequence(eventHandler);
eventHandler.OnSequenceEnd();
return;
case Token::FLOW_MAP_START:
case Token::BLOCK_MAP_START:
eventHandler.OnMapStart(mark, tag, anchor);
HandleMap(eventHandler);
eventHandler.OnMapEnd();
return;
case Token::KEY:
// compact maps can only go in a flow sequence
if(m_pCollectionStack->GetCurCollectionType() == CollectionType::FlowSeq) {
eventHandler.OnMapStart(mark, tag, anchor);
HandleMap(eventHandler);
eventHandler.OnMapEnd();
return;
}
break;
default:
break;
}
// nothing matched: the node is empty - null for "?", empty scalar otherwise
if(tag == "?")
eventHandler.OnNull(mark, anchor);
else
eventHandler.OnScalar(mark, tag, anchor, "");
}
// HandleSequence
// . Dispatches to the block or flow sequence handler based on the start token.
void SingleDocParser::HandleSequence(EventHandler& eventHandler)
{
const Token& startToken = m_scanner.peek();
if(startToken.type == Token::BLOCK_SEQ_START)
HandleBlockSequence(eventHandler);
else if(startToken.type == Token::FLOW_SEQ_START)
HandleFlowSequence(eventHandler);
}
// HandleBlockSequence
// . Emits the entries of a block sequence; each entry is introduced by a
//   BLOCK_ENTRY token and the sequence ends at BLOCK_SEQ_END.
void SingleDocParser::HandleBlockSequence(EventHandler& eventHandler)
{
// eat start token
m_scanner.pop();
m_pCollectionStack->PushCollectionType(CollectionType::BlockSeq);
while(1) {
if(m_scanner.empty())
throw ParserException(Mark::null(), ErrorMsg::END_OF_SEQ);
Token token = m_scanner.peek();
if(token.type != Token::BLOCK_ENTRY && token.type != Token::BLOCK_SEQ_END)
throw ParserException(token.mark, ErrorMsg::END_OF_SEQ);
m_scanner.pop();
if(token.type == Token::BLOCK_SEQ_END)
break;
// check for null: an entry immediately followed by another entry (or the
// end) has no node of its own
if(!m_scanner.empty()) {
const Token& token = m_scanner.peek();
if(token.type == Token::BLOCK_ENTRY || token.type == Token::BLOCK_SEQ_END) {
eventHandler.OnNull(token.mark, NullAnchor);
continue;
}
}
HandleNode(eventHandler);
}
m_pCollectionStack->PopCollectionType(CollectionType::BlockSeq);
}
// HandleFlowSequence
// . Emits the entries of a flow sequence '[a, b, ...]'; entries are
//   separated by FLOW_ENTRY (',') and the sequence ends at FLOW_SEQ_END.
// . Throws END_OF_SEQ_FLOW if the input ends before the sequence is closed.
void SingleDocParser::HandleFlowSequence(EventHandler& eventHandler)
{
// eat start token
m_scanner.pop();
m_pCollectionStack->PushCollectionType(CollectionType::FlowSeq);
while(1) {
if(m_scanner.empty())
throw ParserException(Mark::null(), ErrorMsg::END_OF_SEQ_FLOW);
// first check for end
if(m_scanner.peek().type == Token::FLOW_SEQ_END) {
m_scanner.pop();
break;
}
// then read the node
HandleNode(eventHandler);
// HandleNode may have consumed the rest of the input; guard before
// peeking for the separator (peek on an empty scanner is unchecked)
if(m_scanner.empty())
throw ParserException(Mark::null(), ErrorMsg::END_OF_SEQ_FLOW);
// now eat the separator (or could be a sequence end, which we ignore - but if it's neither, then it's a bad node)
Token& token = m_scanner.peek();
if(token.type == Token::FLOW_ENTRY)
m_scanner.pop();
else if(token.type != Token::FLOW_SEQ_END)
throw ParserException(token.mark, ErrorMsg::END_OF_SEQ_FLOW);
}
m_pCollectionStack->PopCollectionType(CollectionType::FlowSeq);
}
// HandleMap
// . Dispatches to the appropriate map handler based on the start token:
//   explicit block/flow maps, or the headerless compact forms.
void SingleDocParser::HandleMap(EventHandler& eventHandler)
{
const Token& startToken = m_scanner.peek();
if(startToken.type == Token::BLOCK_MAP_START)
HandleBlockMap(eventHandler);
else if(startToken.type == Token::FLOW_MAP_START)
HandleFlowMap(eventHandler);
else if(startToken.type == Token::KEY)
HandleCompactMap(eventHandler);
else if(startToken.type == Token::VALUE)
HandleCompactMapWithNoKey(eventHandler);
}
// HandleBlockMap
// . Emits the key/value pairs of a block map; either the key or the value
//   may be missing, in which case a null is emitted in its place.
void SingleDocParser::HandleBlockMap(EventHandler& eventHandler)
{
// eat start token
m_scanner.pop();
m_pCollectionStack->PushCollectionType(CollectionType::BlockMap);
while(1) {
if(m_scanner.empty())
throw ParserException(Mark::null(), ErrorMsg::END_OF_MAP);
Token token = m_scanner.peek();
if(token.type != Token::KEY && token.type != Token::VALUE && token.type != Token::BLOCK_MAP_END)
throw ParserException(token.mark, ErrorMsg::END_OF_MAP);
if(token.type == Token::BLOCK_MAP_END) {
m_scanner.pop();
break;
}
// grab key (if non-null)
if(token.type == Token::KEY) {
m_scanner.pop();
HandleNode(eventHandler);
} else {
eventHandler.OnNull(token.mark, NullAnchor);
}
// now grab value (optional)
if(!m_scanner.empty() && m_scanner.peek().type == Token::VALUE) {
m_scanner.pop();
HandleNode(eventHandler);
} else {
eventHandler.OnNull(token.mark, NullAnchor);
}
}
m_pCollectionStack->PopCollectionType(CollectionType::BlockMap);
}
// HandleFlowMap
// . Emits the key/value pairs of a flow map '{k: v, ...}'; missing keys or
//   values are emitted as nulls; pairs are separated by FLOW_ENTRY (',').
// . Throws END_OF_MAP_FLOW if the input ends before the map is closed.
void SingleDocParser::HandleFlowMap(EventHandler& eventHandler)
{
// eat start token
m_scanner.pop();
m_pCollectionStack->PushCollectionType(CollectionType::FlowMap);
while(1) {
if(m_scanner.empty())
throw ParserException(Mark::null(), ErrorMsg::END_OF_MAP_FLOW);
Token& token = m_scanner.peek();
// first check for end
if(token.type == Token::FLOW_MAP_END) {
m_scanner.pop();
break;
}
// grab key (if non-null)
if(token.type == Token::KEY) {
m_scanner.pop();
HandleNode(eventHandler);
} else {
eventHandler.OnNull(token.mark, NullAnchor);
}
// now grab value (optional)
if(!m_scanner.empty() && m_scanner.peek().type == Token::VALUE) {
m_scanner.pop();
HandleNode(eventHandler);
} else {
eventHandler.OnNull(token.mark, NullAnchor);
}
// the key/value handling may have consumed the rest of the input; guard
// before peeking for the separator (peek on an empty scanner is unchecked)
if(m_scanner.empty())
throw ParserException(Mark::null(), ErrorMsg::END_OF_MAP_FLOW);
// now eat the separator (or could be a map end, which we ignore - but if it's neither, then it's a bad node)
Token& nextToken = m_scanner.peek();
if(nextToken.type == Token::FLOW_ENTRY)
m_scanner.pop();
else if(nextToken.type != Token::FLOW_MAP_END)
throw ParserException(nextToken.mark, ErrorMsg::END_OF_MAP_FLOW);
}
m_pCollectionStack->PopCollectionType(CollectionType::FlowMap);
}
// . Single "key: value" pair in a flow sequence
// . The current token is the KEY; the value is emitted as null if absent.
void SingleDocParser::HandleCompactMap(EventHandler& eventHandler)
{
m_pCollectionStack->PushCollectionType(CollectionType::CompactMap);
// grab key
Mark mark = m_scanner.peek().mark;
m_scanner.pop();
HandleNode(eventHandler);
// now grab value (optional)
if(!m_scanner.empty() && m_scanner.peek().type == Token::VALUE) {
m_scanner.pop();
HandleNode(eventHandler);
} else {
eventHandler.OnNull(mark, NullAnchor);
}
m_pCollectionStack->PopCollectionType(CollectionType::CompactMap);
}
// . Single ": value" pair in a flow sequence
// . The current token is the VALUE; a null key is emitted in place of the
//   missing key.
void SingleDocParser::HandleCompactMapWithNoKey(EventHandler& eventHandler)
{
m_pCollectionStack->PushCollectionType(CollectionType::CompactMap);
// null key
eventHandler.OnNull(m_scanner.peek().mark, NullAnchor);
// grab value
m_scanner.pop();
HandleNode(eventHandler);
m_pCollectionStack->PopCollectionType(CollectionType::CompactMap);
}
// ParseProperties
// . Grabs any tag or anchor tokens preceding a node and records their
//   values; stops at the first token that is neither.
void SingleDocParser::ParseProperties(std::string& tag, anchor_t& anchor)
{
tag.clear();
anchor = NullAnchor;
for(;;) {
if(m_scanner.empty())
return;
if(m_scanner.peek().type == Token::TAG)
ParseTag(tag);
else if(m_scanner.peek().type == Token::ANCHOR)
ParseAnchor(anchor);
else
return;
}
}
// ParseTag
// . Consumes a TAG token, translating it through the active directives;
//   at most one tag is allowed per node.
void SingleDocParser::ParseTag(std::string& tag)
{
Token& token = m_scanner.peek();
if(!tag.empty())
throw ParserException(token.mark, ErrorMsg::MULTIPLE_TAGS);
Tag tagInfo(token);
tag = tagInfo.Translate(m_directives);
m_scanner.pop();
}
// ParseAnchor
// . Consumes an ANCHOR token, registering its name; at most one anchor is
//   allowed per node.
void SingleDocParser::ParseAnchor(anchor_t& anchor)
{
Token& token = m_scanner.peek();
if(anchor)
throw ParserException(token.mark, ErrorMsg::MULTIPLE_ANCHORS);
anchor = RegisterAnchor(token.value);
m_scanner.pop();
}
// RegisterAnchor
// . Assigns the next anchor id to 'name' and records it; a later anchor of
//   the same name overwrites the earlier one. Empty names get NullAnchor.
anchor_t SingleDocParser::RegisterAnchor(const std::string& name)
{
if(name.empty())
return NullAnchor;
const anchor_t newAnchor = ++m_curAnchor;
m_anchors[name] = newAnchor;
return newAnchor;
}
// LookupAnchor
// . Resolves an alias name to its anchor id; throws UNKNOWN_ANCHOR (at the
//   alias's mark) if the name was never registered.
anchor_t SingleDocParser::LookupAnchor(const Mark& mark, const std::string& name) const
{
const Anchors::const_iterator it = m_anchors.find(name);
if(it != m_anchors.end())
return it->second;
throw ParserException(mark, ErrorMsg::UNKNOWN_ANCHOR);
}
}
#ifndef SINGLEDOCPARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define SINGLEDOCPARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include "yaml-cpp/anchor.h"
#include "yaml-cpp/noncopyable.h"
#include <string>
#include <map>
#include <memory>
namespace YAML
{
struct Directives;
struct Mark;
struct Token;
class CollectionStack;
class EventHandler;
class Node;
class Scanner;
// SingleDocParser
// . Parses one YAML document from a token scanner, emitting events
//   (scalars, sequence/map starts and ends, aliases, nulls) to an
//   EventHandler. Anchors are tracked per document.
class SingleDocParser: private noncopyable
{
public:
SingleDocParser(Scanner& scanner, const Directives& directives);
~SingleDocParser();
// parses the next document and emits its events; throws ParserException
void HandleDocument(EventHandler& eventHandler);
private:
// node dispatch and per-collection-kind handlers
void HandleNode(EventHandler& eventHandler);
void HandleSequence(EventHandler& eventHandler);
void HandleBlockSequence(EventHandler& eventHandler);
void HandleFlowSequence(EventHandler& eventHandler);
void HandleMap(EventHandler& eventHandler);
void HandleBlockMap(EventHandler& eventHandler);
void HandleFlowMap(EventHandler& eventHandler);
void HandleCompactMap(EventHandler& eventHandler);
void HandleCompactMapWithNoKey(EventHandler& eventHandler);
// tag/anchor property handling
void ParseProperties(std::string& tag, anchor_t& anchor);
void ParseTag(std::string& tag);
void ParseAnchor(anchor_t& anchor);
anchor_t RegisterAnchor(const std::string& name);
anchor_t LookupAnchor(const Mark& mark, const std::string& name) const;
private:
Scanner& m_scanner;
const Directives& m_directives;
std::auto_ptr<CollectionStack> m_pCollectionStack;
// name -> anchor id for this document (later anchors overwrite earlier)
typedef std::map<std::string, anchor_t> Anchors;
Anchors m_anchors;
// last anchor id handed out (0 == NullAnchor, i.e. none yet)
anchor_t m_curAnchor;
};
}
#endif // SINGLEDOCPARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#include "stream.h"
#include <iostream>
#include "exp.h"
#ifndef YAML_PREFETCH_SIZE
#define YAML_PREFETCH_SIZE 2048
#endif
#define S_ARRAY_SIZE( A ) (sizeof(A)/sizeof(*(A)))
#define S_ARRAY_END( A ) ((A) + S_ARRAY_SIZE(A))
#define CP_REPLACEMENT_CHARACTER (0xFFFD)
namespace YAML
{
// State machine for detecting the input's character set from its first few
// bytes (BOM or implicit pattern), per the YAML spec's encoding detection.
// States encode how many intro bytes have been seen and what they imply.
enum UtfIntroState {
uis_start,
uis_utfbe_b1,
uis_utf32be_b2,
uis_utf32be_bom3,
uis_utf32be,
uis_utf16be,
uis_utf16be_bom1,
uis_utfle_bom1,
uis_utf16le_bom2,
uis_utf32le_bom3,
uis_utf16le,
uis_utf32le,
uis_utf8_imp,
uis_utf16le_imp,
uis_utf32le_imp3,
uis_utf8_bom1,
uis_utf8_bom2,
uis_utf8,
uis_error
};
// Input classes for the state machine: the individual bytes that can occur
// in a BOM, plus "any other ASCII byte" and "anything else".
enum UtfIntroCharType {
uict00,
uictBB,
uictBF,
uictEF,
uictFE,
uictFF,
uictAscii,
uictOther,
uictMax
};
// True for states where detection is complete (indexed by UtfIntroState).
static bool s_introFinalState[] = {
false, //uis_start
false, //uis_utfbe_b1
false, //uis_utf32be_b2
false, //uis_utf32be_bom3
true, //uis_utf32be
true, //uis_utf16be
false, //uis_utf16be_bom1
false, //uis_utfle_bom1
false, //uis_utf16le_bom2
false, //uis_utf32le_bom3
true, //uis_utf16le
true, //uis_utf32le
false, //uis_utf8_imp
false, //uis_utf16le_imp
false, //uis_utf32le_imp3
false, //uis_utf8_bom1
false, //uis_utf8_bom2
true, //uis_utf8
true, //uis_error
};
// Next state, indexed by [current state][input class].
static UtfIntroState s_introTransitions[][uictMax] = {
// uict00, uictBB, uictBF, uictEF, uictFE, uictFF, uictAscii, uictOther
{uis_utfbe_b1, uis_utf8, uis_utf8, uis_utf8_bom1, uis_utf16be_bom1, uis_utfle_bom1, uis_utf8_imp, uis_utf8},
{uis_utf32be_b2, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf16be, uis_utf8},
{uis_utf32be, uis_utf8, uis_utf8, uis_utf8, uis_utf32be_bom3, uis_utf8, uis_utf8, uis_utf8},
{uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf32be, uis_utf8, uis_utf8},
{uis_utf32be, uis_utf32be, uis_utf32be, uis_utf32be, uis_utf32be, uis_utf32be, uis_utf32be, uis_utf32be},
{uis_utf16be, uis_utf16be, uis_utf16be, uis_utf16be, uis_utf16be, uis_utf16be, uis_utf16be, uis_utf16be},
{uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf16be, uis_utf8, uis_utf8},
{uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf16le_bom2, uis_utf8, uis_utf8, uis_utf8},
{uis_utf32le_bom3, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le},
{uis_utf32le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le},
{uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le},
{uis_utf32le, uis_utf32le, uis_utf32le, uis_utf32le, uis_utf32le, uis_utf32le, uis_utf32le, uis_utf32le},
{uis_utf16le_imp, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8},
{uis_utf32le_imp3, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le},
{uis_utf32le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le},
{uis_utf8, uis_utf8_bom2, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8},
{uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8},
{uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8},
};
// Number of intro bytes to push back (un-consume) on each transition,
// indexed by [current state][input class]; BOM bytes are swallowed (0),
// content bytes are returned to the stream.
static char s_introUngetCount[][uictMax] = {
// uict00, uictBB, uictBF, uictEF, uictFE, uictFF, uictAscii, uictOther
{0, 1, 1, 0, 0, 0, 0, 1},
{0, 2, 2, 2, 2, 2, 2, 2},
{3, 3, 3, 3, 0, 3, 3, 3},
{4, 4, 4, 4, 4, 0, 4, 4},
{1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1},
{2, 2, 2, 2, 2, 0, 2, 2},
{2, 2, 2, 2, 0, 2, 2, 2},
{0, 1, 1, 1, 1, 1, 1, 1},
{0, 2, 2, 2, 2, 2, 2, 2},
{1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1},
{0, 2, 2, 2, 2, 2, 2, 2},
{0, 3, 3, 3, 3, 3, 3, 3},
{4, 4, 4, 4, 4, 4, 4, 4},
{2, 0, 2, 2, 2, 2, 2, 2},
{3, 3, 0, 3, 3, 3, 3, 3},
{1, 1, 1, 1, 1, 1, 1, 1},
};
// IntroCharTypeOf
// . Classifies one raw input byte (or EOF) for the encoding-detection
//   state machine: the specific BOM bytes get their own class, other
//   non-zero bytes below 0xFF are "ascii", and everything else is "other".
inline UtfIntroCharType IntroCharTypeOf(std::istream::int_type ch)
{
if (std::istream::traits_type::eof() == ch)
return uictOther;
switch (ch) {
case 0x00: return uict00;
case 0xBB: return uictBB;
case 0xBF: return uictBF;
case 0xEF: return uictEF;
case 0xFE: return uictFE;
case 0xFF: return uictFF;
default: break;
}
return ((ch > 0) && (ch < 0xFF)) ? uictAscii : uictOther;
}
// Utf8Adjust
// . Builds one byte of a UTF-8 sequence: shifts the codepoint 'ch' right
//   by 'rshift', masks off the payload bits, and ORs in a header of
//   'lead_bits' leading one-bits (0 for a plain ASCII byte, 1 for a
//   continuation byte, 2-4 for a multi-byte lead byte).
inline char Utf8Adjust(unsigned long ch, unsigned char lead_bits, unsigned char rshift)
{
const unsigned char header = ((1 << lead_bits) - 1) << (8 - lead_bits);
const unsigned char mask = (0xFF >> (lead_bits + 1));
const unsigned char payload = static_cast<unsigned char>((ch >> rshift) & mask);
return static_cast<char>(header | payload);
}
// QueueUnicodeCodepoint
// . Appends the UTF-8 encoding of codepoint 'ch' (1-4 bytes) to the queue.
inline void QueueUnicodeCodepoint(std::deque<char>& q, unsigned long ch)
{
// We are not allowed to queue the Stream::eof() codepoint, so
// replace it with CP_REPLACEMENT_CHARACTER
if (static_cast<unsigned long>(Stream::eof()) == ch)
{
ch = CP_REPLACEMENT_CHARACTER;
}
if (ch < 0x80)
{
// one byte: plain ASCII
q.push_back(Utf8Adjust(ch, 0, 0));
}
else if (ch < 0x800)
{
// two bytes: lead (110xxxxx) + continuation (10xxxxxx)
q.push_back(Utf8Adjust(ch, 2, 6));
q.push_back(Utf8Adjust(ch, 1, 0));
}
else if (ch < 0x10000)
{
// three bytes
q.push_back(Utf8Adjust(ch, 3, 12));
q.push_back(Utf8Adjust(ch, 1, 6));
q.push_back(Utf8Adjust(ch, 1, 0));
}
else
{
// four bytes (supplementary planes)
q.push_back(Utf8Adjust(ch, 4, 18));
q.push_back(Utf8Adjust(ch, 1, 12));
q.push_back(Utf8Adjust(ch, 1, 6));
q.push_back(Utf8Adjust(ch, 1, 0));
}
}
// Stream
// . Wraps 'input', detects its character set from the leading bytes (BOM
//   or implicit pattern) via the intro state machine, and primes the
//   UTF-8 readahead buffer.
Stream::Stream(std::istream& input)
: m_input(input), m_nPushedBack(0),
m_pPrefetched(new unsigned char[YAML_PREFETCH_SIZE]),
m_nPrefetchedAvailable(0), m_nPrefetchedUsed(0)
{
typedef std::istream::traits_type char_traits;
if(!input)
return;
// Determine (or guess) the character-set by reading the BOM, if any. See
// the YAML specification for the determination algorithm.
char_traits::int_type intro[4];
int nIntroUsed = 0;
UtfIntroState state = uis_start;
for (; !s_introFinalState[state]; ) {
std::istream::int_type ch = input.get();
intro[nIntroUsed++] = ch;
UtfIntroCharType charType = IntroCharTypeOf(ch);
UtfIntroState newState = s_introTransitions[state][charType];
int nUngets = s_introUngetCount[state][charType];
if (nUngets > 0) {
// return non-BOM intro bytes to our pushback buffer (most recent
// first), skipping any EOF sentinel
for (; nUngets > 0; --nUngets) {
if (char_traits::eof() != intro[--nIntroUsed]) {
m_bufPushback[m_nPushedBack++] =
char_traits::to_char_type(intro[nIntroUsed]);
}
}
}
state = newState;
}
switch (state) {
case uis_utf8: m_charSet = utf8; break;
case uis_utf16le: m_charSet = utf16le; break;
case uis_utf16be: m_charSet = utf16be; break;
case uis_utf32le: m_charSet = utf32le; break;
case uis_utf32be: m_charSet = utf32be; break;
default: m_charSet = utf8; break;
}
// prime the readahead with the first character
ReadAheadTo(0);
}
// releases the prefetch buffer allocated in the constructor
Stream::~Stream()
{
delete[] m_pPrefetched;
}
// peek
// . Returns the next character without consuming it; an empty readahead
//   means end-of-stream, signalled by the eof() sentinel character.
char Stream::peek() const
{
return m_readahead.empty() ? Stream::eof() : m_readahead[0];
}
// operator bool
// . True while characters remain: either the underlying stream is still
//   good, or buffered characters (other than the eof sentinel) remain.
Stream::operator bool() const
{
return m_input.good() || (!m_readahead.empty() && m_readahead[0] != Stream::eof());
}
// get
// . Extracts a character from the stream and updates our position
//   (line/column tracking for error reporting).
char Stream::get()
{
const char ch = peek();
AdvanceCurrent();
if(ch == '\n') {
// a newline starts a fresh line at column 0
m_mark.column = 0;
m_mark.line++;
} else {
m_mark.column++;
}
return ch;
}
// get
// . Extracts 'n' characters from the stream and updates our position
std::string Stream::get(int n)
{
std::string result;
result.reserve(n);
while(n-- > 0)
result += get();
return result;
}
// eat
// . Eats 'n' characters and updates our position.
void Stream::eat(int n)
{
while(n-- > 0)
get();
}
// AdvanceCurrent
// . Consumes the front readahead character (advancing the absolute
//   position) and refills the readahead so peek() stays valid.
void Stream::AdvanceCurrent()
{
if (!m_readahead.empty())
{
m_readahead.pop_front();
m_mark.pos++;
}
ReadAheadTo(0);
}
// _ReadAheadTo
// . Slow path of ReadAheadTo: decodes characters (per the detected charset)
//   into the readahead until index 'i' is available or input runs out.
// . On exhaustion, appends the eof() sentinel once so peek() sees it.
bool Stream::_ReadAheadTo(size_t i) const
{
while (m_input.good() && (m_readahead.size() <= i))
{
switch (m_charSet)
{
case utf8: StreamInUtf8(); break;
case utf16le: StreamInUtf16(); break;
case utf16be: StreamInUtf16(); break;
case utf32le: StreamInUtf32(); break;
case utf32be: StreamInUtf32(); break;
}
}
// signal end of stream
if(!m_input.good())
m_readahead.push_back(Stream::eof());
return m_readahead.size() > i;
}
// StreamInUtf8
// . UTF-8 input is already in the readahead encoding, so a byte is passed
//   through unchanged (unless the input went bad while reading it).
void Stream::StreamInUtf8() const
{
unsigned char b = GetNextByte();
if (m_input.good())
{
m_readahead.push_back(b);
}
}
// StreamInUtf16
// . Decodes one UTF-16 code unit (or a surrogate pair) from the byte
//   stream and queues its UTF-8 encoding onto the readahead.
// . Invalid surrogate sequences are replaced by U+FFFD.
void Stream::StreamInUtf16() const
{
unsigned long ch = 0;
unsigned char bytes[2];
int nBigEnd = (m_charSet == utf16be) ? 0 : 1;
bytes[0] = GetNextByte();
bytes[1] = GetNextByte();
if (!m_input.good())
{
return;
}
ch = (static_cast<unsigned long>(bytes[nBigEnd]) << 8) |
static_cast<unsigned long>(bytes[1 ^ nBigEnd]);
if (ch >= 0xDC00 && ch < 0xE000)
{
// Trailing (low) surrogate...ugh, wrong order
QueueUnicodeCodepoint(m_readahead, CP_REPLACEMENT_CHARACTER);
return;
}
else if (ch >= 0xD800 && ch < 0xDC00)
{
// ch is a leading (high) surrogate
// Four byte UTF-8 code point
// Read the trailing (low) surrogate
for (;;)
{
bytes[0] = GetNextByte();
bytes[1] = GetNextByte();
if (!m_input.good())
{
QueueUnicodeCodepoint(m_readahead, CP_REPLACEMENT_CHARACTER);
return;
}
unsigned long chLow = (static_cast<unsigned long>(bytes[nBigEnd]) << 8) |
static_cast<unsigned long>(bytes[1 ^ nBigEnd]);
// BUGFIX: test chLow (the newly read unit), not ch - at this point ch
// is a known high surrogate (< 0xE000), so the old 'ch >= 0xE000'
// comparisons were always false and units >= 0xE000 were mistaken
// for valid low surrogates
if (chLow < 0xDC00 || chLow >= 0xE000)
{
// Trouble...not a low surrogate. Dump a REPLACEMENT CHARACTER into the stream.
QueueUnicodeCodepoint(m_readahead, CP_REPLACEMENT_CHARACTER);
// Deal with the next UTF-16 unit
if (chLow < 0xD800 || chLow >= 0xE000)
{
// Easiest case: the next unit is an ordinary BMP character -
// queue it (BUGFIX: queue chLow, not the invalid surrogate ch)
QueueUnicodeCodepoint(m_readahead, chLow);
return;
}
else
{
// Start the loop over with the new high surrogate
ch = chLow;
continue;
}
}
// Select the payload bits from the high surrogate
ch &= 0x3FF;
ch <<= 10;
// Include bits from low surrogate
ch |= (chLow & 0x3FF);
// Add the surrogacy offset
ch += 0x10000;
break;
}
}
QueueUnicodeCodepoint(m_readahead, ch);
}
// ReadBuffer
// . Adapts the unsigned-char prefetch buffer to the char* that
//   streambuf::sgetn expects.
inline char* ReadBuffer(unsigned char* pBuffer)
{
return reinterpret_cast<char*>(pBuffer);
}
// GetNextByte
// . Returns the next raw byte: first from the pushback buffer (bytes
//   returned by BOM detection, LIFO order), then from the prefetch buffer,
//   refilling it from the underlying streambuf when exhausted.
// . On true end of input, sets eofbit on the stream and returns 0.
unsigned char Stream::GetNextByte() const
{
if (m_nPushedBack)
{
return m_bufPushback[--m_nPushedBack];
}
if (m_nPrefetchedUsed >= m_nPrefetchedAvailable)
{
std::streambuf *pBuf = m_input.rdbuf();
m_nPrefetchedAvailable = pBuf->sgetn(ReadBuffer(m_pPrefetched),
YAML_PREFETCH_SIZE);
m_nPrefetchedUsed = 0;
// merged the two identical exhaustion checks: flag EOF and bail out
if (0 == m_nPrefetchedAvailable)
{
m_input.setstate(std::ios_base::eofbit);
return 0;
}
}
return m_pPrefetched[m_nPrefetchedUsed++];
}
// StreamInUtf32
// . Decodes one UTF-32 code unit (4 bytes, either endianness) and queues
//   its UTF-8 encoding onto the readahead.
void Stream::StreamInUtf32() const
{
// byte order to assemble from: reversed for LE, in order for BE
static int indexes[2][4] = {
{3, 2, 1, 0},
{0, 1, 2, 3}
};
unsigned long ch = 0;
unsigned char bytes[4];
int* pIndexes = (m_charSet == utf32be) ? indexes[1] : indexes[0];
bytes[0] = GetNextByte();
bytes[1] = GetNextByte();
bytes[2] = GetNextByte();
bytes[3] = GetNextByte();
if (!m_input.good())
{
return;
}
for (int i = 0; i < 4; ++i)
{
ch <<= 8;
ch |= bytes[pIndexes[i]];
}
QueueUnicodeCodepoint(m_readahead, ch);
}
}
#ifndef STREAM_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define STREAM_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include "yaml-cpp/noncopyable.h"
#include "yaml-cpp/mark.h"
#include <cstddef>
#include <deque>
#include <ios>
#include <iostream>
#include <set>
#include <string>
namespace YAML
{
static const size_t MAX_PARSER_PUSHBACK = 8;
// Stream
// . Character source for the scanner: wraps a std::istream, auto-detects
//   its encoding (UTF-8/16/32, either endianness), transcodes to UTF-8 on
//   the fly, and tracks position (pos/line/column) for error reporting.
class Stream: private noncopyable
{
public:
friend class StreamCharSource;
Stream(std::istream& input);
~Stream();
// true while characters remain to be read
operator bool() const;
bool operator !() const { return !static_cast <bool>(*this); }
char peek() const;
char get();
std::string get(int n);
void eat(int n = 1);
// sentinel character used to mark end-of-stream in the readahead
static char eof() { return 0x04; }
const Mark mark() const { return m_mark; }
int pos() const { return m_mark.pos; }
int line() const { return m_mark.line; }
int column() const { return m_mark.column; }
void ResetColumn() { m_mark.column = 0; }
private:
enum CharacterSet {utf8, utf16le, utf16be, utf32le, utf32be};
std::istream& m_input;
Mark m_mark;
// encoding detected from the leading bytes (BOM or implicit)
CharacterSet m_charSet;
// bytes returned to the stream by BOM detection (LIFO)
unsigned char m_bufPushback[MAX_PARSER_PUSHBACK];
mutable size_t m_nPushedBack;
// decoded UTF-8 characters not yet consumed
mutable std::deque<char> m_readahead;
// raw-byte prefetch buffer for the underlying streambuf
unsigned char* const m_pPrefetched;
mutable size_t m_nPrefetchedAvailable;
mutable size_t m_nPrefetchedUsed;
void AdvanceCurrent();
char CharAt(size_t i) const;
bool ReadAheadTo(size_t i) const;
bool _ReadAheadTo(size_t i) const;
void StreamInUtf8() const;
void StreamInUtf16() const;
void StreamInUtf32() const;
unsigned char GetNextByte() const;
};
// CharAt
// . Unchecked access into the readahead buffer; the caller must first
//   guarantee availability via ReadAheadTo(i).
inline char Stream::CharAt(size_t i) const {
	return m_readahead[i];
}
// Ensures at least i+1 characters are buffered. The common case —
// enough characters already decoded — short-circuits; otherwise the
// out-of-line slow path does the actual reading.
inline bool Stream::ReadAheadTo(size_t i) const {
	return (i < m_readahead.size()) || _ReadAheadTo(i);
}
}
#endif // STREAM_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#ifndef STREAMCHARSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define STREAMCHARSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include "yaml-cpp/noncopyable.h"
#include <cstddef>
namespace YAML
{
// Lightweight, copyable cursor into a Stream's readahead buffer.
// Indexing is relative to an internal offset; the bool conversion
// checks whether the character at the current offset is available.
class StreamCharSource
{
public:
	StreamCharSource(const Stream& stream): m_offset(0), m_stream(stream) {}
	StreamCharSource(const StreamCharSource& source): m_offset(source.m_offset), m_stream(source.m_stream) {}
	~StreamCharSource() {}

	// True when the character at the current offset can be read.
	operator bool() const;
	// Unchecked access, relative to the current offset.
	char operator [] (std::size_t i) const { return m_stream.CharAt(m_offset + i); }
	bool operator !() const { return !static_cast<bool>(*this); }

	// Returns a copy advanced by i; the offset is clamped at zero.
	const StreamCharSource operator + (int i) const;

private:
	std::size_t m_offset;    // position relative to the stream's readahead start
	const Stream& m_stream;

	StreamCharSource& operator = (const StreamCharSource&); // non-assignable
};
// True when the stream can supply the character at this source's offset.
inline StreamCharSource::operator bool() const {
	return m_stream.ReadAheadTo(m_offset);
}
// Returns a copy of this source advanced by i characters. A negative
// i that would move before the start clamps the offset to zero.
inline const StreamCharSource StreamCharSource::operator + (int i) const {
	StreamCharSource shifted(*this);
	const int newOffset = static_cast<int>(shifted.m_offset) + i;
	shifted.m_offset = (newOffset >= 0) ? static_cast<std::size_t>(newOffset) : 0;
	return shifted;
}
}
#endif // STREAMCHARSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#ifndef STRINGSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define STRINGSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include <cstddef>
namespace YAML
{
// Character source backed by a fixed-size character buffer.
// Mirrors the StreamCharSource interface: validity testing via bool
// conversion, relative indexing, and offset arithmetic.
class StringCharSource
{
public:
	StringCharSource(const char *str, std::size_t size): m_str(str), m_size(size), m_offset(0) {}

	// True while the cursor is still inside the buffer.
	operator bool() const { return m_offset < m_size; }
	bool operator !() const { return !static_cast<bool>(*this); }

	// Unchecked access, relative to the current offset.
	char operator [] (std::size_t i) const { return m_str[m_offset + i]; }

	// Returns a copy advanced by i; the offset is clamped at zero.
	const StringCharSource operator + (int i) const {
		StringCharSource shifted(*this);
		const int newOffset = static_cast<int>(shifted.m_offset) + i;
		shifted.m_offset = (newOffset >= 0) ? static_cast<std::size_t>(newOffset) : 0;
		return shifted;
	}

	// Advance the cursor by one character.
	StringCharSource& operator ++ () {
		m_offset += 1;
		return *this;
	}

	// Advance the cursor by offset characters.
	StringCharSource& operator += (std::size_t offset) {
		m_offset += offset;
		return *this;
	}

private:
	const char *m_str;     // borrowed buffer (not owned)
	std::size_t m_size;    // total buffer length
	std::size_t m_offset;  // current cursor position
};
}
#endif // STRINGSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#include "tag.h"
#include "directives.h"
#include "token.h"
#include <cassert>
#include <stdexcept>
namespace YAML
{
// Builds a Tag from a TAG token; token.data carries the TYPE
// discriminant set by the scanner.
Tag::Tag(const Token& token): type(static_cast<TYPE>(token.data))
{
	switch(type) {
		case VERBATIM:
		case PRIMARY_HANDLE:
		case SECONDARY_HANDLE:
			// All three store the tag text directly.
			value = token.value;
			break;
		case NAMED_HANDLE:
			// Named handles carry the handle name plus the suffix.
			handle = token.value;
			value = token.params[0];
			break;
		case NON_SPECIFIC:
			// Nothing to store.
			break;
		default:
			assert(false);
	}
}
// Resolves this tag to its full form using the document's directives:
// verbatim tags pass through, handle-based tags have their handle
// translated and prepended to the suffix.
const std::string Tag::Translate(const Directives& directives)
{
	if(type == VERBATIM)
		return value;
	if(type == PRIMARY_HANDLE)
		return directives.TranslateTagHandle("!") + value;
	if(type == SECONDARY_HANDLE)
		return directives.TranslateTagHandle("!!") + value;
	if(type == NAMED_HANDLE)
		return directives.TranslateTagHandle("!" + handle + "!") + value;
	if(type == NON_SPECIFIC) {
		// TODO:
		return "!";
	}
	assert(false);
	throw std::runtime_error("yaml-cpp: internal error, bad tag type");
}
}
#ifndef TAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define TAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include <string>
namespace YAML
{
struct Token;
struct Directives;
// Parsed representation of a YAML tag token; Translate() resolves it
// to its full form using the document's directives.
struct Tag {
	// How the tag was written in the source document.
	enum TYPE {
		VERBATIM, PRIMARY_HANDLE, SECONDARY_HANDLE, NAMED_HANDLE, NON_SPECIFIC
	};

	Tag(const Token& token);
	const std::string Translate(const Directives& directives);

	TYPE type;
	// handle is only populated for NAMED_HANDLE; value holds the
	// tag text or suffix (empty for NON_SPECIFIC).
	std::string handle, value;
};
}
#endif // TAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#ifndef TOKEN_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define TOKEN_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if !defined(__GNUC__) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include "yaml-cpp/mark.h"
#include <iostream>
#include <string>
#include <vector>
namespace YAML
{
// Human-readable names for Token::TYPE, indexed by enum value.
// Must stay in sync with Token::TYPE: exactly one entry per
// enumerator, in declaration order — Token's operator<< indexes this
// array with token.type.
// Fix: the array previously ended with a single "SCALAR" entry (20
// names) while the enum has 21 values (PLAIN_SCALAR and
// NON_PLAIN_SCALAR), so printing a NON_PLAIN_SCALAR token read past
// the end of the array.
const std::string TokenNames[] = {
	"DIRECTIVE",
	"DOC_START",
	"DOC_END",
	"BLOCK_SEQ_START",
	"BLOCK_MAP_START",
	"BLOCK_SEQ_END",
	"BLOCK_MAP_END",
	"BLOCK_ENTRY",
	"FLOW_SEQ_START",
	"FLOW_MAP_START",
	"FLOW_SEQ_END",
	"FLOW_MAP_END",
	"FLOW_MAP_COMPACT",
	"FLOW_ENTRY",
	"KEY",
	"VALUE",
	"ANCHOR",
	"ALIAS",
	"TAG",
	"PLAIN_SCALAR",
	"NON_PLAIN_SCALAR"
};
// A single lexical token produced by the scanner, with its source
// position and any associated text/parameters.
struct Token {
	// enums
	enum STATUS { VALID, INVALID, UNVERIFIED };
	enum TYPE {
		DIRECTIVE,
		DOC_START,
		DOC_END,
		BLOCK_SEQ_START,
		BLOCK_MAP_START,
		BLOCK_SEQ_END,
		BLOCK_MAP_END,
		BLOCK_ENTRY,
		FLOW_SEQ_START,
		FLOW_MAP_START,
		FLOW_SEQ_END,
		FLOW_MAP_END,
		FLOW_MAP_COMPACT,
		FLOW_ENTRY,
		KEY,
		VALUE,
		ANCHOR,
		ALIAS,
		TAG,
		PLAIN_SCALAR,
		NON_PLAIN_SCALAR
	};

	// data
	Token(TYPE type_, const Mark& mark_): status(VALID), type(type_), mark(mark_), data(0) {}

	// Debug printing. NOTE(review): indexes TokenNames by token.type,
	// so TokenNames must provide an entry for every TYPE enumerator.
	friend std::ostream& operator << (std::ostream& out, const Token& token) {
		out << TokenNames[token.type] << std::string(": ") << token.value;
		for(std::size_t i=0;i<token.params.size();i++)
			out << std::string(" ") << token.params[i];
		return out;
	}

	STATUS status;
	TYPE type;
	Mark mark;                         // source position of the token
	std::string value;                 // primary text payload
	std::vector <std::string> params;  // extra payloads (e.g. directive args)
	int data;                          // type-specific extra datum (e.g. Tag::TYPE for TAG tokens)
};
}
#endif // TOKEN_H_62B23520_7C8E_11DE_8A39_0800200C9A66
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment