label: "Text Content"
});
tokenList.push(Freestyle);
// Freestyle text needs to be "cut up" by these control characters so that the
// other rules get a chance to succeed. Otherwise, every line would simply be
// lexed as a single Freestyle token.
// If these characters are not consumed by other rules, they are lexed as
// "useless" UnusedControlChar tokens. The parser then has to combine Freestyle
// and UnusedControlChar tokens back together to recover "normal text" token
// sequences (see the short tokenization sketch after the definition below).
// Note that some "meaningful" characters (like +) are not listed here: they are
// only meaningful after a line break, and freestyle text already gets "cut up"
// at each line break.
export const UnusedControlChar = createToken({
  name: TokenNames.UNUSED_CONTROL_CHAR,
  pattern: /[\@\#\*\_\[\]\,\.\:\;\<\/\>\-\(\)\{\}][ \t]?/,
  label: "Text Content (Control Characters)"
});
tokenList.push(UnusedControlChar);
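// Sketch (not part of the original source): tokenizing a hypothetical line
// with just the two rules above shows how "normal text" gets cut up. Each
// control character becomes its own UnusedControlChar token (with at most one
// trailing space), and the text runs in between become Freestyle tokens.
const demoLexer = new chevrotain.Lexer([Freestyle, UnusedControlChar]);
const demoResult = demoLexer.tokenize("see (note): done");
// Resulting stream: Freestyle "see " | UnusedControlChar "(" | Freestyle "note"
// | UnusedControlChar ")" | UnusedControlChar ": " | Freestyle "done"
// The parser later recombines these into a single text sequence.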
export const EOF = chevrotain.EOF;
const lexerConfig: chevrotain.IMultiModeLexerDefinition = {
  modes: {
    default_mode: [
      Comment, // must come first
      FrontMatter,
      Data,
      EscapedChar, // must come directly after Comment
      SpecialChar,
      Emptyline,
      Newline,
      // Relation tokens must appear before Spaces; otherwise, all indentation
      // would always be consumed as spaces.
      // Dedent must appear before Indent to handle zero-space dedents.
      Dedent,
      Indent,
      InferenceStart, // needs to be lexed before OutgoingAttack (- vs --)
      // ... remaining token types elided in this excerpt ...
    ]
  },
  // closing reconstructed; defaultMode is required by IMultiModeLexerDefinition
  defaultMode: "default_mode"
};
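// Sketch (not in the original excerpt): the multi-mode definition is passed to
// chevrotain's Lexer constructor to build the actual lexer instance.
const lexer = new chevrotain.Lexer(lexerConfig);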
// Excerpt from the plugin method that runs the lexer and parser. The signature
// below is reconstructed (assumption); only the body appears in the original.
run(request, response, logger) {
  let lexResult = this.lexer.tokenize(request.input);
  response.tokens = lexResult.tokens;
  response.lexerErrors = lexResult.errors;
  this.parser.input = lexResult.tokens;
  response.ast = this.parser.argdown();
  response.parserErrors = this.parser.errors;
  if (response.lexerErrors && response.lexerErrors.length > 0) {
    logger.log("verbose", response.lexerErrors);
  }
  if (response.parserErrors && response.parserErrors.length > 0) {
    // Add a location if the error token is EOF, since EOF tokens carry no
    // position of their own: reuse the end position of the last real token.
    const lastToken = _.last(response.tokens);
    for (let error of response.parserErrors) {
      if (error.token && tokenMatcher(error.token, chevrotain.EOF)) {
        const startLine = lastToken.endLine;
        const endLine = startLine;
        const startOffset = lastToken.endOffset;
        const endOffset = startOffset;
        const startColumn = lastToken.endColumn;
        const endColumn = startColumn;
        const newToken = chevrotain.createTokenInstance(
          chevrotain.EOF,
          "",
          startOffset,
          endOffset,
          startLine,
          endLine,
          startColumn,
          endColumn
        );
        error.token = newToken;
      }
    }
  }
  return response;
}
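// Hypothetical usage (assumption: `plugin` is an instance exposing the run
// method above together with its lexer and parser). After run returns, even a
// parser error reported at EOF carries a usable location.
const response = plugin.run({ input: "<Argument 1>: premise" }, {}, console);
for (const error of response.parserErrors || []) {
  console.log(`parser error at line ${error.token.startLine}`);
}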