From dbd2855bfc75e8c08f9599e38dfc45081a2fc51e Mon Sep 17 00:00:00 2001 From: Raghu Rajagopalan Date: Sat, 26 Mar 2016 22:16:47 +0530 Subject: [PATCH] first couple of tests for the half baked parser --- karma.conf.js | 3 +- src/diagrams/gitGraph/gitGraphAst.js | 40 ++ src/diagrams/gitGraph/gitGraphParser.spec.js | 38 ++ src/diagrams/gitGraph/parser/gitGraph.jison | 6 +- src/diagrams/gitGraph/parser/gitGraph.js | 644 +++++++++++++++++++ 5 files changed, 727 insertions(+), 4 deletions(-) create mode 100644 src/diagrams/gitGraph/gitGraphAst.js create mode 100644 src/diagrams/gitGraph/gitGraphParser.spec.js create mode 100644 src/diagrams/gitGraph/parser/gitGraph.js diff --git a/karma.conf.js b/karma.conf.js index c8e27f2b0..606356abf 100644 --- a/karma.conf.js +++ b/karma.conf.js @@ -19,7 +19,8 @@ module.exports = function (config) { './src/diagrams/example/**/*.spec.js', './src/diagrams/sequenceDiagram/**/*.spec.js', './src/diagrams/classDiagram/**/*.spec.js', - './src/diagrams/gantt/**/*.spec.js' + './src/diagrams/gantt/**/*.spec.js', + './src/diagrams/gitGraph/**/*.spec.js' ], preprocessors: { diff --git a/src/diagrams/gitGraph/gitGraphAst.js b/src/diagrams/gitGraph/gitGraphAst.js new file mode 100644 index 000000000..88ab6229d --- /dev/null +++ b/src/diagrams/gitGraph/gitGraphAst.js @@ -0,0 +1,40 @@ +var crypto = require("crypto"); +var Logger = require('../../logger'); +var log = new Logger.Log(); + +var commits = {}; +var head = null; +var branches = { "master" : head }; +var curBranch = "master"; + +function getId() { + return crypto.randomBytes(20).toString('hex').substring(0, 7); +} +exports.pushCommit = function() { + var commit = { id: getId(), + parent: head == null ? null : head.id}; + head = commit; + commits[commit.id] = commit; + branches[curBranch] = commit.id; + log.debug("in pushCommit"); +} + +exports.createBranch = function(name) { + branches[name] = head.id; + log.debug("in createBranch"); +} + +exports.mergeBranch = function() { + log.debug("in mergeBranch"); +} + +exports.reset = function () { + commits = {}; + head = null; + branches = { "master" : head }; + curBranch = "master"; +} + +exports.getBranches = function() { return branches; } +exports.getCommits = function() { return commits; } +exports.getCurrentBranch = function() { return curBranch; } diff --git a/src/diagrams/gitGraph/gitGraphParser.spec.js b/src/diagrams/gitGraph/gitGraphParser.spec.js new file mode 100644 index 000000000..202c9409f --- /dev/null +++ b/src/diagrams/gitGraph/gitGraphParser.spec.js @@ -0,0 +1,38 @@ +var proxyquire = require('proxyquire'); +var parser = require('./parser/gitGraph').parser; +var ast = require("./gitGraphAst.js"); +describe('when parsing a gitGraph',function() { + beforeEach(function () { + parser.yy = ast; + parser.yy.reset(); + }); + it('should handle a gitGraph defintion', function () { + str = 'gitGraph:\n' + + 'commit'; + + parser.parse(str); + var commits = parser.yy.getCommits(); + console.log(commits); + + expect(Object.keys(commits).length).toBe(1); + expect(parser.yy.getCurrentBranch()).toBe("master"); + expect(Object.keys(parser.yy.getBranches()).length).toBe(1); + }); + it('should handle branch statement', function () { + str = 'gitGraph:\n' + + 'commit\n' + + 'commit\n' + + 'branch newbranch\n' + + 'commit\n' + + 'commit\n'; + + parser.parse(str); + var commits = parser.yy.getCommits(); + console.log(commits); + + expect(Object.keys(commits).length).toBe(4); + expect(parser.yy.getCurrentBranch()).toBe("master"); + 
expect(Object.keys(parser.yy.getBranches())).toContain('newbranch'); + expect(Object.keys(parser.yy.getBranches()).length).toBe(2); + }); +}); diff --git a/src/diagrams/gitGraph/parser/gitGraph.jison b/src/diagrams/gitGraph/parser/gitGraph.jison index beb944177..6cff3cc85 100644 --- a/src/diagrams/gitGraph/parser/gitGraph.jison +++ b/src/diagrams/gitGraph/parser/gitGraph.jison @@ -48,8 +48,8 @@ line ; statement - : COMMIT - | BRANCH ID - | MERGE ID + : COMMIT {yy.pushCommit()} + | BRANCH ID {yy.createBranch($2)} + | MERGE ID {yy.mergeBranch($2)} ; diff --git a/src/diagrams/gitGraph/parser/gitGraph.js b/src/diagrams/gitGraph/parser/gitGraph.js new file mode 100644 index 000000000..57e9725e2 --- /dev/null +++ b/src/diagrams/gitGraph/parser/gitGraph.js @@ -0,0 +1,644 @@ +/* parser generated by jison 0.4.17 */ +/* + Returns a Parser object of the following structure: + + Parser: { + yy: {} + } + + Parser.prototype: { + yy: {}, + trace: function(), + symbols_: {associative list: name ==> number}, + terminals_: {associative list: number ==> name}, + productions_: [...], + performAction: function anonymous(yytext, yyleng, yylineno, yy, yystate, $$, _$), + table: [...], + defaultActions: {...}, + parseError: function(str, hash), + parse: function(input), + + lexer: { + EOF: 1, + parseError: function(str, hash), + setInput: function(input), + input: function(), + unput: function(str), + more: function(), + less: function(n), + pastInput: function(), + upcomingInput: function(), + showPosition: function(), + test_match: function(regex_match_array, rule_index), + next: function(), + lex: function(), + begin: function(condition), + popState: function(), + _currentRules: function(), + topState: function(), + pushState: function(condition), + + options: { + ranges: boolean (optional: true ==> token location info will include a .range[] member) + flex: boolean (optional: true ==> flex-like lexing behaviour where the rules are tested exhaustively to find the longest match) + backtrack_lexer: boolean (optional: true ==> lexer regexes are tested in order and for each matching regex the action code is invoked; the lexer terminates the scan when a token is returned by the action code) + }, + + performAction: function(yy, yy_, $avoiding_name_collisions, YY_START), + rules: [...], + conditions: {associative list: name ==> set}, + } + } + + + token location info (@$, _$, etc.): { + first_line: n, + last_line: n, + first_column: n, + last_column: n, + range: [start_number, end_number] (where the numbers are indexes into the input string, regular zero-based) + } + + + the parseError function receives a 'hash' object with these members for lexer and parser errors: { + text: (matched text) + token: (the produced terminal token, if any) + line: (yylineno) + } + while parser (grammar) errors will also provide these members, i.e. 
parser errors deliver a superset of attributes: { + loc: (yylloc) + expected: (string describing the set of expected tokens) + recoverable: (boolean: TRUE when the parser has a error recovery rule available for this particular error) + } +*/ +var gitGraph = (function(){ +var o=function(k,v,o,l){for(o=o||{},l=k.length;l--;o[k[l]]=v);return o},$V0=[7,10,11,12,14]; +var parser = {trace: function trace() { }, +yy: {}, +symbols_: {"error":2,"start":3,"GG":4,":":5,"document":6,"EOF":7,"line":8,"statement":9,"NL":10,"COMMIT":11,"BRANCH":12,"ID":13,"MERGE":14,"$accept":0,"$end":1}, +terminals_: {2:"error",4:"GG",5:":",7:"EOF",10:"NL",11:"COMMIT",12:"BRANCH",13:"ID",14:"MERGE"}, +productions_: [0,[3,4],[6,0],[6,2],[8,1],[8,1],[9,1],[9,2],[9,2]], +performAction: function anonymous(yytext, yyleng, yylineno, yy, yystate /* action[1] */, $$ /* vstack */, _$ /* lstack */) { +/* this == yyval */ + +var $0 = $$.length - 1; +switch (yystate) { +case 1: + return $$[$0-1]; +break; +case 2: +this.$ =[] +break; +case 3: +$$[$0-1].push($$[$0]); this.$ = $$[$0-1] +break; +case 4: +this.$ =$$[$0] +break; +case 6: +yy.pushCommit() +break; +case 7: +yy.createBranch($$[$0]) +break; +case 8: +yy.mergeBranch($$[$0]) +break; +} +}, +table: [{3:1,4:[1,2]},{1:[3]},{5:[1,3]},o($V0,[2,2],{6:4}),{7:[1,5],8:6,9:7,10:[1,8],11:[1,9],12:[1,10],14:[1,11]},{1:[2,1]},o($V0,[2,3]),o($V0,[2,4]),o($V0,[2,5]),o($V0,[2,6]),{13:[1,12]},{13:[1,13]},o($V0,[2,7]),o($V0,[2,8])], +defaultActions: {5:[2,1]}, +parseError: function parseError(str, hash) { + if (hash.recoverable) { + this.trace(str); + } else { + function _parseError (msg, hash) { + this.message = msg; + this.hash = hash; + } + _parseError.prototype = Error; + + throw new _parseError(str, hash); + } +}, +parse: function parse(input) { + var self = this, stack = [0], tstack = [], vstack = [null], lstack = [], table = this.table, yytext = '', yylineno = 0, yyleng = 0, recovering = 0, TERROR = 2, EOF = 1; + var args = lstack.slice.call(arguments, 1); + var lexer = Object.create(this.lexer); + var sharedState = { yy: {} }; + for (var k in this.yy) { + if (Object.prototype.hasOwnProperty.call(this.yy, k)) { + sharedState.yy[k] = this.yy[k]; + } + } + lexer.setInput(input, sharedState.yy); + sharedState.yy.lexer = lexer; + sharedState.yy.parser = this; + if (typeof lexer.yylloc == 'undefined') { + lexer.yylloc = {}; + } + var yyloc = lexer.yylloc; + lstack.push(yyloc); + var ranges = lexer.options && lexer.options.ranges; + if (typeof sharedState.yy.parseError === 'function') { + this.parseError = sharedState.yy.parseError; + } else { + this.parseError = Object.getPrototypeOf(this).parseError; + } + function popStack(n) { + stack.length = stack.length - 2 * n; + vstack.length = vstack.length - n; + lstack.length = lstack.length - n; + } + _token_stack: + var lex = function () { + var token; + token = lexer.lex() || EOF; + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + return token; + }; + var symbol, preErrorSymbol, state, action, a, r, yyval = {}, p, len, newState, expected; + while (true) { + state = stack[stack.length - 1]; + if (this.defaultActions[state]) { + action = this.defaultActions[state]; + } else { + if (symbol === null || typeof symbol == 'undefined') { + symbol = lex(); + } + action = table[state] && table[state][symbol]; + } + if (typeof action === 'undefined' || !action.length || !action[0]) { + var errStr = ''; + expected = []; + for (p in table[state]) { + if (this.terminals_[p] && p > TERROR) { + expected.push('\'' + 
this.terminals_[p] + '\''); + } + } + if (lexer.showPosition) { + errStr = 'Parse error on line ' + (yylineno + 1) + ':\n' + lexer.showPosition() + '\nExpecting ' + expected.join(', ') + ', got \'' + (this.terminals_[symbol] || symbol) + '\''; + } else { + errStr = 'Parse error on line ' + (yylineno + 1) + ': Unexpected ' + (symbol == EOF ? 'end of input' : '\'' + (this.terminals_[symbol] || symbol) + '\''); + } + this.parseError(errStr, { + text: lexer.match, + token: this.terminals_[symbol] || symbol, + line: lexer.yylineno, + loc: yyloc, + expected: expected + }); + } + if (action[0] instanceof Array && action.length > 1) { + throw new Error('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol); + } + switch (action[0]) { + case 1: + stack.push(symbol); + vstack.push(lexer.yytext); + lstack.push(lexer.yylloc); + stack.push(action[1]); + symbol = null; + if (!preErrorSymbol) { + yyleng = lexer.yyleng; + yytext = lexer.yytext; + yylineno = lexer.yylineno; + yyloc = lexer.yylloc; + if (recovering > 0) { + recovering--; + } + } else { + symbol = preErrorSymbol; + preErrorSymbol = null; + } + break; + case 2: + len = this.productions_[action[1]][1]; + yyval.$ = vstack[vstack.length - len]; + yyval._$ = { + first_line: lstack[lstack.length - (len || 1)].first_line, + last_line: lstack[lstack.length - 1].last_line, + first_column: lstack[lstack.length - (len || 1)].first_column, + last_column: lstack[lstack.length - 1].last_column + }; + if (ranges) { + yyval._$.range = [ + lstack[lstack.length - (len || 1)].range[0], + lstack[lstack.length - 1].range[1] + ]; + } + r = this.performAction.apply(yyval, [ + yytext, + yyleng, + yylineno, + sharedState.yy, + action[1], + vstack, + lstack + ].concat(args)); + if (typeof r !== 'undefined') { + return r; + } + if (len) { + stack = stack.slice(0, -1 * len * 2); + vstack = vstack.slice(0, -1 * len); + lstack = lstack.slice(0, -1 * len); + } + stack.push(this.productions_[action[1]][0]); + vstack.push(yyval.$); + lstack.push(yyval._$); + newState = table[stack[stack.length - 2]][stack[stack.length - 1]]; + stack.push(newState); + break; + case 3: + return true; + } + } + return true; +}}; +/* generated by jison-lex 0.3.4 */ +var lexer = (function(){ +var lexer = ({ + +EOF:1, + +parseError:function parseError(str, hash) { + if (this.yy.parser) { + this.yy.parser.parseError(str, hash); + } else { + throw new Error(str); + } + }, + +// resets the lexer, sets new input +setInput:function (input, yy) { + this.yy = yy || this.yy || {}; + this._input = input; + this._more = this._backtrack = this.done = false; + this.yylineno = this.yyleng = 0; + this.yytext = this.matched = this.match = ''; + this.conditionStack = ['INITIAL']; + this.yylloc = { + first_line: 1, + first_column: 0, + last_line: 1, + last_column: 0 + }; + if (this.options.ranges) { + this.yylloc.range = [0,0]; + } + this.offset = 0; + return this; + }, + +// consumes and returns one char from the input +input:function () { + var ch = this._input[0]; + this.yytext += ch; + this.yyleng++; + this.offset++; + this.match += ch; + this.matched += ch; + var lines = ch.match(/(?:\r\n?|\n).*/g); + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + } else { + this.yylloc.last_column++; + } + if (this.options.ranges) { + this.yylloc.range[1]++; + } + + this._input = this._input.slice(1); + return ch; + }, + +// unshifts one char (or a string) into the input +unput:function (ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + + this._input = ch + 
this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + //this.yyleng -= len; + this.offset -= len; + var oldLines = this.match.split(/(?:\r\n?|\n)/g); + this.match = this.match.substr(0, this.match.length - 1); + this.matched = this.matched.substr(0, this.matched.length - 1); + + if (lines.length - 1) { + this.yylineno -= lines.length - 1; + } + var r = this.yylloc.range; + + this.yylloc = { + first_line: this.yylloc.first_line, + last_line: this.yylineno + 1, + first_column: this.yylloc.first_column, + last_column: lines ? + (lines.length === oldLines.length ? this.yylloc.first_column : 0) + + oldLines[oldLines.length - lines.length].length - lines[0].length : + this.yylloc.first_column - len + }; + + if (this.options.ranges) { + this.yylloc.range = [r[0], r[0] + this.yyleng - len]; + } + this.yyleng = this.yytext.length; + return this; + }, + +// When called from action, caches matched text and appends it on next action +more:function () { + this._more = true; + return this; + }, + +// When called from action, signals the lexer that this rule fails to match the input, so the next matching rule (regex) should be tested instead. +reject:function () { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + return this.parseError('Lexical error on line ' + (this.yylineno + 1) + '. You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n' + this.showPosition(), { + text: "", + token: null, + line: this.yylineno + }); + + } + return this; + }, + +// retain first n characters of the match +less:function (n) { + this.unput(this.match.slice(n)); + }, + +// displays already matched input, i.e. for error messages +pastInput:function () { + var past = this.matched.substr(0, this.matched.length - this.match.length); + return (past.length > 20 ? '...':'') + past.substr(-20).replace(/\n/g, ""); + }, + +// displays upcoming input, i.e. for error messages +upcomingInput:function () { + var next = this.match; + if (next.length < 20) { + next += this._input.substr(0, 20-next.length); + } + return (next.substr(0,20) + (next.length > 20 ? '...' : '')).replace(/\n/g, ""); + }, + +// displays the character position where the lexing error occurred, i.e. for error messages +showPosition:function () { + var pre = this.pastInput(); + var c = new Array(pre.length + 1).join("-"); + return pre + this.upcomingInput() + "\n" + c + "^"; + }, + +// test the lexed token: return FALSE when not a match, otherwise return token +test_match:function (match, indexed_rule) { + var token, + lines, + backup; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column + }, + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + yy: this.yy, + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + if (this.options.ranges) { + backup.yylloc.range = this.yylloc.range.slice(0); + } + } + + lines = match[0].match(/(?:\r\n?|\n).*/g); + if (lines) { + this.yylineno += lines.length; + } + this.yylloc = { + first_line: this.yylloc.last_line, + last_line: this.yylineno + 1, + first_column: this.yylloc.last_column, + last_column: lines ? 
+ lines[lines.length - 1].length - lines[lines.length - 1].match(/\r?\n?/)[0].length : + this.yylloc.last_column + match[0].length + }; + this.yytext += match[0]; + this.match += match[0]; + this.matches = match; + this.yyleng = this.yytext.length; + if (this.options.ranges) { + this.yylloc.range = [this.offset, this.offset += this.yyleng]; + } + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match[0].length); + this.matched += match[0]; + token = this.performAction.call(this, this.yy, this, indexed_rule, this.conditionStack[this.conditionStack.length - 1]); + if (this.done && this._input) { + this.done = false; + } + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + return false; // rule action called reject() implying the next rule should be tested instead. + } + return false; + }, + +// return next match in input +next:function () { + if (this.done) { + return this.EOF; + } + if (!this._input) { + this.done = true; + } + + var token, + match, + tempMatch, + index; + if (!this._more) { + this.yytext = ''; + this.match = ''; + } + var rules = this._currentRules(); + for (var i = 0; i < rules.length; i++) { + tempMatch = this._input.match(this.rules[rules[i]]); + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rules[i]); + if (token !== false) { + return token; + } else if (this._backtrack) { + match = false; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + if (match) { + token = this.test_match(match, rules[index]); + if (token !== false) { + return token; + } + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + if (this._input === "") { + return this.EOF; + } else { + return this.parseError('Lexical error on line ' + (this.yylineno + 1) + '. 
Unrecognized text.\n' + this.showPosition(), { + text: "", + token: null, + line: this.yylineno + }); + } + }, + +// return next match that has a token +lex:function lex() { + var r = this.next(); + if (r) { + return r; + } else { + return this.lex(); + } + }, + +// activates a new lexer condition state (pushes the new lexer condition state onto the condition stack) +begin:function begin(condition) { + this.conditionStack.push(condition); + }, + +// pop the previously active lexer condition state off the condition stack +popState:function popState() { + var n = this.conditionStack.length - 1; + if (n > 0) { + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + +// produce the lexer rule set which is active for the currently active lexer condition state +_currentRules:function _currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]].rules; + } else { + return this.conditions["INITIAL"].rules; + } + }, + +// return the currently active lexer condition state; when an index argument is provided it produces the N-th previous condition state, if available +topState:function topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + if (n >= 0) { + return this.conditionStack[n]; + } else { + return "INITIAL"; + } + }, + +// alias for begin(condition) +pushState:function pushState(condition) { + this.begin(condition); + }, + +// return the number of states currently on the stack +stateStackSize:function stateStackSize() { + return this.conditionStack.length; + }, +options: {"case-insensitive":true}, +performAction: function anonymous(yy,yy_,$avoiding_name_collisions,YY_START) { +var YYSTATE=YY_START; +switch($avoiding_name_collisions) { +case 0:return 10; +break; +case 1:/* skip all whitespace */ +break; +case 2:/* skip comments */ +break; +case 3:/* skip comments */ +break; +case 4:return 4; +break; +case 5:return 11; +break; +case 6:return 12; +break; +case 7:return 14; +break; +case 8:return 'RESET'; +break; +case 9:return 5; +break; +case 10:return 13; +break; +case 11:return 7; +break; +} +}, +rules: [/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:gitGraph\b)/i,/^(?:commit\b)/i,/^(?:branch\b)/i,/^(?:merge\b)/i,/^(?:reset\b)/i,/^(?::)/i,/^(?:[a-zA-Z][a-zA-Z0-9_]+)/i,/^(?:$)/i], +conditions: {"INITIAL":{"rules":[0,1,2,3,4,5,6,7,8,9,10,11],"inclusive":true}} +}); +return lexer; +})(); +parser.lexer = lexer; +function Parser () { + this.yy = {}; +} +Parser.prototype = parser;parser.Parser = Parser; +return new Parser; +})(); + + +if (typeof require !== 'undefined' && typeof exports !== 'undefined') { +exports.parser = gitGraph; +exports.Parser = gitGraph.Parser; +exports.parse = function () { return gitGraph.parse.apply(gitGraph, arguments); }; +exports.main = function commonjsMain(args) { + if (!args[1]) { + console.log('Usage: '+args[0]+' FILE'); + process.exit(1); + } + var source = require('fs').readFileSync(require('path').normalize(args[1]), "utf8"); + return exports.parser.parse(source); +}; +if (typeof module !== 'undefined' && require.main === module) { + exports.main(process.argv.slice(1)); +} +} \ No newline at end of file
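
A minimal usage sketch (not part of the patch), wired up the same way the new gitGraphParser.spec.js does it; the require paths are assumptions relative to the repo root:

    // Hook the hand-written AST module up as the parser's yy object, as the spec does.
    var parser = require('./src/diagrams/gitGraph/parser/gitGraph').parser;
    var ast = require('./src/diagrams/gitGraph/gitGraphAst');

    parser.yy = ast;   // grammar actions call yy.pushCommit() / yy.createBranch() / yy.mergeBranch()
    ast.reset();       // clear the module-level commits/branches/head state between runs

    parser.parse('gitGraph:\n' +
                 'commit\n' +
                 'branch newbranch\n' +
                 'commit\n');

    console.log(ast.getCurrentBranch());          // "master" -- checkout is not implemented yet
    console.log(Object.keys(ast.getCommits()));   // two 7-character hex commit ids
    console.log(Object.keys(ast.getBranches()));  // ["master", "newbranch"]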