const path = require("path");
const MarkdownIt = require("markdown-it");
const MaidError = require("./MaidError");
const md2json = require("md-2-json");

const md = new MarkdownIt({
  // Enable HTML so that we can ignore it later
  html: true
});

const {
  regex,
  whole,
  wildcard,
  extra,
  and,
  matchers: { WHITE_SPACE: SPACE, START, LAZY },
  flags
} = require("rexrex");

const space = extra(SPACE);
const isCommandReTemplate = and(
  START, // ^
  wildcard(SPACE),
  ["Runs" + LAZY, "tasks" + LAZY].join(space)
);

const MAID_TASKS_SECTION = whole(["<!--", "maid-tasks", "-->"].join(space));

const isCommandRe = new RegExp(isCommandReTemplate, flags.INSENSITIVE);
const commandsRe = /`([^`]+)`/g;
const isCommand = v => Boolean(v.match(isCommandRe)) && Boolean(v.match(commandsRe));
const parseCommand = v => {
  const inParallel = Boolean(v.match(/in\s+parallel/i));
  const when = (v.match(/before|after/i) || ["before"])[0];
  const taskNames = v.match(commandsRe).map(v => /`(.+)`/.exec(v)[1]);

  return {
    taskNames,
    when,
    inParallel
  };
};
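
// Illustrative example based on the regexes above (isCommandRe expands to
// roughly /^\s*Runs?\s+tasks?/i): a paragraph such as
//   "Run task `lint` and `test` after this, in parallel"
// passes isCommand(), and parseCommand() returns
//   { taskNames: ["lint", "test"], when: "after", inParallel: true }

// markdown-it emits a flat token stream: a heading becomes heading_open, an
// inline token holding the text, then heading_close; a paragraph becomes
// paragraph_open / inline / paragraph_close; a fenced code block is a single
// "fence" token. That is why the helpers below read tokens[index + 1].content
// to get the text of a heading or paragraph.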

const extractParagraphs = tokens => {
  const p = [];
  for (const [index, token] of tokens.entries()) {
    if (token.type === "paragraph_open") {
      p.push(tokens[index + 1].content);
    }
  }
  return p;
};

const isOpenHeader = (token, tag = "h2") => token.type === "heading_open" && token.tag === tag;

const isCloseHeader = (token, tag = "h2") => token.type === "heading_close" && token.tag === tag;

const selectSubset = (tokens, firstIndex, tag = "h2") => {
  const remaining = tokens.slice(firstIndex + 1);

  const next = remaining.findIndex(t => t.type === "heading_open" && t.tag === tag);

  return remaining.slice(0, next === -1 ? undefined : next);
};
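
// For example, in a document with "## build" followed later by "## test",
// selectSubset(tokens, indexOfBuildHeading, "h2") returns the tokens that follow
// the "build" heading_open, up to (but not including) the next h2 heading.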

const isMaidSectionComment = token => {
  return (
    token && token.type === "html_block" && regex(MAID_TASKS_SECTION).test(token.content.trim())
  );
};

const getSectionByComment = tokens => {
  let section;
  for (const [index, token] of tokens.entries()) {
    if (isCloseHeader(token) && isMaidSectionComment(tokens[index + 1])) {
      section = tokens[index - 1].content;
      break;
    }
  }
  return section;
};
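
// For example, in a README containing
//   ## Tasks
//   <!-- maid-tasks -->
// getSectionByComment returns "Tasks", because the html_block comment directly
// follows the heading_close of that h2.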

module.exports = (content, { section, filepath } = {}) => {
  let tokens = md.parse(content);
  // Strip any <!-- toc --> ... <!-- tocstop --> block before handing the content to md-2-json
  const newContent = content.replace(/(<!-- toc -->(\s|\S)*?<!-- tocstop -->)/g, "");
  const js = md2json.parse(newContent);

  // Build a map of category name -> [{ name, desc }] from the md-2-json heading tree
  const catScripts = {};
  for (const cat of Object.keys(js)) {
    const scriptsObj = js[cat];
    delete scriptsObj.raw;
    catScripts[cat] = Object.keys(scriptsObj).map(k => {
      return { name: k, desc: scriptsObj[k].raw };
    });
  }
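
  // Illustrative shape (an assumption about md-2-json output, for clarity): given
  //   ## Lint
  //   ### eslint
  //   Lints the src/ directory.
  // js is roughly { Lint: { raw: "", eslint: { raw: "Lints the src/ directory.\n" } } },
  // so catScripts ends up as { Lint: [{ name: "eslint", desc: "Lints the src/ directory.\n" }] }.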
  const isMaidfile = !filepath || path.basename(filepath) === "FcScripts.md";

  // Automatically pick the maid section out of a non-maidfile via the `<!-- maid-tasks -->` comment
  if (!section && !isMaidfile) {
    section = getSectionByComment(tokens);

    if (!section) {
      return null;
    }
  }

  const taskTag = section ? "h3" : "h2";

  const tasks = [];

  if (section) {
    const firstIndex = tokens.findIndex(
      (t, i, array) => isOpenHeader(t, "h2") && array[i + 1].content === section
    );

    if (firstIndex < 0) {
      throw new MaidError(`Unable to find \`h2\` header titled: '${section}'`);
    }

    tokens = selectSubset(tokens, firstIndex, "h2");
  }
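
  // Each task is a heading (h2 at the top level, h3 inside a named section)
  // followed by description paragraphs, optional "Run task `x` before/after"
  // paragraphs, and fenced code blocks that become the task's scripts.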
  for (const [index, token] of tokens.entries()) {
    if (isOpenHeader(token, taskTag)) {
      const task = {
        name: tokens[index + 1].content,
        before: [],
        after: [],
        scripts: []
      };

      const sectionTokens = selectSubset(tokens, index, taskTag);

      // Get the paragraphs from this task's section
      const paragraphs = extractParagraphs(sectionTokens);
      // Paragraph contents become the task description,
      // except for special "Run task ..." command paragraphs
      task.description = paragraphs
        .filter(p => {
          const isCommandBool = isCommand(p);
          if (isCommandBool) {
            const { taskNames, when, inParallel } = parseCommand(p);
            task[when].push({
              taskNames,
              inParallel
            });
          }
          return !isCommandBool;
        })
        .join("\n\n");

      // Collect every code fence in this section as a task script
      for (const token of sectionTokens) {
        if (token.type === "fence") {
          task.scripts = [...task.scripts, { src: token.content, type: token.info }];
        }
      }

      tasks.push(task);
    }
  }

  return {
    filepath,
    tasks,
    catScripts
  };
};

module.exports.isCommand = isCommand;
module.exports.parseCommand = parseCommand;
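
// Example usage (illustrative; the require path is hypothetical):
//   const parseMarkdown = require("./parseMarkdown");
//   const result = parseMarkdown(markdownSource, { filepath: "FcScripts.md" });
//   // result -> { filepath, tasks: [{ name, description, before, after, scripts: [{ src, type }] }], catScripts }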