cleaned up code and fixed bugs

- moved operation-specific code to corresponding functions
- added more comments
- fixed a bug where the series post used the index as its slug instead of the filename
This commit is contained in:
Kim, Jimin 2022-01-05 22:08:58 +09:00
parent 222107bfae
commit 485b4b8112
2 changed files with 73 additions and 44 deletions

View file

@@ -1,22 +1,29 @@
import { map, seriesMap } from "." import { map, seriesMap } from "."
export default function postProcess() { export default function postProcess() {
// sort date /**
* Sort date
*/
const TmpDate = map.date const TmpDate = map.date
map.date = {} map.date = {}
Object.keys(map.date) Object.keys(TmpDate)
.sort() .sort()
.forEach((sortedDateKey) => { .forEach((sortedDateKey) => {
map.date[sortedDateKey] = TmpDate[sortedDateKey] map.date[sortedDateKey] = TmpDate[sortedDateKey]
}) })
// fill meta data /**
* Fill meta data
*/
map.meta.tags = Object.keys(map.tags) map.meta.tags = Object.keys(map.tags)
// sort series post /**
* Parse Series
*/
// sort series map
for (const seriesURL in seriesMap) { for (const seriesURL in seriesMap) {
seriesMap[seriesURL].sort((a, b) => { seriesMap[seriesURL].sort((a, b) => {
if (a.index < b.index) return -1 if (a.index < b.index) return -1
@@ -26,6 +33,7 @@ export default function postProcess() {
}) })
} }
// series length and order
for (const seriesURL in seriesMap) { for (const seriesURL in seriesMap) {
map.series[seriesURL].length = seriesMap[seriesURL].length map.series[seriesURL].length = seriesMap[seriesURL].length
map.series[seriesURL].order = seriesMap[seriesURL].map( map.series[seriesURL].order = seriesMap[seriesURL].map(

View file

@@ -13,12 +13,11 @@ import { map, seriesMap } from "."
import { MarkdownData, ParseMode, PostData } from "../types/typing" import { MarkdownData, ParseMode, PostData } from "../types/typing"
/** /**
* * Data that's passed from {@link parseFile} to other function
*/ */
interface DataToPass { interface DataToPass {
path: string path: string
urlPath: string urlPath: string
fileOrFolderName: string
markdownRaw: string markdownRaw: string
markdownData: MarkdownData markdownData: MarkdownData
humanizedDuration: string humanizedDuration: string
@@ -51,22 +50,19 @@ export function recursiveParse(mode: ParseMode, path: string): void {
} }
} }
function parseFile( function parseFile(mode: ParseMode, path: string, fileName: string): void {
mode: ParseMode,
path: string,
fileOrFolderName: string
): void {
// stop if it is not a markdown file // stop if it is not a markdown file
if (!fileOrFolderName.endsWith(".md")) { if (!fileName.endsWith(".md")) {
console.log(`Ignoring non markdown file at: ${path}`) console.log(`Ignoring non markdown file at: ${path}`)
return return
} }
// raw markdown text /**
* Parse markdown
*/
const markdownRaw = fs.readFileSync(path, "utf8") const markdownRaw = fs.readFileSync(path, "utf8")
const markdownData: MarkdownData = parseFrontMatter(markdownRaw, path, mode) const markdownData: MarkdownData = parseFrontMatter(markdownRaw, path, mode)
// https://github.com/pritishvaidya/read-time-estimate
const { humanizedDuration, totalWords } = readTimeEstimate( const { humanizedDuration, totalWords } = readTimeEstimate(
markdownData.content, markdownData.content,
275, 275,
@@ -78,7 +74,6 @@ function parseFile(
const dataToPass: DataToPass = { const dataToPass: DataToPass = {
path, path,
urlPath: path2URL(path), urlPath: path2URL(path),
fileOrFolderName,
markdownRaw, markdownRaw,
markdownData, markdownData,
humanizedDuration, humanizedDuration,
@@ -86,30 +81,17 @@ function parseFile(
} }
switch (mode) { switch (mode) {
case ParseMode.POSTS: { case ParseMode.POSTS:
parsePost(dataToPass) parsePost(dataToPass)
break break
}
case ParseMode.UNSEARCHABLE: {
dataToPass.urlPath = dataToPass.urlPath.slice(
dataToPass.urlPath
.slice(1) // ignore the first slash
.indexOf("/") + 1
)
parseUnsearchable(dataToPass)
break
}
case ParseMode.SERIES: {
let urlPath = dataToPass.urlPath
urlPath = urlPath.slice(0, urlPath.lastIndexOf("_"))
dataToPass.urlPath = urlPath.replace(/\/$/, "") // remove trailing slash
case ParseMode.SERIES:
parseSeries(dataToPass) parseSeries(dataToPass)
break break
}
case ParseMode.UNSEARCHABLE:
parseUnsearchable(dataToPass)
break
} }
} }
@@ -185,17 +167,40 @@ function parsePost(data: DataToPass): void {
function parseSeries(data: DataToPass): void { function parseSeries(data: DataToPass): void {
const { const {
path, path,
urlPath, urlPath: _urlPath,
fileOrFolderName,
markdownRaw, markdownRaw,
markdownData, markdownData,
humanizedDuration, humanizedDuration,
totalWords, totalWords,
} = data } = data
if (!fileOrFolderName.includes("_") && !fileOrFolderName.startsWith("0")) // last part of the url without the slash
throw Error(`Invalid series post file name at: ${path}`) let lastPath = _urlPath.slice(_urlPath.lastIndexOf("/") + 1)
if (!lastPath.includes("_") && !lastPath.startsWith("0"))
throw Error(`Invalid series file name at: ${path}`)
// if file is a series descriptor or not (not = regular series post)
const isFileDescriptor = lastPath.startsWith("0") && !lastPath.includes("_")
// series post url
if (isFileDescriptor) {
lastPath = ""
} else {
lastPath = lastPath
.slice(lastPath.indexOf("_") + 1) // get string after the series index
.replace(/\/$/, "") // remove trailing slash
}
// get url until right before the lastPath
const urlUntilLastPath = _urlPath.slice(0, _urlPath.lastIndexOf("/") + 1)
// remove trailing slash if it's a regular series post
const urlPath =
(isFileDescriptor
? urlUntilLastPath.replace(/\/$/, "")
: urlUntilLastPath) + lastPath
// todo: separate interface for series descriptor (no word count and read time)
const postData: PostData = { const postData: PostData = {
title: markdownData.title, title: markdownData.title,
date: "", date: "",
@@ -237,8 +242,12 @@ function parseSeries(data: DataToPass): void {
}) })
} }
/**
*
*/
// series markdown starting with 0 is a series descriptor // series markdown starting with 0 is a series descriptor
if (fileOrFolderName.startsWith("0")) { if (isFileDescriptor) {
map.series[urlPath] = { map.series[urlPath] = {
...postData, ...postData,
order: [], order: [],
@@ -253,10 +262,14 @@ function parseSeries(data: DataToPass): void {
map.posts[urlPath] = postData map.posts[urlPath] = postData
// put series post in appropriate series
for (const key of Object.keys(map.series)) { for (const key of Object.keys(map.series)) {
if (urlPath.slice(0, urlPath.lastIndexOf("/")).includes(key)) { if (urlPath.includes(key)) {
const index = parseInt( const index = parseInt(
fileOrFolderName.slice(0, fileOrFolderName.lastIndexOf("_")) _urlPath.slice(
_urlPath.lastIndexOf("/") + 1,
_urlPath.lastIndexOf("_")
)
) )
if (isNaN(index)) if (isNaN(index))
@@ -272,13 +285,14 @@ function parseSeries(data: DataToPass): void {
} else { } else {
seriesMap[key] = [itemToPush] seriesMap[key] = [itemToPush]
} }
break break
} }
} }
} }
/** /**
* * Save content
*/ */
writeToJSON( writeToJSON(
@@ -291,7 +305,10 @@ function parseSeries(data: DataToPass): void {
} }
function parseUnsearchable(data: DataToPass): void { function parseUnsearchable(data: DataToPass): void {
const { urlPath, markdownData } = data const { urlPath: _urlPath, markdownData } = data
// convert path like /XXX/YYY/ZZZ to /YYY/ZZZ
const urlPath = _urlPath.slice(_urlPath.slice(1).indexOf("/") + 1)
addDocument({ addDocument({
title: markdownData.title, title: markdownData.title,
@@ -304,6 +321,10 @@ function parseUnsearchable(data: DataToPass): void {
title: markdownData.title, title: markdownData.title,
} }
/**
* Save content
*/
writeToJSON( writeToJSON(
`${contentDirectoryPath}/unsearchable${urlPath}.json`, `${contentDirectoryPath}/unsearchable${urlPath}.json`,
JSON.stringify({ JSON.stringify({