Skip to content

Commit

Permalink
chore: normalise promises (#6177)
Browse files Browse the repository at this point in the history
* chore: normalise promises

* Update next-data/generators/websiteFeeds.mjs

Co-authored-by: Caner Akdas <[email protected]>
Signed-off-by: Brian Muenzenmeyer <[email protected]>

---------

Signed-off-by: Brian Muenzenmeyer <[email protected]>
Co-authored-by: Brian Muenzenmeyer <[email protected]>
Co-authored-by: Caner Akdas <[email protected]>
  • Loading branch information
3 people authored Dec 4, 2023
1 parent 48b8845 commit 03ebfc9
Show file tree
Hide file tree
Showing 3 changed files with 72 additions and 68 deletions.
37 changes: 22 additions & 15 deletions next-data/generators/blogData.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import readline from 'node:readline';

import graymatter from 'gray-matter';

import * as nextHelpers from '../../next.helpers.mjs';
import { getMarkdownFiles } from '../../next.helpers.mjs';

// gets the current blog path based on local module path
const blogPath = join(process.cwd(), 'pages/en/blog');
Expand Down Expand Up @@ -54,36 +54,38 @@ const getFrontMatter = (filename, source) => {
*/
const generateBlogData = async () => {
// we retrieve all the filenames of all blog posts
const filenames = await nextHelpers.getMarkdownFiles(
process.cwd(),
'pages/en/blog',
['**/index.md', '**/pagination.md']
);
const filenames = await getMarkdownFiles(process.cwd(), 'pages/en/blog', [
'**/index.md',
'**/pagination.md',
]);

return new Promise(resolve => {
const blogPosts = [];
const rawFrontmatter = [];

for (const filename of filenames) {
let rawFrontmatter = '';
let countOfSeparators = 0;

filenames.forEach(filename => {
// We create a stream so the file is read incrementally rather than all at once
const _stream = createReadStream(join(blogPath, filename));

// We create a readline interface to read the file line-by-line
const _readLine = readline.createInterface({ input: _stream });

// Creates an array of the metadata based on the filename
// This prevents concurrency issues since the for-loop is synchronous
// and these event listeners are not
rawFrontmatter[filename] = [0, ''];

// We read line by line
_readLine.on('line', line => {
rawFrontmatter += `${line}\n`;
rawFrontmatter[filename][1] += `${line}\n`;

// We observe the frontmatter separators
if (line === '---') {
countOfSeparators += 1;
rawFrontmatter[filename][0] += 1;
}

// Once we have two separators we close the readLine and the stream
if (countOfSeparators === 2) {
if (rawFrontmatter[filename][0] === 2) {
_readLine.close();
_stream.close();
}
Expand All @@ -93,7 +95,12 @@ const generateBlogData = async () => {
// This allows us to only read the frontmatter part of each file
// and optimise the read-process as we have thousands of markdown files
_readLine.on('close', () => {
blogPosts.push(getFrontMatter(filename, rawFrontmatter));
const frontmatter = getFrontMatter(
filename,
rawFrontmatter[filename][1]
);

blogPosts.push(frontmatter);

// Once we finish reading all files
if (blogPosts.length === filenames.length) {
Expand All @@ -104,7 +111,7 @@ const generateBlogData = async () => {
});
}
});
}
});
});
};

Expand Down
26 changes: 11 additions & 15 deletions next-data/generators/releaseData.mjs
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
'use strict';

import nodevu from '@nodevu/core';

// Gets the appropriate release status for each major release
const getNodeReleaseStatus = (now, support) => {
const { endOfLife, maintenanceStart, ltsStart, currentStart } = support;
Expand Down Expand Up @@ -29,10 +31,8 @@ const getNodeReleaseStatus = (now, support) => {
*
* @returns {Promise<import('../../types').NodeRelease[]>}
*/
const generateReleaseData = async () => {
return import('@nodevu/core').then(async ({ default: nodevu }) => {
const nodevuOutput = await nodevu({ fetch: fetch });

const generateReleaseData = () => {
return nodevu({ fetch: fetch }).then(nodevuOutput => {
// Filter out those without documented support
// Basically those not in schedule.json
const majors = Object.values(nodevuOutput).filter(major => !!major.support);
Expand Down Expand Up @@ -64,17 +64,13 @@ const generateReleaseData = async () => {
};
});

return Promise.resolve(
// nodevu returns duplicated v0.x versions (v0.12, v0.10, ...).
// This behavior seems intentional as the case is hardcoded in nodevu,
// see https://github.com/cutenode/nodevu/blob/0c8538c70195fb7181e0a4d1eeb6a28e8ed95698/core/index.js#L24.
// This line ignores those duplicated versions and takes the latest
// v0.x version (v0.12.18). It is also consistent with the legacy
// nodejs.org implementation.
nodeReleases.filter(
release => release.major !== 0 || release.version === '0.12.18'
)
);
// nodevu returns duplicated v0.x versions (v0.12, v0.10, ...).
// This behavior seems intentional as the case is hardcoded in nodevu,
// see https://github.com/cutenode/nodevu/blob/0c8538c70195fb7181e0a4d1eeb6a28e8ed95698/core/index.js#L24.
// This line ignores those duplicated versions and takes the latest
// v0.x version (v0.12.18). It is also consistent with the legacy
// nodejs.org implementation.
return nodeReleases.filter(r => r.major !== 0 || r.version === '0.12.18');
});
};

Expand Down
77 changes: 39 additions & 38 deletions next-data/generators/websiteFeeds.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -5,50 +5,51 @@ import { Feed } from 'feed';
import { BASE_URL, BASE_PATH } from '../../next.constants.mjs';
import { siteConfig } from '../../next.json.mjs';

// This is the Base URL for the Node.js Website
// with English locale (which is where the website feeds run)
const canonicalUrl = `${BASE_URL}${BASE_PATH}/en`;

/**
* This method generates RSS website feeds based on the current website configuration
* and the current blog data that is available
*
* @param {Promise<import('../../types').BlogDataRSC>} blogData
*/
const generateWebsiteFeeds = async blogData => {
const canonicalUrl = `${BASE_URL}${BASE_PATH}/en`;

// Wait for the blog data to be generated
const { posts } = await blogData;

/**
* This generates all the Website RSS Feeds that are used for the website
*
* @type {[string, Feed][]}
*/
const websiteFeeds = siteConfig.rssFeeds.map(
({ category, title, description, file }) => {
const feed = new Feed({
id: file,
title: title,
language: 'en',
link: `${canonicalUrl}/feed/${file}`,
description: description || description,
});

const blogFeedEntries = posts
.filter(post => !category || post.category === category)
.map(post => ({
id: post.slug,
title: post.title,
author: post.author,
date: new Date(post.date),
link: `${canonicalUrl}${post.slug}`,
}));

blogFeedEntries.forEach(entry => feed.addItem(entry));

return [file, feed];
}
);

return new Map(websiteFeeds);
const generateWebsiteFeeds = blogData =>
  blogData.then(({ posts }) => {
    /**
     * Builds every Website RSS Feed configured for the website,
     * keyed by its output filename
     *
     * @type {[string, Feed][]}
     */
    const feedEntries = siteConfig.rssFeeds.map(
      ({ category, title, description, file }) => {
        const feed = new Feed({
          id: file,
          title,
          language: 'en',
          link: `${canonicalUrl}/feed/${file}`,
          description,
        });

        // Keep only the posts belonging to this feed's category;
        // a feed without a category receives every post
        const matchingPosts = posts.filter(
          post => !category || post.category === category
        );

        for (const post of matchingPosts) {
          feed.addItem({
            id: post.slug,
            title: post.title,
            author: post.author,
            date: new Date(post.date),
            link: `${canonicalUrl}${post.slug}`,
          });
        }

        return [file, feed];
      }
    );

    return new Map(feedEntries);
  });

export default generateWebsiteFeeds;

0 comments on commit 03ebfc9

Please sign in to comment.