diff --git a/README.md b/README.md
index 3aac2b4..c0b5189 100644
--- a/README.md
+++ b/README.md
@@ -137,7 +137,7 @@ https://nhentai.net was Clouflare protection enabled, for default jandapress use
 
 ![image](https://cdn.discordapp.com/attachments/952117487166705747/1073694957111627906/Screenshot_265.png)
 
 ### The solution
-You will need instance such as VPS and install `chorme` or `chromium` or `firefox`, You have to set `NHENTAI_IP_ORIGIN` to `false`, set `COOKIE` and `USER_AGENT`. We'll simulate the request with [tough-cookie](https://github.com/salesforce/tough-cookie) and [http-cookie-agent](https://www.npmjs.com/package/http-cookie-agent)
+You will need an instance such as a VPS and install Chrome, Chromium, or Firefox. You have to set `NHENTAI_IP_ORIGIN` to `false`, and set `COOKIE` and `USER_AGENT`. We'll simulate the request with [tough-cookie](https://github.com/salesforce/tough-cookie) and [http-cookie-agent](https://www.npmjs.com/package/http-cookie-agent)
 
 ![image](https://cdn.discordapp.com/attachments/952117487166705747/1073699069643468902/Screenshot_267_copy.jpg)
diff --git a/package.json b/package.json
index 968126b..848d193 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "jandapress",
-  "version": "2.1.4-alpha",
+  "version": "2.1.5-alpha",
   "description": "RESTful and experimental API for the Doujinshi, Pressing the whole nhentai, pururin, hentaifox, and more.. where the official one is lack.",
   "main": "build/src/index.js",
   "scripts": {
diff --git a/src/scraper/nhentai/nhentaiGetController.ts b/src/scraper/nhentai/nhentaiGetController.ts
index 1a036f3..8af62f0 100644
--- a/src/scraper/nhentai/nhentaiGetController.ts
+++ b/src/scraper/nhentai/nhentaiGetController.ts
@@ -21,7 +21,7 @@ interface INhentaiGet {
   num_favorites: number;
   artist: string[];
   group: string;
-  parodies: string;
+  parodies: string[];
   characters: string[];
   upload_date: string;
 }
@@ -47,7 +47,11 @@ export async function scrapeContent(url: string, random = false) {
 
   //get all tags.name
   const tagsRaw = raw.tags;
-  const tags = Object.keys(tagsRaw).map((key) => tagsRaw[parseInt(key)].name);
+  // all tags without filter
+  // const tags = Object.keys(tagsRaw).map((key) => tagsRaw[parseInt(key)].name);
+
+  const tagsFilter = tagsRaw.filter((tag) => tag.type === "tag");
+  const tags = tagsFilter.map((tag) => tag.name).sort() || [];
 
   const artistRaw = tagsRaw.filter((tag) => tag.type === "artist");
   const artist = artistRaw.map((tag) => tag.name) || [];
@@ -56,11 +60,11 @@ export async function scrapeContent(url: string, random = false) {
   const languageRaw = tagsRaw.find((tag) => tag.type === "language");
   const language = languageRaw ? languageRaw.name : "";
 
-  const parodiesRaw = tagsRaw.find((tag) => tag.type === "parody");
-  const parodies = parodiesRaw ? parodiesRaw.name : "";
+  const parodiesRaw = tagsRaw.filter((tag) => tag.type === "parody");
+  const parodies = parodiesRaw.map((tag) => tag.name) || [];
 
   const groupRaw = tagsRaw.find((tag) => tag.type === "group");
-  const group = groupRaw ? groupRaw.name : "";
+  const group = groupRaw ? groupRaw.name : "None";
 
   //get all "type": "character" in tagsRaw
   const charactersRaw = tagsRaw.filter((tag) => tag.type === "character");