diff --git a/CHANGELOG.md b/CHANGELOG.md
index 070e980..2091f0d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,14 @@
 
 All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
 
+### [0.0.1-alpha.171](https://github.com/DIG-Network/dig-chia-sdk/compare/v0.0.1-alpha.170...v0.0.1-alpha.171) (2024-10-26)
+
+
+### Features
+
+* add interval promise ([380f5da](https://github.com/DIG-Network/dig-chia-sdk/commit/380f5dae168256e54cc9f0618e6aaa753546b2e5))
+* optionally use redis cache instead of node cache if the right env is set up ([9c48999](https://github.com/DIG-Network/dig-chia-sdk/commit/9c4899987512a9ed4984dd6d31f327c0ed162237))
+
 ### [0.0.1-alpha.170](https://github.com/DIG-Network/dig-chia-sdk/compare/v0.0.1-alpha.169...v0.0.1-alpha.170) (2024-10-10)
 
 
diff --git a/package-lock.json b/package-lock.json
index cb12db6..a31ce29 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "@dignetwork/dig-sdk",
-  "version": "0.0.1-alpha.170",
+  "version": "0.0.1-alpha.171",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@dignetwork/dig-sdk",
-      "version": "0.0.1-alpha.170",
+      "version": "0.0.1-alpha.171",
       "license": "ISC",
       "dependencies": {
         "@dignetwork/datalayer-driver": "^0.1.29",
@@ -34,6 +34,7 @@
         "node-stun": "^0.1.2",
         "progress-stream": "^2.0.0",
         "proper-lockfile": "^4.1.2",
+        "redis": "^4.7.0",
         "superagent": "^10.0.0",
         "unzipper": "^0.12.3"
       },
@@ -696,6 +697,59 @@
         "node": ">=14"
       }
     },
+    "node_modules/@redis/bloom": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.2.0.tgz",
+      "integrity": "sha512-HG2DFjYKbpNmVXsa0keLHp/3leGJz1mjh09f2RLGGLQZzSHpkmZWuwJbAvo3QcRY8p80m5+ZdXZdYOSBLlp7Cg==",
+      "peerDependencies": {
+        "@redis/client": "^1.0.0"
+      }
+    },
+    "node_modules/@redis/client": {
+      "version": "1.6.0",
+      "resolved": "https://registry.npmjs.org/@redis/client/-/client-1.6.0.tgz",
+      "integrity": "sha512-aR0uffYI700OEEH4gYnitAnv3vzVGXCFvYfdpu/CJKvk4pHfLPEy/JSZyrpQ+15WhXe1yJRXLtfQ84s4mEXnPg==",
+      "dependencies": {
+        "cluster-key-slot": "1.1.2",
+        "generic-pool": "3.9.0",
+        "yallist": "4.0.0"
+      },
+      "engines": {
+        "node": ">=14"
+      }
+    },
+    "node_modules/@redis/graph": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.1.1.tgz",
+      "integrity": "sha512-FEMTcTHZozZciLRl6GiiIB4zGm5z5F3F6a6FZCyrfxdKOhFlGkiAqlexWMBzCi4DcRoyiOsuLfW+cjlGWyExOw==",
+      "peerDependencies": {
+        "@redis/client": "^1.0.0"
+      }
+    },
+    "node_modules/@redis/json": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.7.tgz",
+      "integrity": "sha512-6UyXfjVaTBTJtKNG4/9Z8PSpKE6XgSyEb8iwaqDcy+uKrd/DGYHTWkUdnQDyzm727V7p21WUMhsqz5oy65kPcQ==",
+      "peerDependencies": {
+        "@redis/client": "^1.0.0"
+      }
+    },
+    "node_modules/@redis/search": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/@redis/search/-/search-1.2.0.tgz",
+      "integrity": "sha512-tYoDBbtqOVigEDMAcTGsRlMycIIjwMCgD8eR2t0NANeQmgK/lvxNAvYyb6bZDD4frHRhIHkJu2TBRvB0ERkOmw==",
+      "peerDependencies": {
+        "@redis/client": "^1.0.0"
+      }
+    },
+    "node_modules/@redis/time-series": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.1.0.tgz",
+      "integrity": "sha512-c1Q99M5ljsIuc4YdaCwfUEXsofakb9c8+Zse2qxTadu8TalLXuAESzLvFAvNVbkmSlvlzIQOLpBCmWI9wTOt+g==",
+      "peerDependencies": {
+        "@redis/client": "^1.0.0"
+      }
+    },
     "node_modules/@scure/base": {
       "version": "1.1.9",
       "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.1.9.tgz",
@@ -1656,6 +1710,14 @@
         "node": ">=0.8"
       }
     },
+    "node_modules/cluster-key-slot": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.2.tgz",
+      "integrity": "sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA==",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
     "node_modules/color-convert": {
       "version": "2.0.1",
       "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
@@ -2743,6 +2805,14 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/generic-pool": {
+      "version": "3.9.0",
+      "resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.9.0.tgz",
+      "integrity": "sha512-hymDOu5B53XvN4QT9dBmZxPX4CWhBPPLguTZ9MMFeFa/Kg0xWVfylOVNlJji/E7yTZWFd/q9GO5TxDLq156D7g==",
+      "engines": {
+        "node": ">= 4"
+      }
+    },
     "node_modules/get-caller-file": {
       "version": "2.0.5",
       "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
@@ -4595,6 +4665,19 @@
         "node": ">=8"
       }
     },
+    "node_modules/redis": {
+      "version": "4.7.0",
+      "resolved": "https://registry.npmjs.org/redis/-/redis-4.7.0.tgz",
+      "integrity": "sha512-zvmkHEAdGMn+hMRXuMBtu4Vo5P6rHQjLoHftu+lBqq8ZTA3RCVC/WzD790bkKKiNFp7d5/9PcSD19fJyyRvOdQ==",
+      "dependencies": {
+        "@redis/bloom": "1.2.0",
+        "@redis/client": "1.6.0",
+        "@redis/graph": "1.1.1",
+        "@redis/json": "1.0.7",
+        "@redis/search": "1.2.0",
+        "@redis/time-series": "1.1.0"
+      }
+    },
     "node_modules/require-directory": {
       "version": "2.1.1",
       "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
@@ -5495,8 +5578,7 @@
     "node_modules/yallist": {
       "version": "4.0.0",
       "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
-      "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
-      "dev": true
+      "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
    },
     "node_modules/yargs": {
       "version": "16.2.0",
diff --git a/package.json b/package.json
index 16d514a..d68b2be 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@dignetwork/dig-sdk",
-  "version": "0.0.1-alpha.170",
+  "version": "0.0.1-alpha.171",
   "description": "",
   "type": "commonjs",
   "main": "./dist/index.js",
@@ -50,6 +50,7 @@
     "node-stun": "^0.1.2",
     "progress-stream": "^2.0.0",
     "proper-lockfile": "^4.1.2",
+    "redis": "^4.7.0",
     "superagent": "^10.0.0",
     "unzipper": "^0.12.3"
   },
diff --git a/src/DigNetwork/ContentServer.ts b/src/DigNetwork/ContentServer.ts
index 0255b5b..1fc3e1a 100644
--- a/src/DigNetwork/ContentServer.ts
+++ b/src/DigNetwork/ContentServer.ts
@@ -2,12 +2,10 @@ import fs from "fs";
 import http from "http";
 import { URL } from "url";
 import { Readable } from "stream";
-import { getOrCreateSSLCerts } from "../utils/ssl";
-import { formatHost } from "../utils/network";
-import NodeCache from "node-cache";
+import { formatHost, DigCache, getOrCreateSSLCerts } from "../utils";
 
-const hasRootHashCache = new NodeCache({ stdTTL: 86400 });
-const wellKnownCache = new NodeCache({ stdTTL: 86400 });
+const hasRootHashCache = new DigCache({ stdTTL: 86400 });
+const wellKnownCache = new DigCache({ stdTTL: 86400 });
 
 export class ContentServer {
   private ipAddress: string;
@@ -197,7 +195,7 @@ export class ContentServer {
     const cacheKey = `${this.storeId}-${rootHash}`;
 
     // Check if the result is already cached
-    const cachedResult = hasRootHashCache.get<boolean>(cacheKey);
+    const cachedResult = await hasRootHashCache.get<boolean>(cacheKey);
     if (cachedResult !== undefined) {
       return cachedResult;
     }
diff --git a/src/DigNetwork/PropagationServer.ts b/src/DigNetwork/PropagationServer.ts
index 11e197f..eca99a1 100644
--- a/src/DigNetwork/PropagationServer.ts
+++ b/src/DigNetwork/PropagationServer.ts
@@ -12,19 +12,17 @@ import * as zlib from "zlib";
 import { asyncPool } from "../utils/promiseUtils";
 import { createSpinner } from "nanospinner";
 import { getFilePathFromSha256 } from "../utils/hashUtils";
-import { getOrCreateSSLCerts } from "../utils/ssl";
 import { green, red, blue, yellow, cyan } from "colorette";
 import { merkleIntegrityCheck } from "../utils/merkle";
 import { PassThrough } from "stream";
 import { promptCredentials } from "../utils/credentialsUtils";
 import { STORE_PATH } from "../utils/config";
 import { Wallet, DataStore } from "../blockchain";
-import { formatHost } from "../utils/network";
-import NodeCache from "node-cache";
+import { formatHost, DigCache, getOrCreateSSLCerts } from "../utils";
 
 // Initialize cache with a TTL of 1 week (604800 seconds)
-const storeExistsCache = new NodeCache({ stdTTL: 86400 });
-const pingUpdatecache = new NodeCache({ stdTTL: 86400 });
+const storeExistsCache = new DigCache({ stdTTL: 86400 });
+const pingUpdatecache = new DigCache({ stdTTL: 86400 });
 
 // Helper function to trim long filenames with ellipsis and ensure consistent padding
 function formatFilename(filename: string | undefined, maxLength = 30): string {
@@ -125,7 +123,7 @@ export class PropagationServer {
     const cacheKey = `${this.ipAddress}-${this.storeId}-${rootHash}`;
 
     // Check if response for this combination is already cached
-    if (pingUpdatecache.get(cacheKey) === "successfully synced") {
+    if ((await pingUpdatecache.get(cacheKey)) === "successfully synced") {
       return;
     }
 
@@ -151,7 +149,9 @@
     try {
       const response = await axios.post(url, data, config);
       console.log(
-        green(`✔ Successfully pinged peer: ${this.ipAddress}`),
+        green(
+          `✔ Successfully pinged peer: ${this.ipAddress} ${this.storeId}`
+        ),
         response.data
       );
 
@@ -162,7 +162,10 @@
       return response.data;
     } catch (error: any) {
-      console.error(red(`✖ Failed to ping peer: ${this.ipAddress}`), error.message);
+      console.error(
+        red(`✖ Failed to ping peer: ${this.ipAddress}`),
+        error.message
+      );
       console.error(red(error.message));
       throw error;
     }
@@ -215,7 +218,7 @@ export class PropagationServer {
       : `${this.storeId}-nohash`;
 
     // Check if the result is already cached
-    const cachedResult = storeExistsCache.get<{
+    const cachedResult = await storeExistsCache.get<{
       storeExists: boolean;
       rootHashExists: boolean;
     }>(cacheKey);
diff --git a/src/blockchain/DataStore.ts b/src/blockchain/DataStore.ts
index 72f2690..662f45b 100644
--- a/src/blockchain/DataStore.ts
+++ b/src/blockchain/DataStore.ts
@@ -35,13 +35,12 @@ import { CreateStoreUserInputs } from "../types";
 import { askForStoreDetails } from "../prompts";
 import { FileCache } from "../utils/FileCache";
 import { DataStoreSerializer } from "./DataStoreSerializer";
-import NodeCache from "node-cache";
 import { MAIN_NET_GENISES_CHALLENGE } from "../utils/config";
 import { StoreMonitorRegistry } from "./StoreMonitorRegistry";
+import { DigCache } from "../utils";
 
 // Initialize the cache with a TTL of 180 seconds (3 minutes)
-const rootHistoryCache = new NodeCache({ stdTTL: 180 });
-const allStoresCache = new NodeCache({ stdTTL: 15 });
+const rootHistoryCache = new DigCache({ stdTTL: 180 });
 
 const stat = promisify(fs.stat);
 const readdir = promisify(fs.readdir);
@@ -337,7 +336,7 @@
       const deserializedStore = DataStoreSerializer.deserialize({
         latestStore: cachedInfo.latestStore,
         latestHeight: cachedInfo.latestHeight.toString(),
-        latestHash: cachedInfo.latestHash
+        latestHash: cachedInfo.latestHash,
       });
 
       return {
@@ -419,7 +418,9 @@
     }
 
     // Check if the root history is cached for this storeId
-    const cachedHistory = rootHistoryCache.get(this.storeId);
+    const cachedHistory = await rootHistoryCache.get(
+      this.storeId
+    );
     if (cachedHistory) {
       return cachedHistory;
     }
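One caveat in the FullNodePeer.ts hunks below: `selectPeerRoundRobin` still calls `cooldownCache.has(ip)` inside synchronous `Array.prototype.filter` callbacks. With DigCache, `has` now returns a `Promise<boolean>`, and a Promise object is always truthy, so `!FullNodePeer.cooldownCache.has(ip)` is always false there. A sketch of one possible async-safe pre-filter (illustrative only; `filterOutCoolingDown` is not part of this commit):

    // Resolve the async membership checks first, then filter synchronously.
    async function filterOutCoolingDown(
      ips: string[],
      cooldownCache: { has(key: string): Promise<boolean> }
    ): Promise<string[]> {
      const coolingDown = await Promise.all(ips.map((ip) => cooldownCache.has(ip)));
      return ips.filter((_, i) => !coolingDown[i]);
    }
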
diff --git a/src/blockchain/FullNodePeer.ts b/src/blockchain/FullNodePeer.ts
index d0effc1..3b95917 100644
--- a/src/blockchain/FullNodePeer.ts
+++ b/src/blockchain/FullNodePeer.ts
@@ -7,7 +7,7 @@ import net from "net";
 import { createSpinner } from "nanospinner";
 import { MIN_HEIGHT, MIN_HEIGHT_HEADER_HASH } from "../utils/config";
 import { Environment } from "../utils/Environment";
-import NodeCache from "node-cache";
+import { DigCache } from "../utils";
 import Bottleneck from "bottleneck";
 
 const FULLNODE_PORT = 8444;
@@ -41,12 +41,12 @@ export class FullNodePeer {
   private static instance: FullNodePeer | null = null;
 
   // Cooldown cache to exclude faulty peers temporarily
-  private static cooldownCache = new NodeCache({
+  private static cooldownCache = new DigCache({
     stdTTL: COOLDOWN_DURATION / 1000,
   });
 
   // Failed DNS hosts cooldown cache
-  private static failedDNSCache = new NodeCache({ stdTTL: 86400 });
+  private static failedDNSCache = new DigCache({ stdTTL: 86400 });
 
   // Peer reliability weights
   private static peerWeights: Map<string, number> = new Map();
@@ -58,10 +58,10 @@ export class FullNodePeer {
   private static peerInfos: Map = new Map();
 
   // Cache for fetched peer IPs
-  private static peerIPCache = new NodeCache({ stdTTL: CACHE_DURATION / 1000 });
+  private static peerIPCache = new DigCache({ stdTTL: CACHE_DURATION / 1000 });
 
   // Cache for DNS_HOST resolved IPs with a TTL of 3 days (259200 seconds)
-  private static dnsCache = new NodeCache({
+  private static dnsCache = new DigCache({
     stdTTL: 259200,
     checkperiod: 3600,
   });
@@ -207,7 +209,9 @@
     }
 
     // Check if cached peer IPs exist
-    const cachedPeerIPs = FullNodePeer.peerIPCache.get<string[]>("peerIPs");
+    const cachedPeerIPs = await FullNodePeer.peerIPCache.get<string[]>(
+      "peerIPs"
+    );
     if (cachedPeerIPs) {
       return cachedPeerIPs;
     }
@@ -216,7 +218,7 @@
     const fetchedPeers: string[] = [];
     for (const DNS_HOST of DNS_HOSTS) {
       // Check if DNS_HOST is in failedDNSCache
-      if (FullNodePeer.failedDNSCache.has(DNS_HOST)) {
+      if (await FullNodePeer.failedDNSCache.has(DNS_HOST)) {
         continue;
       }
 
@@ -224,8 +226,8 @@
       let ips: string[] = [];
 
       // Check if DNS_HOST's IPs are cached
-      if (FullNodePeer.dnsCache.has(DNS_HOST)) {
-        ips = FullNodePeer.dnsCache.get<string[]>(DNS_HOST) || [];
+      if (await FullNodePeer.dnsCache.has(DNS_HOST)) {
+        ips = (await FullNodePeer.dnsCache.get<string[]>(DNS_HOST)) || [];
       } else {
         // Resolve DNS_HOST and cache the results
         ips = await resolve4(DNS_HOST);
@@ -308,24 +310,28 @@
    */
   private static selectPeerRoundRobin(): string {
     const availablePrioritizedPeers = FullNodePeer.prioritizedPeers.filter(
-      (ip) => !FullNodePeer.cooldownCache.has(ip) && FullNodePeer.peerWeights.has(ip)
+      (ip) =>
+        !FullNodePeer.cooldownCache.has(ip) && FullNodePeer.peerWeights.has(ip)
     );
-    
+
     if (availablePrioritizedPeers.length > 0) {
       // Select the first available prioritized peer
       return availablePrioritizedPeers[0];
     }
-    
+
     // If no prioritized peers are available, proceed with round-robin among other peers
     const regularPeers = Array.from(FullNodePeer.peerWeights.keys()).filter(
-      (ip) => !FullNodePeer.prioritizedPeers.includes(ip) && !FullNodePeer.cooldownCache.has(ip)
+      (ip) =>
+        !FullNodePeer.prioritizedPeers.includes(ip) &&
+        !FullNodePeer.cooldownCache.has(ip)
     );
-    
+
     if (regularPeers.length === 0) {
       throw new Error("No available peers to connect.");
     }
-    
+
-    const selectedPeer = regularPeers[FullNodePeer.roundRobinIndex % regularPeers.length];
+    const selectedPeer =
+      regularPeers[FullNodePeer.roundRobinIndex % regularPeers.length];
     FullNodePeer.roundRobinIndex += 1;
     return selectedPeer;
   }
@@ -457,13 +463,12 @@
       return result;
     } catch (error: any) {
       FullNodePeer.handlePeerDisconnection(peerIP);
-      
+
       // If the error is WebSocket-related or timeout, reset the peer
       if (
         error.message.includes("WebSocket") ||
         error.message.includes("Operation timed out")
       ) {
-
         const newPeer = await this.getBestPeer();
         return (newPeer as any)[prop](...args);
       }
diff --git a/src/blockchain/ServerCoin.ts b/src/blockchain/ServerCoin.ts
index db84d50..1aca8a3 100644
--- a/src/blockchain/ServerCoin.ts
+++ b/src/blockchain/ServerCoin.ts
@@ -13,14 +13,12 @@ import { Wallet } from "./Wallet";
 import { NconfManager } from "../utils/NconfManager";
 import { CoinData, ServerCoinData } from "../types";
 import { DataStore } from "./DataStore";
-import NodeCache from "node-cache";
-import { getPublicHost } from "../utils/network";
-import { Environment } from "../utils/Environment";
+import { getPublicHost, DigCache, Environment } from "../utils";
 
 const serverCoinCollateral = 300_000_000;
 
 // Initialize the cache with a TTL of 300 seconds (5 minutes)
-const serverCoinPeersCache = new NodeCache({ stdTTL: 300 });
+const serverCoinPeersCache = new DigCache({ stdTTL: 300 });
 
 export class ServerCoin {
   private storeId: string;
@@ -194,7 +192,7 @@ export class ServerCoin {
     const cacheKey = `serverCoinPeers-${this.storeId}-${epoch}`;
 
     // Check if the result is already cached
-    const cachedPeers = serverCoinPeersCache.get(cacheKey);
+    const cachedPeers = await serverCoinPeersCache.get(cacheKey);
     if (cachedPeers) {
       return cachedPeers;
     }
diff --git a/src/utils/DigCache.ts b/src/utils/DigCache.ts
new file mode 100644
index 0000000..98fa919
--- /dev/null
+++ b/src/utils/DigCache.ts
@@ -0,0 +1,240 @@
+// src/utils/DigCache.ts
+import NodeCache from "node-cache";
+import { createClient, RedisClientType } from "redis";
+
+export interface ICache {
+  set<T>(key: string, value: T, ttl?: number): Promise<boolean>;
+  get<T>(key: string): Promise<T | undefined>;
+  del(key: string): Promise<number>;
+  has(key: string): Promise<boolean>;
+  flushAll(): Promise<boolean>;
+  keys(pattern?: string): Promise<string[]>;
+  ttl(key: string): Promise<number>;
+  // Add other NodeCache methods as needed
+}
+
+class DigCache implements ICache {
+  private cache: NodeCache | RedisClientType;
+  private useRedis: boolean;
+
+  /**
+   * Constructor with the same signature as NodeCache.
+   * @param options - NodeCache options
+   */
+  constructor(options?: NodeCache.Options) {
+    // Determine whether to use Redis based on the environment variable
+    this.useRedis = process.env.USE_REDIS === 'true';
+
+    if (this.useRedis) {
+      // Ensure REDIS_URL is provided
+      const redisUrl = process.env.REDIS_URL;
+      if (!redisUrl) {
+        throw new Error("REDIS_URL environment variable is not set.");
+      }
+
+      const client: RedisClientType = createClient({ url: redisUrl });
+      client.on("error", (err) => console.error("Redis Client Error", err));
+      // Initialize connection in the initialize method
+      this.cache = client;
+    } else {
+      // Initialize NodeCache with provided options
+      this.cache = new NodeCache(options);
+    }
+  }
+
+  /**
+   * Initializes the Redis connection if Redis is being used.
+   * Call this method before performing any cache operations.
+   */
+  async initialize(): Promise<void> {
+    if (this.useRedis) {
+      try {
+        await (this.cache as RedisClientType).connect();
+        console.log("Connected to Redis successfully.");
+      } catch (error) {
+        console.error("Failed to connect to Redis:", error);
+        throw error;
+      }
+    }
+  }
+
+  /**
+   * Gracefully disconnects the Redis client if Redis is being used.
+   * Call this method when your application is shutting down.
+   */
+  async disconnect(): Promise<void> {
+    if (this.useRedis) {
+      try {
+        await (this.cache as RedisClientType).quit();
+        console.log("Disconnected from Redis successfully.");
+      } catch (error) {
+        console.error("Error disconnecting Redis:", error);
+      }
+    }
+  }
+
+  async set<T>(key: string, value: T, ttl?: number): Promise<boolean> {
+    if (this.useRedis) {
+      try {
+        const serializedValue = JSON.stringify(value);
+        if (ttl !== undefined) {
+          // Redis TTL is in seconds
+          const result = await (this.cache as RedisClientType).set(
+            key,
+            serializedValue,
+            { EX: ttl }
+          );
+          return result === "OK";
+        } else {
+          const result = await (this.cache as RedisClientType).set(
+            key,
+            serializedValue
+          );
+          return result === "OK";
+        }
+      } catch (error) {
+        console.error(`Redis set error for key "${key}":`, error);
+        return false;
+      }
+    } else {
+      try {
+        const result = (this.cache as NodeCache).set(key, value, ttl ?? 0);
+        return result;
+      } catch (error) {
+        console.error(`NodeCache set error for key "${key}":`, error);
+        return false;
+      }
+    }
+  }
+
+  async get<T>(key: string): Promise<T | undefined> {
+    if (this.useRedis) {
+      try {
+        const value = await (this.cache as RedisClientType).get(key);
+        if (value === null) {
+          return undefined;
+        }
+        return JSON.parse(value) as T;
+      } catch (error) {
+        console.error(`Redis get error for key "${key}":`, error);
+        return undefined;
+      }
+    } else {
+      try {
+        const value = (this.cache as NodeCache).get<T>(key);
+        return value;
+      } catch (error) {
+        console.error(`NodeCache get error for key "${key}":`, error);
+        return undefined;
+      }
+    }
+  }
+
+  async del(key: string): Promise<number> {
+    if (this.useRedis) {
+      try {
+        const result = await (this.cache as RedisClientType).del(key);
+        return result;
+      } catch (error) {
+        console.error(`Redis del error for key "${key}":`, error);
+        return 0;
+      }
+    } else {
+      try {
+        const result = (this.cache as NodeCache).del(key);
+        return result;
+      } catch (error) {
+        console.error(`NodeCache del error for key "${key}":`, error);
+        return 0;
+      }
+    }
+  }
+
+  async has(key: string): Promise<boolean> {
+    if (this.useRedis) {
+      try {
+        const result = await (this.cache as RedisClientType).exists(key);
+        return result > 0;
+      } catch (error) {
+        console.error(`Redis exists error for key "${key}":`, error);
+        return false;
+      }
+    } else {
+      try {
+        const result = (this.cache as NodeCache).has(key);
+        return result;
+      } catch (error) {
+        console.error(`NodeCache has error for key "${key}":`, error);
+        return false;
+      }
+    }
+  }
+
+  async flushAll(): Promise<boolean> {
+    if (this.useRedis) {
+      try {
+        await (this.cache as RedisClientType).flushAll();
+        return true;
+      } catch (error) {
+        console.error("Redis flushAll error:", error);
+        return false;
+      }
+    } else {
+      try {
+        (this.cache as NodeCache).flushAll();
+        return true;
+      } catch (error) {
+        console.error("NodeCache flushAll error:", error);
+        return false;
+      }
+    }
+  }
+
+  async keys(pattern: string = "*"): Promise<string[]> {
+    if (this.useRedis) {
+      try {
+        const result = await (this.cache as RedisClientType).keys(pattern);
+        return result;
+      } catch (error) {
+        console.error(`Redis keys error with pattern "${pattern}":`, error);
+        return [];
+      }
+    } else {
+      try {
+        const result = (this.cache as NodeCache).keys();
+        return result;
+      } catch (error) {
+        return [];
+      }
+    }
+  }
+
+  async ttl(key: string): Promise<number> {
+    if (this.useRedis) {
+      try {
+        const result = await (this.cache as RedisClientType).ttl(key);
+        return result; // TTL in seconds. -2: key does not exist, -1: no TTL
+      } catch (error) {
+        console.error(`Redis ttl error for key "${key}":`, error);
+        return -2;
+      }
+    } else {
+      const ttlMs = (this.cache as NodeCache).getTtl(key);
+
+      if (ttlMs === undefined) {
+        return -2; // Key does not exist
+      }
+
+      if (ttlMs === 0) {
+        return -1; // No TTL set
+      }
+
+      const ttlSec = Math.floor((ttlMs - Date.now()) / 1000);
+      return ttlSec > 0 ? ttlSec : -2;
+    }
+  }
+
+  // Implement other NodeCache methods as needed
+}
+
+export { DigCache };
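A minimal usage sketch for the new DigCache (not part of the diff; it assumes the USE_REDIS/REDIS_URL environment variables described above, and that initialize() and disconnect() are no-ops on the NodeCache backend, as the class defines):

    import { DigCache } from "./DigCache";

    const cache = new DigCache({ stdTTL: 300 }); // NodeCache options; backend chosen by USE_REDIS

    async function demo(): Promise<void> {
      await cache.initialize(); // connects only when USE_REDIS=true
      await cache.set("greeting", { msg: "hi" }, 60); // TTL in seconds on both backends
      const value = await cache.get<{ msg: string }>("greeting");
      console.log(value?.msg); // "hi"
      await cache.disconnect(); // quits the Redis client when one exists
    }

    demo().catch(console.error);
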
diff --git a/src/utils/PeerRanker.ts b/src/utils/PeerRanker.ts
index f91171e..8b411a7 100644
--- a/src/utils/PeerRanker.ts
+++ b/src/utils/PeerRanker.ts
@@ -1,12 +1,12 @@
 import axios, { AxiosRequestConfig } from 'axios';
 import fs from 'fs';
 import https from 'https';
-import NodeCache from 'node-cache'; // Import node-cache
+import { DigCache } from './DigCache';
 import { getOrCreateSSLCerts } from './ssl';
 import { asyncPool } from './promiseUtils';
 
 // Cache with TTL of 1 day (86400 seconds)
-const peerCache = new NodeCache({ stdTTL: 86400 });
+const peerCache = new DigCache({ stdTTL: 86400 });
 
 export interface PeerMetrics {
   ip: string;
@@ -51,7 +51,7 @@
    * @returns Promise resolving to the latency in milliseconds or rejecting if the peer fails.
    */
   private async measureLatency(ip: string): Promise<number> {
-    const cachedMetrics = peerCache.get<PeerMetrics>(ip);
+    const cachedMetrics = await peerCache.get<PeerMetrics>(ip);
     if (cachedMetrics && cachedMetrics.latency) {
       return cachedMetrics.latency;
     }
@@ -107,7 +107,7 @@
    * @returns Promise resolving to the upload bandwidth in bytes per second or rejecting if the peer fails.
    */
   private async measureBandwidth(ip: string): Promise<number> {
-    const cachedMetrics = peerCache.get<PeerMetrics>(ip);
+    const cachedMetrics = await peerCache.get<PeerMetrics>(ip);
     if (cachedMetrics && cachedMetrics.bandwidth) {
       return cachedMetrics.bandwidth;
     }
diff --git a/src/utils/index.ts b/src/utils/index.ts
index 62ac58c..91dadc3 100644
--- a/src/utils/index.ts
+++ b/src/utils/index.ts
@@ -12,4 +12,5 @@ export * from './ssl';
 export * from './validationUtils';
 export * from './Environment';
 export * from './promiseUtils';
-export * from './PeerRanker';
\ No newline at end of file
+export * from './PeerRanker';
+export * from './DigCache';
\ No newline at end of file
diff --git a/src/utils/promiseUtils.ts b/src/utils/promiseUtils.ts
index 4ff17cc..60208c2 100644
--- a/src/utils/promiseUtils.ts
+++ b/src/utils/promiseUtils.ts
@@ -51,3 +51,35 @@ export const withTimeout = (
     ),
   ]);
 };
+
+/**
+ * Wraps a promise to execute a callback every 30 seconds while the promise is pending.
+ *
+ * @param {Promise<T>} promise - The promise to wrap.
+ * @param {() => void} callback - The callback function to call every 30 seconds.
+ * @returns {Promise<T>} - A new promise that resolves or rejects with the original promise's result.
+ */
+export const withIntervalCallback = <T>(
+  promise: Promise<T>,
+  callback: () => void
+): Promise<T> => {
+  const intervalTime = 30000; // 30 seconds in milliseconds
+
+  let intervalId: NodeJS.Timeout;
+
+  // Start the interval that calls the callback every 30 seconds
+  intervalId = setInterval(() => {
+    callback();
+  }, intervalTime);
+
+  // Return a new promise that clears the interval when the original promise settles
+  return promise
+    .then((result) => {
+      clearInterval(intervalId);
+      return result;
+    })
+    .catch((error) => {
+      clearInterval(intervalId);
+      throw error;
+    });
+};
\ No newline at end of file
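And a usage sketch for withIntervalCallback (a hypothetical example, not from the commit): it keeps a long-running promise observable by firing the callback every 30 seconds until the promise settles, clearing the interval on both resolve and reject:

    import { withIntervalCallback } from "./promiseUtils";

    // Hypothetical slow operation; any Promise<T> works here.
    const slowSync: Promise<string> = new Promise((resolve) =>
      setTimeout(() => resolve("synced"), 120_000)
    );

    withIntervalCallback(slowSync, () => console.log("still syncing..."))
      .then((result) => console.log(result)) // interval already cleared here
      .catch(console.error);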