diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 16da035..855ee7e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,44 +10,88 @@ on:
 jobs:
   test:
+    env:
+      CI: true
     timeout-minutes: 10
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        node-version: [14.x, 16.x, 18.x, 20.x]
+        node-version: [18.x]
+        kernel: [6.5]
     steps:
       - name: Checkout
        uses: actions/checkout@v4
 
-      - name: Install node
-        uses: actions/setup-node@v4
-        with:
-          node-version: ${{ matrix.node-version }}
+      - name: Uninstall all linux-headers packages
+        run: |
+          sudo apt-get remove --purge -y 'linux-headers-*'
 
-      - name: install redis-cli
-        run: sudo apt-get install redis-tools
+      - name: Reinstall specific linux-headers package
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y linux-headers-$(uname -r)
 
-      - name: Install dependencies
-        run: npm install
+      - name: Install build dependencies
+        run: |
+          sudo apt-get update && sudo apt-get install -y build-essential linux-headers-$(uname -r) wget unzip libelf-dev linux-source-6.5.0
+          tar jxvf /usr/src/linux-source-6.5.0.tar.bz2
+          cd linux-source-6.5.0
 
-      - name: Setup Standalone Tests
-        run: make test-standalone-setup
+      - name: Configure kernel sources and build knetstat
+        run: |
+          wget https://github.com/veithen/knetstat/archive/refs/heads/master.zip
+          unzip master.zip
+          cd knetstat-master
+          # wget https://cl.archive.ubuntu.com/ubuntu/pool/main/l/linux-azure-6.5/linux-azure-6.5_6.5.0.orig.tar.gz && tar xvzf linux-azure-6.5_6.5.0.orig.tar.gz
+          mv ../linux-source-6.5.0 linux-6.5
+          cp /boot/config-$(uname -r) linux-${{ matrix.kernel }}/.config
+          make -C linux-${{ matrix.kernel }} clean
+          yes "" | make -C linux-${{ matrix.kernel }} oldconfig
+          make -C linux-${{ matrix.kernel }} modules_prepare
+          sed -i '/Module.symvers/d' ./Makefile
+          make KBUILD_MODPOST_WARN=1 KSRC=linux-${{ matrix.kernel }}
+          sudo insmod knetstat.ko
+          sudo lsmod | grep knetstat
+
+      - name: dmesg
+        if: always()
+        run: |
+          sudo dmesg | tail -n 10
+          ls -la /lib/modules
+
-      - name: Run Standalone tests
-        run: make test-standalone
+      - name: Run knetstat
+        run: cat /proc/net/tcpstat
 
-      - name: Teardown Standalone Tests
-        run: make test-standalone-teardown
-
-      - name: Setup Clustered Tests
-        run: make test-cluster-setup
-
-      - name: Check Redis Cluster
-        run: timeout 60 bash <<< "until redis-cli -c -p 16371 cluster info | grep 'cluster_state:ok'; do sleep 1; done"
-
-      - name: Run Clustered tests
-        run: make test-cluster
-
-      - name: Teardown Clustered Tests
-        run: make test-cluster-teardown
+#      - name: Install node
+#        uses: actions/setup-node@v4
+#        with:
+#          node-version: ${{ matrix.node-version }}
+#
+#      - name: install redis-cli
+#        run: sudo apt-get install redis-tools
+#
+#      - name: Install dependencies
+#        run: npm install
+#
+#      - name: Setup Standalone Tests
+#        run: make test-standalone-setup
+#
+#      - name: Run Standalone tests
+#        run: make test-standalone
+#
+#      - name: Teardown Standalone Tests
+#        run: make test-standalone-teardown
+#
+#      - name: Setup Clustered Tests
+#        run: make test-cluster-setup
+#
+#      - name: Check Redis Cluster
+#        run: timeout 60 bash <<< "until redis-cli -c -p 16371 cluster info | grep 'cluster_state:ok'; do sleep 1; done"
+#
+#      - name: Run Clustered tests
+#        run: make test-cluster
+#
+#      - name: Teardown Clustered Tests
+#        run: make test-cluster-teardown
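The workflow above builds and loads [veithen/knetstat](https://github.com/veithen/knetstat) so that `/proc/net/tcpstat` lists per-socket TCP options, which is what makes the keepalive behaviour introduced in this change observable from CI. A rough sketch of how a test could read that file, assuming knetstat reports an `SO_KEEPALIVE=1` flag in its options column (the exact output format is an assumption, not something this diff relies on):

```js
// Rough sketch, not part of the committed suite: check whether the Redis
// connection's local port has SO_KEEPALIVE enabled according to knetstat.
const fs = require('fs');

function socketHasKeepAlive(localPort) {
  const rows = fs.readFileSync('/proc/net/tcpstat', 'utf8').split('\n');
  const row = rows.find((line) => line.includes(`:${localPort} `));
  return Boolean(row && row.includes('SO_KEEPALIVE=1')); // column format assumed
}
```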
diff --git a/README.md b/README.md
index dc5e939..cc8e47d 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,6 @@ It's a fork from [LimitDB](https://github.com/limitd/limitdb).
 - [Configure](#configure)
   - [Options available](#options-available)
   - [Buckets](#buckets)
-  - [Ping](#ping)
   - [Overrides](#overrides)
 - [ERL (Elevated Rate Limits)](#erl-elevated-rate-limits)
   - [Prerequisites](#prerequisites)
@@ -55,11 +54,6 @@ const limitd = new Limitd({
     }
   },
   prefix: 'test:',
-  ping: {
-    interval: 1000,
-    maxFailedAttempts: 5,
-    reconnectIfFailed: true
-  },
   username: 'username',
   password: 'password'
 });
@@ -71,9 +65,9 @@ const limitd = new Limitd({
 - `nodes` (array): [Redis Cluster Configuration](https://github.com/luin/ioredis#cluster).
 - `buckets` (object): Setup your bucket types.
 - `prefix` (string): Prefix keys in Redis.
-- `ping` (object): Configure ping to Redis DB.
 - `username` (string): Redis username. This is ignored if not in Cluster mode. Needs Redis >= v6.
 - `password` (string): Redis password.
+- `keepAlive` (number): TCP KeepAlive on the socket, expressed in milliseconds. Defaults to 10000. Set to a non-number value to disable keepAlive.
 
 ### Buckets:
 
@@ -91,14 +85,6 @@ If you omit `size`, limitdb assumes that `size` is the value of `per_interval`.
 
 If you don't specify a filling rate with `per_interval` or any other `per_x`, the bucket is fixed and you have to manually reset it using `PUT`.
 
-### Ping:
-
-- `interval` (number): represents the time between two consecutive pings. Default: 3000.
-- `maxFailedAttempts` (number): is the allowed number of failed pings before declaring the connection as dead. Default: 5.
-- `reconnectIfFailed` (boolean): indicates whether we should try to reconnect is the connection is declared dead. Default: true.
-
-
-
 ## Overrides
 
 You can also define `overrides` inside your type definitions as follows:
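A minimal sketch of the `keepAlive` option described in the README change above, using the client this package exports; the bucket values are illustrative, not taken from the README example:

```js
// Construct the limitd-redis client with TCP keepalive enabled on the
// underlying ioredis socket (10 s initial delay, which is also the default).
const Limitd = require('limitd-redis');

const limitd = new Limitd({
  uri: 'localhost',
  buckets: { ip: { size: 10, per_second: 5 } },
  prefix: 'test:',
  keepAlive: 10000 // SO_KEEPALIVE initial delay, in milliseconds
});
```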
diff --git a/lib/client.js b/lib/client.js
index 0fe058e..14cb573 100644
--- a/lib/client.js
+++ b/lib/client.js
@@ -31,7 +31,7 @@ class LimitdRedis extends EventEmitter {
     this.db = new LimitDBRedis(_.pick(params, [
       'uri', 'nodes', 'buckets', 'prefix', 'slotsRefreshTimeout', 'slotsRefreshInterval',
-      'username', 'password', 'tls', 'dnsLookup', 'globalTTL', 'cacheSize', 'ping']));
+      'username', 'password', 'tls', 'dnsLookup', 'globalTTL', 'cacheSize']));
 
     this.db.on('error', (err) => {
       this.emit('error', err);
diff --git a/lib/db.js b/lib/db.js
index e594798..fdcd689 100644
--- a/lib/db.js
+++ b/lib/db.js
@@ -6,7 +6,6 @@ const LRU = require('lru-cache');
 const utils = require('./utils');
 const Redis = require('ioredis');
 const { validateParams, validateERLParams } = require('./validation');
-const DBPing = require("./db_ping");
 const { calculateQuotaExpiration, resolveElevatedParams, isFixedWindowEnabled, removeHashtag } = require('./utils');
 const EventEmitter = require('events').EventEmitter;
 
@@ -14,27 +13,10 @@ const TAKE_LUA = fs.readFileSync(`${__dirname}/take.lua`, "utf8");
 const TAKE_ELEVATED_LUA = fs.readFileSync(`${__dirname}/take_elevated.lua`, "utf8");
 const PUT_LUA = fs.readFileSync(`${__dirname}/put.lua`, "utf8");
 
-const PING_SUCCESS = "successful";
-const PING_ERROR = "error";
-const PING_RECONNECT = "reconnect";
-const PING_RECONNECT_DRY_RUN = "reconnect-dry-run";
-
 const DEFAULT_COMMAND_TIMEOUT = 125; // Milliseconds
+const DEFAULT_KEEPALIVE = 10000; // Milliseconds
 
 class LimitDBRedis extends EventEmitter {
-  static get PING_SUCCESS() {
-    return PING_SUCCESS;
-  }
-  static get PING_ERROR() {
-    return PING_ERROR;
-  }
-  static get PING_RECONNECT() {
-    return PING_RECONNECT;
-  }
-  static get PING_RECONNECT_DRY_RUN() {
-    return PING_RECONNECT_DRY_RUN;
-  }
-
   /**
    * Creates an instance of LimitDB client for Redis.
    * @param {params} params - The configuration for the database and client.
@@ -63,6 +45,7 @@ class LimitDBRedis extends EventEmitter {
       keyPrefix: config.prefix,
       password: config.password,
       tls: config.tls,
+      keepAlive: config.keepAlive || DEFAULT_KEEPALIVE,
       reconnectOnError: (err) => {
         // will force a reconnect when error starts with `READONLY`
         // this code is only triggered when auto-failover is disabled
@@ -88,7 +71,6 @@ class LimitDBRedis extends EventEmitter {
       this.redis = new Redis.Cluster(config.nodes, clusterOptions);
     } else {
       this.redis = new Redis(config.uri, redisOptions);
-      this.#setupPing(config);
     }
 
     this.redis.defineCommand('take', {
@@ -120,24 +102,7 @@ class LimitDBRedis extends EventEmitter {
 
   }
 
-  #setupPing(config) {
-    this.redis.on("ready", () => this.#startPing(config));
-    this.redis.on("close", () => this.#stopPing());
-  }
-
-  #startPing(config) {
-    this.#stopPing();
-    this.ping = new DBPing(config.ping, this.redis);
-    this.ping.on("ping", (data) => this.emit("ping", data));
-  }
-
-  #stopPing() {
-    this.ping?.stop();
-    this.ping?.removeAllListeners();
-  }
-
   close(callback) {
-    this.#stopPing();
     this.redis.quit(callback);
   }
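One thing worth noting about the new option above: `config.keepAlive || DEFAULT_KEEPALIVE` treats every falsy value as "not set", so a caller cannot actually disable keepalive by passing a non-number (as the README wording suggests); the value always falls back to 10000. If disabling needs to stay possible, a nullish fallback is one option. This is a suggestion, not part of the change:

```js
// Illustration of the difference (DEFAULT_KEEPALIVE = 10000, as in lib/db.js):
const DEFAULT_KEEPALIVE = 10000;
const withOr = (v) => v || DEFAULT_KEEPALIVE;      // what the diff does
const withNullish = (v) => v ?? DEFAULT_KEEPALIVE; // only null/undefined fall back

console.log(withOr(false));      // 10000 - the "disable keepalive" request is lost
console.log(withNullish(false)); // false - forwarded to ioredis, keepalive stays off
```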
diff --git a/lib/db_ping.js b/lib/db_ping.js
deleted file mode 100644
index f564d61..0000000
--- a/lib/db_ping.js
+++ /dev/null
@@ -1,106 +0,0 @@
-const cbControl = require('./cb');
-const utils = require("./utils");
-const EventEmitter = require("events").EventEmitter;
-
-const PING_SUCCESS = "successful";
-const PING_ERROR = "error";
-const PING_RECONNECT = "reconnect";
-const PING_RECONNECT_DRY_RUN = "reconnect-dry-run";
-
-const DEFAULT_PING_INTERVAL = 3000; // Milliseconds
-
-class DBPing extends EventEmitter {
-  constructor(config, redis) {
-    super();
-
-    this.redis = redis;
-    this.config = {
-      commandTimeout: 125,
-      enabled: config ? true : false,
-      interval: config?.interval || DEFAULT_PING_INTERVAL,
-      maxFailedAttempts: config?.maxFailedAttempts || 5,
-      reconnectIfFailed:
-        utils.functionOrFalse(config?.reconnectIfFailed) || (() => false),
-    };
-
-    this.failedPings = 0;
-
-    this.#start();
-  }
-
-  #start() {
-    const doPing = () => {
-      if (!this.config.enabled) {
-        return;
-      }
-
-      let start = Date.now();
-      this.redis.ping(cbControl((err) => {
-        let duration = Date.now() - start;
-        err
-          ? this.#pingKO(triggerLoop, err, duration)
-          : this.#pingOK(triggerLoop, duration);
-      }).timeout(this.config.commandTimeout));
-    };
-
-    const triggerLoop = () => setTimeout(doPing, this.config.interval);
-
-    doPing();
-  }
-
-  stop() {
-    this.enabled = false;
-  }
-
-  #pingOK(callback, duration) {
-    this.reconnecting = false;
-    this.failedPings = 0;
-    this.#emitPingResult(PING_SUCCESS, undefined, duration, 0);
-    callback();
-  }
-
-  #pingKO(callback, err, duration) {
-    this.failedPings++;
-    this.#emitPingResult(PING_ERROR, err, duration, this.failedPings);
-
-    if (this.failedPings < this.config.maxFailedAttempts) {
-      return callback();
-    }
-
-    if (!this.config.reconnectIfFailed()) {
-      return this.#emitPingResult(
-        PING_RECONNECT_DRY_RUN,
-        undefined,
-        0,
-        this.failedPings
-      );
-    }
-
-    this.#retryStrategy(() => {
-      this.#emitPingResult(PING_RECONNECT, undefined, 0, this.failedPings);
-      this.redis.disconnect(true);
-    });
-  }
-
-  #emitPingResult(status, err, duration, failedPings) {
-    const result = {
-      status: status,
-      duration: duration,
-      error: err,
-      failedPings: failedPings,
-    };
-    this.emit("ping", result);
-  }
-
-  #retryStrategy(callback) {
-    //jitter between 0% and 10% of the total wait time needed to reconnect
-    //i.e. if interval = 100 and maxFailedAttempts = 3 => it'll randomly jitter between 0 and 30 ms
-    const deviation =
-      utils.randomBetween(0, 0.1) *
-      this.config.interval *
-      this.config.maxFailedAttempts;
-    setTimeout(callback, deviation);
-  }
-}
-
-module.exports = DBPing;
diff --git a/package.json b/package.json
index 8ede626..fb961a6 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "limitd-redis",
-  "version": "8.3.1",
+  "version": "8.4.0",
   "description": "A database client for limits on top of redis",
   "main": "index.js",
   "repository": {
@@ -28,8 +28,10 @@
     "eslint": "^6.1.0",
     "mocha": "^5.2.0",
     "mockdate": "^3.0.5",
+    "net-keepalive": "^4.0.10",
     "nyc": "^14.1.1",
     "sinon": "^19.0.2",
+    "sockopt": "^2.0.1",
     "toxiproxy-node-client": "^2.0.6"
   }
 }
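The two new devDependencies, `net-keepalive` and `sockopt`, exist to read kernel-level keepalive settings back off the socket in tests. A rough sketch of how `net-keepalive` could be used for that; its `getKeepAliveInterval`/`getKeepAliveProbes` helpers are assumed from that package's documentation, and `redis.stream` (the underlying `net.Socket` of an ioredis connection) is an ioredis implementation detail rather than public API:

```js
// Rough sketch, not part of the committed tests: read TCP_KEEPINTVL and
// TCP_KEEPCNT from the ioredis socket. The initial delay (TCP_KEEPIDLE) is the
// value ioredis passes to socket.setKeepAlive() from the keepAlive option.
const NetKeepAlive = require('net-keepalive');

function readKeepAliveSettings(redis) {
  const socket = redis.stream; // ioredis internal: the underlying net.Socket
  return {
    intervalMs: NetKeepAlive.getKeepAliveInterval(socket), // TCP_KEEPINTVL
    probes: NetKeepAlive.getKeepAliveProbes(socket)        // TCP_KEEPCNT
  };
}
```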
diff --git a/test/db.clustermode.tests.js b/test/db.clustermode.tests.js
index 1dd614a..56eb4a4 100644
--- a/test/db.clustermode.tests.js
+++ b/test/db.clustermode.tests.js
@@ -12,7 +12,7 @@ describe('when using LimitDB', () => {
       return new LimitDB({ nodes: clusterNodes, buckets: {}, prefix: 'tests:', ..._.omit(params, ['uri']) });
     };
 
-    dbTests(clientCreator);
+    dbTests(clientCreator, { clusterNodes });
 
     describe('when using the clustered #constructor', () => {
       it('should allow setting username and password', (done) => {
@@ -28,4 +28,4 @@ describe('when using LimitDB', () => {
       });
     });
   })
-});
\ No newline at end of file
+});
diff --git a/test/db.standalonemode.tests.js b/test/db.standalonemode.tests.js
index fb8784e..68db900 100644
--- a/test/db.standalonemode.tests.js
+++ b/test/db.standalonemode.tests.js
@@ -10,10 +10,10 @@ const crypto = require('crypto');
 
 describe('when using LimitDB', () => {
   describe('in standalone mode', () => {
     const clientCreator = (params) => {
-      return new LimitDB({ uri: 'localhost', buckets: {}, prefix: 'tests:', ..._.omit(params, ['nodes']) });
+      return new LimitDB({ uri: 'localhost:6379', buckets: {}, prefix: 'tests:', ..._.omit(params, ['nodes']) });
     };
 
-    dbTests(clientCreator);
+    dbTests(clientCreator, { uri: 'localhost:6379' });
 
     describe('when using the standalone #constructor', () => {
       it('should emit error on failure to connect to redis', (done) => {
@@ -27,171 +27,5 @@ describe('when using LimitDB', () => {
       });
     });
 
-
-    describe('LimitDBRedis Ping', () => {
-
-      let ping = {
-        enabled: () => true,
-        interval: 10,
-        maxFailedAttempts: 3,
-        reconnectIfFailed: () => true,
-        maxFailedAttemptsToRetryReconnect: 10
-      };
-
-      let config = {
-        uri: 'localhost:22222',
-        buckets,
-        prefix: 'tests:',
-        ping,
-      };
-
-      let redisProxy;
-      let toxiproxy;
-      let db;
-
-      beforeEach((done) => {
-        toxiproxy = new Toxiproxy('http://localhost:8474');
-        proxyBody = {
-          listen: '0.0.0.0:22222',
-          name: crypto.randomUUID(), //randomize name to avoid concurrency issues
-          upstream: 'redis:6379'
-        };
-        toxiproxy.createProxy(proxyBody)
-          .then((proxy) => {
-            redisProxy = proxy;
-            done();
-          });
-
-      });
-
-      afterEach((done) => {
-        redisProxy.remove().then(() =>
-          db.close((err) => {
-            // Can't close DB if it was never open
-            if (err?.message.indexOf('enableOfflineQueue') > 0 || err?.message.indexOf('Connection is closed') >= 0) {
-              err = undefined;
-            }
-            done(err);
-          })
-        );
-      });
-
-      it('should emit ping success', (done) => {
-        db = createDB({ uri: 'localhost:22222', buckets, prefix: 'tests:', ping }, done);
-        db.once(('ping'), (result) => {
-          if (result.status === LimitDB.PING_SUCCESS) {
-            done();
-          }
-        });
-      });
-
-      it('should emit "ping - error" when redis stops responding pings', (done) => {
-        let called = false;
-
-        db = createDB(config, done);
-        db.once(('ready'), () => addLatencyToxic(redisProxy, 20000, noop));
-        db.on(('ping'), (result) => {
-          if (result.status === LimitDB.PING_ERROR && !called) {
-            called = true;
-            db.removeAllListeners('ping');
-            done();
-          }
-        });
-      });
-
-      it('should emit "ping - reconnect" when redis stops responding pings and client is configured to reconnect', (done) => {
-        let called = false;
-        db = createDB(config, done);
-        db.once(('ready'), () => addLatencyToxic(redisProxy, 20000, noop));
-        db.on(('ping'), (result) => {
-          if (result.status === LimitDB.PING_RECONNECT && !called) {
-            called = true;
-            db.removeAllListeners('ping');
-            done();
-          }
-        });
-      });
-
-      it('should emit "ping - reconnect dry run" when redis stops responding pings and client is NOT configured to reconnect', (done) => {
-        let called = false;
-        db = createDB({ ...config, ping: { ...ping, reconnectIfFailed: () => false } }, done);
-        db.once(('ready'), () => addLatencyToxic(redisProxy, 20000, noop));
-        db.on(('ping'), (result) => {
-          if (result.status === LimitDB.PING_RECONNECT_DRY_RUN && !called) {
-            called = true;
-            db.removeAllListeners('ping');
-            done();
-          }
-        });
-      });
-
-      it(`should NOT emit ping events when config.ping is not set`, (done) => {
-        db = createDB({ ...config, ping: undefined }, done);
-
-        db.once(('ping'), (result) => {
-          done(new Error(`unexpected ping event emitted ${result}`));
-        });
-
-        //If after 100ms there are no interactions, we mark the test as passed.
-        setTimeout(done, 100);
-      });
-
-      it('should recover from a connection loss', (done) => {
-        let pingResponded = false;
-        let reconnected = false;
-        let toxic = undefined;
-        let timeoutId;
-        db = createDB({ ...config, ping: { ...ping, interval: 50 } }, done);
-
-        db.on(('ping'), (result) => {
-          if (result.status === LimitDB.PING_SUCCESS) {
-            if (!pingResponded) {
-              pingResponded = true;
-              toxic = addLatencyToxic(redisProxy, 20000, (t) => toxic = t);
-            } else if (reconnected) {
-              clearTimeout(timeoutId);
-              db.removeAllListeners('ping');
-              done();
-            }
-          } else if (result.status === LimitDB.PING_RECONNECT) {
-            if (pingResponded && !reconnected) {
-              reconnected = true;
-              toxic.remove();
-            }
-          }
-        });
-
-        timeoutId = setTimeout(() => done(new Error('Not reconnected')), 1800);
-      });
-
-      const createDB = (config, done) => {
-        let tmpDB = new LimitDB(config);
-
-        tmpDB.on(('error'), (err) => {
-          //As we actively close the connection, there might be network-related errors while attempting to reconnect
-          if (err?.message.indexOf('enableOfflineQueue') > 0 || err?.message.indexOf('Command timed out') >= 0) {
-            err = undefined;
-          }
-
-          if (err) {
-            done(err);
-          }
-        });
-
-        return tmpDB;
-      };
-
-      const addLatencyToxic = (proxy, latency, callback) => {
-        let toxic = new Toxic(
-          proxy,
-          { type: 'latency', attributes: { latency: latency } }
-        );
-        proxy.addToxic(toxic).then(callback);
-      };
-
-      const noop = () => {
-      };
-    });
   })
-});
\ No newline at end of file
+});
diff --git a/test/db.tests.js b/test/db.tests.js
index 163aaea..175014b 100644
--- a/test/db.tests.js
+++ b/test/db.tests.js
@@ -5,6 +5,8 @@ const _ = require('lodash');
 const assert = require('chai').assert;
 const { endOfMonthTimestamp, replicateHashtag } = require('../lib/utils');
 const sinon = require('sinon');
+const { exec } = require('child_process');
+const { assertIsNear, dropPackets } = require('./testutils');
 
 const buckets = {
   ip: {
@@ -120,7 +122,7 @@ const elevatedBuckets = {
 module.exports.buckets = buckets;
 module.exports.elevatedBuckets = elevatedBuckets;
 
-module.exports.tests = (clientCreator) => {
+module.exports.tests = (clientCreator, opts) => {
   describe('LimitDBRedis', () => {
     let db;
     const prefix = 'tests:'
@@ -143,6 +145,47 @@ module.exports.tests = (clientCreator, opts) => {
       });
     });
 
+    describe('KeepAlive', () => {
+      describe('when keepalive is not specified and the socket stops responding', () => {
+        it('should try to reconnect after 10 seconds', (done) => {
+          let dropTime, reconnectTime;
+          db.redis.on('connect', () => {
+            if (reconnectTime) {
+              done();
+            }
+          });
+
+          db.redis.on('reconnecting', () => {
+            reconnectTime = Date.now();
+            assertIsNear(reconnectTime - dropTime, 10000, 100);
+          });
+
+          dropPackets(db.redis.options.host);
+          dropTime = Date.now();
+        }).timeout(15000);
+      });
+
+      describe('when keepalive is specified and the socket stops responding', () => {
+        it('should try to reconnect after the specified time', (done) => {
+          db = clientCreator({ buckets, prefix: prefix, keepAlive: 5000 });
+          let dropTime, reconnectTime;
+          db.redis.on('connect', () => {
+            if (reconnectTime) {
+              done();
+            }
+          });
+
+          db.redis.on('reconnecting', () => {
+            reconnectTime = Date.now();
+            assertIsNear(reconnectTime - dropTime, 5000, 100);
+          });
+
+          dropPackets(db.redis.options.host);
+          dropTime = Date.now();
+        }).timeout(10000);
+      });
+    });
+
     describe('#constructor', () => {
       it('should throw an error when missing redis information', () => {
         assert.throws(() => clientCreator({
diff --git a/test/getsockopt_cli b/test/getsockopt_cli
new file mode 100755
index 0000000..ead056f
Binary files /dev/null and b/test/getsockopt_cli differ
diff --git a/test/getsockopt_cli.c b/test/getsockopt_cli.c
new file mode 100644
index 0000000..b265708
--- /dev/null
+++ b/test/getsockopt_cli.c
@@ -0,0 +1,47 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+int main(int argc, char *argv[]) {
+    if (argc != 2) {
+        fprintf(stderr, "Usage: %s <sockfd>\n", argv[0]);
+        exit(EXIT_FAILURE);
+    }
+
+    int sockfd = atoi(argv[1]);
+    int optval;
+    socklen_t optlen = sizeof(optval);
+
+    // Check if the file descriptor is valid
+    if (fcntl(sockfd, F_GETFD) == -1) {
+        fprintf(stderr, "Invalid file descriptor: %s\n", strerror(errno));
+        exit(EXIT_FAILURE);
+    }
+
+    // Check if the file descriptor is a socket
+    int type;
+    socklen_t length = sizeof(type);
+    if (getsockopt(sockfd, SOL_SOCKET, SO_TYPE, &type, &length) == -1) {
+        fprintf(stderr, "Not a socket: %s\n", strerror(errno));
+        exit(EXIT_FAILURE);
+    }
+
+    printf("File descriptor: %d\n", sockfd);
+    printf("SOL_SOCKET: %d\n", SOL_SOCKET);
+    printf("SO_KEEPALIVE: %d\n", SO_KEEPALIVE);
+
+    // Get the value of SO_KEEPALIVE
+    if (getsockopt(sockfd, SOL_SOCKET, SO_KEEPALIVE, &optval, &optlen) < 0) {
+        fprintf(stderr, "getsockopt: %s\n", strerror(errno));
+        exit(EXIT_FAILURE);
+    }
+
+    printf("SO_KEEPALIVE value: %d\n", optval);
+    return 0;
+}
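The compiled `getsockopt_cli` helper takes a file descriptor and prints the `SO_KEEPALIVE` value for it. A rough, Linux-only sketch of how a test could call it against the ioredis socket; the fd sharing through the `stdio` array and the use of `redis.stream` are assumptions, not something this diff wires up:

```js
// Rough sketch (not part of the committed tests): expose the ioredis socket to
// the helper binary as the child's fd 3 and parse SO_KEEPALIVE from stdout.
const { execFileSync } = require('child_process');

function soKeepAliveEnabled(redis) {
  const out = execFileSync('./test/getsockopt_cli', ['3'], {
    stdio: ['ignore', 'pipe', 'inherit', redis.stream] // socket becomes fd 3 in the child
  }).toString();
  const match = out.match(/SO_KEEPALIVE value: (\d+)/);
  return match !== null && Number(match[1]) === 1;
}
```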
diff --git a/test/testutils.js b/test/testutils.js
new file mode 100644
index 0000000..cd490a4
--- /dev/null
+++ b/test/testutils.js
@@ -0,0 +1,162 @@
+const { exec } = require('child_process');
+const os = require('os');
+const assert = require('chai').assert;
+
+// Linux-specific functions
+const dropPacketsLinux = (remoteIp) => {
+  exec(`sudo iptables -A OUTPUT -d ${remoteIp} -j DROP`, (error, stdout, stderr) => {
+    if (error) {
+      console.error(`Error dropping packets: ${error.message}`);
+      return;
+    }
+    if (stderr) {
+      console.error(`stderr: ${stderr}`);
+      return;
+    }
+    console.log(`Packets to ${remoteIp} dropped`);
+  });
+};
+
+const restorePacketsLinux = (remoteIp) => {
+  exec(`sudo iptables -D OUTPUT -d ${remoteIp} -j DROP`, (error, stdout, stderr) => {
+    if (error) {
+      console.error(`Error restoring packets: ${error.message}`);
+      return;
+    }
+    if (stderr) {
+      console.error(`stderr: ${stderr}`);
+      return;
+    }
+    console.log(`Packets to ${remoteIp} restored`);
+  });
+};
+
+// macOS-specific functions
+const dropPacketsMacOS = (remoteIp) => {
+  const pfRule = `block drop out from any to ${remoteIp}`;
+  exec(`echo "${pfRule}" | sudo pfctl -ef -`, (error, stdout, stderr) => {
+    if (error) {
+      console.error(`Error dropping packets: ${error.message}`);
+      return;
+    }
+    if (stderr) {
+      console.error(`stderr: ${stderr}`);
+      return;
+    }
+    console.log(`Packets to ${remoteIp} dropped`);
+  });
+};
+
+const restorePacketsMacOS = (remoteIp) => {
+  exec(`sudo pfctl -F all -f /etc/pf.conf`, (error, stdout, stderr) => {
+    if (error) {
+      console.error(`Error restoring packets: ${error.message}`);
+      return;
+    }
+    if (stderr) {
+      console.error(`stderr: ${stderr}`);
+      return;
+    }
+    console.log(`Packets to ${remoteIp} restored`);
+  });
+};
+
+// Main functions
+const dropPackets = (remoteIp) => {
+  if (os.platform() === 'linux') {
+    dropPacketsLinux(remoteIp);
+  } else if (os.platform() === 'darwin') {
+    dropPacketsMacOS(remoteIp);
+  } else {
+    console.error('Unsupported OS');
+  }
+};
+
+const restorePackets = (remoteIp) => {
+  if (os.platform() === 'linux') {
+    restorePacketsLinux(remoteIp);
+  } else if (os.platform() === 'darwin') {
+    restorePacketsMacOS(remoteIp);
+  } else {
+    console.error('Unsupported OS');
+  }
+};
+
+const assertIsNear = (actual, expected, delta) => {
+  assert.isAtLeast(actual, expected - delta, `Expected ${actual} to be at least ${expected - delta}`);
+  assert.isAtMost(actual, expected + delta, `Expected ${actual} to be at most ${expected + delta}`);
+}
+
+module.exports = {
+  dropPackets,
+  restorePackets,
+  assertIsNear
+}
+
+// // Example usage
+// const remoteRedisIp = '192.168.1.100';
+// dropPackets(remoteRedisIp);
+//
+// // Wait for 5 seconds before restoring packet flow
+// setTimeout(() => {
+//   restorePackets(remoteRedisIp);
+// }, 5000);
+
+// const getSockOptValue = (socket, opt, cb) => {
+//   // if (process.env.CI !== undefined) {
+//   //   return linuxGetSockOptValue(socket, opt, cb);
+//   // }
+//   //
+//   // return macosGetSockOptValue(socket, opt, cb);
+//   cb(NetKeepAlive.getKeepAliveInterval(socket));
+// };
+//
+// const macosGetSockOptValue = (socket, opt, cb) => {
+//   const pid = process.pid;
+//   exec(`lsof -a -p ${pid} -i 4 -T f`, (error, stdout, stderr) => {
+//     if (error) {
+//       return cb(error);
+//     }
+//     if (stderr) {
+//       return cb(new Error(stderr));
+//     }
+//
+//     const keepAliveOption = stdout
+//       .split('\n')
+//       .find(line => line.includes(`:${socket.localPort}`) && line.includes(`:${socket.remotePort}`));
+//
+//     if (!keepAliveOption) {
+//       cb(new Error(`no entry found for local port ${socket.localPort}, and remote port ${socket.remotePort}`));
+//     }
+//
+//     if (!keepAliveOption.includes(opt)) {
+//       cb(new Error(`${opt} option not found: ${keepAliveOption}`));
+//     }
+//
+//     const keepAliveValue = parseInt(keepAliveOption.match(new RegExp(`${opt}=(\\d+)`))[1], 10);
+//     cb(null, keepAliveValue);
+//   });
+// };
+//
+// const linuxGetSockOptValue = (socket, opt, cb) => {
+//   const SOL_SOCKET = 0xffff;
+//   const SOCK_OPTS = {
+//     'SO=KEEPALIVE': 0x0008,
+//   };
+//   if (!SOCK_OPTS[opt]) {
+//     return cb(new Error(`Unknown socket option: ${opt}`));
+//   }
+//
+//   const keepAliveValue = getsockopt(socket, SOL_SOCKET, SOCK_OPTS[opt]);
+//   cb(null, keepAliveValue);
+// };
+//
+// module.exports = {
+//   getSockOptValue
+// };
+//
+// const { exec } = require('child_process');
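One follow-up worth noting on the helpers above: the new KeepAlive tests drop packets to the Redis host but never restore them, so later tests on the same runner would keep failing. A suggested cleanup hook for db.tests.js, not part of this diff, where `db` is the client created in the suite's `beforeEach`:

```js
// Suggested cleanup (an assumption, not committed code): remove the
// iptables/pf drop rule after every test; restorePackets only logs on error,
// so calling it when nothing was dropped is harmless.
const { restorePackets } = require('./testutils');

afterEach(() => {
  if (db && db.redis) {
    restorePackets(db.redis.options.host);
  }
});
```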