@@ -13,7 +13,7 @@ import { Functions, MergeArgs, TopologyMetadata } from "./worker";
 // Timeout after which we kill/recreate the worker thread
 const TASK_TIMEOUT_MS = 90_000;

-// Reserve 5Gb + 30% of memory for responding to requests and loading topology data from disk
+// Reserve 6Gb + 35% of memory for responding to requests and loading topology data from disk
 // Remaining amount is split amongst each worker for topology data
 // This strategy seems to work for any amount of host memory and targets total memory
 // in use maxing out at around 80%
@@ -22,7 +22,7 @@ const dockerMemLimit = Number(
 );
 const hostmem = os.totalmem();
 const totalmem = Math.min(hostmem, dockerMemLimit);
-const reservedMem = 5 * 1024 * 1024 * 1024 + totalmem * 0.3;
+const reservedMem = 6 * 1024 * 1024 * 1024 + totalmem * 0.35;
 const maxCacheSize = Math.ceil((totalmem - reservedMem) / NUM_WORKERS);

 const logger = new Logger("worker-pool");
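To make the new budget concrete, here is a standalone sketch of the same arithmetic. The 32 GiB host size and four-worker count are assumed example values, not constants from this repo.

```typescript
// Standalone sketch of the budget computed above. The host size and
// worker count are assumed example values, not this repo's constants.
const GIB = 1024 * 1024 * 1024;
const NUM_WORKERS = 4; // assumption for illustration
const totalmem = 32 * GIB; // assumption: 32 GiB host, no lower Docker limit

// Hold back 6 GiB + 35% of total for request handling and disk loads
const reservedMem = 6 * GIB + totalmem * 0.35;

// Split the remainder evenly as a per-worker cache ceiling
const maxCacheSize = Math.ceil((totalmem - reservedMem) / NUM_WORKERS);

console.log((maxCacheSize / GIB).toFixed(2)); // "3.70" GiB per worker
```

On this assumed host, the change shrinks each worker's cache ceiling from about 4.35 GiB (under the old 5 GiB + 30% reservation) to about 3.70 GiB, leaving more headroom for the request path.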
@@ -83,13 +83,14 @@ async function findQueue(
   const size = regionSizes[regionConfig.id];

   // Choose our next index by size
+  const willFit = (worker: number) => workerSizes[worker] + size < maxCacheSize;
   const getBestFit = (workers: number[]): number =>
     (!size
       ? // Use the smallest worker if size is unknown
         _.minBy(workers, idx => workerSizes[idx])
       : // Use the largest worker that will fit if size is known
-      workerSizes.some(workerSize => workerSize + size < maxCacheSize)
-        ? _.minBy(workers, idx => maxCacheSize - (workerSizes[idx] + size))
+      workers.some(willFit)
+        ? _.minBy(workers.filter(willFit), idx => maxCacheSize - (workerSizes[idx] + size))
       : // If no workers will fit, we use the least recently used worker
         // eslint-disable-next-line functional/immutable-data
         workersByRecency.pop()) || workers[0];
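The bug this hunk fixes: the old guard tested fit against every entry in `workerSizes` rather than the candidate `workers` list, and the `_.minBy` then ran over all candidates unfiltered, so it could return a worker whose cache would overflow. Below is a minimal sketch of the fixed selection; the `maxCacheSize`, `workerSizes`, and `size` values are invented for illustration.

```typescript
// Minimal sketch of the fixed best-fit selection. All sizes here are
// assumed example values; the real pool tracks them per worker thread.
import _ from "lodash";

const maxCacheSize = 10;
const workerSizes = [2, 4, 7]; // assumed current cache usage per worker
const size = 3; // assumed size of the incoming region

const willFit = (worker: number) => workerSizes[worker] + size < maxCacheSize;

// Consider only workers that actually fit, then take the tightest fit
// (smallest leftover capacity once the region is added)
const pickBestFit = (workers: number[]): number | undefined =>
  workers.some(willFit)
    ? _.minBy(workers.filter(willFit), idx => maxCacheSize - (workerSizes[idx] + size))
    : undefined; // caller falls back to the least recently used worker

console.log(pickBestFit([0, 1, 2])); // 1: leftover 10 - (4 + 3) = 3 beats worker 0's 5
```

With these numbers, the pre-fix `minBy` over the unfiltered list would have returned worker 2 (leftover 0) even though 7 + 3 is not under the cap.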