@@ -222,9 +222,9 @@ class AsyncCacheStressor : public Stressor {
                 ThroughputStats& stats,
                 const Request* req,
                 folly::EventBase* evb,
-                const std::string* key) {
+                const std::string_view key) {
     ++stats.get;
-    auto lock = chainedItemAcquireSharedLock(*key);
+    auto lock = chainedItemAcquireSharedLock(key);
 
     if (ticker_) {
       ticker_->updateTimeStamp(req->timestamp);
@@ -233,8 +233,7 @@ class AsyncCacheStressor : public Stressor {
     // add a distribution over sequences of requests/access patterns
     // e.g. get-no-set and set-no-get
 
-    auto onReadyFn = [&, req, key = *key,
-                      l = std::move(lock)](auto hdl) mutable {
+    auto onReadyFn = [&, req, key, l = std::move(lock)](auto hdl) mutable {
       auto result = OpResultType::kGetMiss;
 
       if (hdl == nullptr) {
@@ -247,7 +246,7 @@ class AsyncCacheStressor : public Stressor {
           // appropriate here)
           l.unlock();
           auto xlock = chainedItemAcquireUniqueLock(key);
-          setKey(pid, stats, &key, *(req->sizeBegin), req->ttlSecs,
+          setKey(pid, stats, key, *(req->sizeBegin), req->ttlSecs,
                  req->admFeatureMap);
         }
       } else {
@@ -260,8 +259,8 @@ class AsyncCacheStressor : public Stressor {
       }
     };
 
-    cache_->recordAccess(*key);
-    auto sf = cache_->asyncFind(*key);
+    cache_->recordAccess(key);
+    auto sf = cache_->asyncFind(key);
     if (sf.isReady()) {
       // If the handle is ready, call onReadyFn directly to process the handle
       onReadyFn(std::move(sf).value());
@@ -283,9 +282,9 @@ class AsyncCacheStressor : public Stressor {
                 ThroughputStats& stats,
                 const Request* req,
                 folly::EventBase* evb,
-                const std::string* key) {
+                const std::string_view key) {
     ++stats.get;
-    auto lock = chainedItemAcquireUniqueLock(*key);
+    auto lock = chainedItemAcquireUniqueLock(key);
 
     // This was moved outside the lambda, as otherwise gcc-8.x crashes with an
     // internal compiler error here (suspected regression in folly).
@@ -297,7 +296,7 @@ class AsyncCacheStressor : public Stressor {
         ++stats.getMiss;
 
         ++stats.set;
-        wHdl = cache_->allocate(pid, *key, *(req->sizeBegin), req->ttlSecs);
+        wHdl = cache_->allocate(pid, key, *(req->sizeBegin), req->ttlSecs);
         if (!wHdl) {
           ++stats.setFailure;
           return;
@@ -327,7 +326,7 @@ class AsyncCacheStressor : public Stressor {
     };
 
     // Always use asyncFind as findToWrite is sync when using HybridCache
-    auto sf = cache_->asyncFind(*key);
+    auto sf = cache_->asyncFind(key);
     if (sf.isReady()) {
       onReadyFn(std::move(sf).value());
       return;
@@ -345,10 +344,10 @@ class AsyncCacheStressor : public Stressor {
   void asyncUpdate(ThroughputStats& stats,
                    const Request* req,
                    folly::EventBase* evb,
-                   const std::string* key) {
+                   const std::string_view key) {
     ++stats.get;
     ++stats.update;
-    auto lock = chainedItemAcquireUniqueLock(*key);
+    auto lock = chainedItemAcquireUniqueLock(key);
     if (ticker_) {
       ticker_->updateTimeStamp(req->timestamp);
     }
@@ -363,7 +362,7 @@ class AsyncCacheStressor : public Stressor {
       cache_->updateItemRecordVersion(wHdl);
     };
 
-    auto sf = cache_->asyncFind(*key);
+    auto sf = cache_->asyncFind(key);
     if (sf.isReady()) {
       onReadyFn(std::move(sf).value());
       return;
@@ -457,18 +456,18 @@ class AsyncCacheStressor : public Stressor {
         const auto pid = static_cast<PoolId>(opPoolDist(gen));
         const Request& req(getReq(pid, gen, lastRequestId));
         OpType op = req.getOp();
-        const std::string* key = &(req.key);
+        std::string_view key = req.key;
         std::string oneHitKey;
         if (op == OpType::kLoneGet || op == OpType::kLoneSet) {
           oneHitKey = Request::getUniqueKey();
-          key = &oneHitKey;
+          key = oneHitKey;
         }
 
         OpResultType result(OpResultType::kNop);
         switch (op) {
         case OpType::kLoneSet:
         case OpType::kSet: {
-          auto lock = chainedItemAcquireUniqueLock(*key);
+          auto lock = chainedItemAcquireUniqueLock(key);
           result = setKey(pid, stats, key, *(req.sizeBegin), req.ttlSecs,
                           req.admFeatureMap);
 
@@ -481,8 +480,8 @@ class AsyncCacheStressor : public Stressor {
         }
         case OpType::kDel: {
           ++stats.del;
-          auto lock = chainedItemAcquireUniqueLock(*key);
-          auto res = cache_->remove(*key);
+          auto lock = chainedItemAcquireUniqueLock(key);
+          auto res = cache_->remove(key);
           if (res == CacheT::RemoveRes::kNotFoundInRam) {
             ++stats.delNotFound;
           }
@@ -532,7 +531,7 @@ class AsyncCacheStressor : public Stressor {
   OpResultType setKey(
       PoolId pid,
       ThroughputStats& stats,
-      const std::string* key,
+      const std::string_view key,
       size_t size,
       uint32_t ttlSecs,
       const std::unordered_map<std::string, std::string>& featureMap) {
@@ -543,7 +542,7 @@ class AsyncCacheStressor : public Stressor {
     }
 
     ++stats.set;
-    auto it = cache_->allocate(pid, *key, size, ttlSecs);
+    auto it = cache_->allocate(pid, key, size, ttlSecs);
     if (it == nullptr) {
       ++stats.setFailure;
       return OpResultType::kSetFailure;
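
The change applied throughout this diff is mechanical: every const std::string* key parameter becomes std::string_view, call sites drop the & and *, and the lambdas capture the view by value instead of copying the whole string. The sketch below is a minimal standalone illustration of the same idea, not the CacheLib API: gCache, lookup, and makeUniqueKey are hypothetical stand-ins for the cachebench calls. It also shows why the owning std::string (the oneHitKey pattern above) must stay alive for as long as the view is in use.

#include <iostream>
#include <string>
#include <string_view>
#include <unordered_map>

// Hypothetical stand-in for the cache; not the CacheLib API.
std::unordered_map<std::string, int> gCache;

// Before the change, a signature like bool lookup(const std::string* key)
// forced callers to pass &key and the body to dereference. Taking
// std::string_view accepts a std::string, a literal, or any other
// contiguous character range without an extra copy at the call site.
bool lookup(std::string_view key) {
  return gCache.find(std::string{key}) != gCache.end();
}

// Stand-in for Request::getUniqueKey(), which returns an owning std::string.
std::string makeUniqueKey() { return "one-hit-key-42"; }

int main() {
  gCache.emplace("req-key", 1);

  std::string_view key = "req-key"; // non-owning view of the request key
  std::cout << "hit: " << lookup(key) << '\n';

  // Lifetime caveat mirrored by oneHitKey in the diff: the view must not
  // outlive the string it points at, so store the unique key in a named
  // std::string first and only then repoint the view at it.
  std::string oneHitKey = makeUniqueKey();
  key = oneHitKey;
  std::cout << "hit: " << lookup(key) << '\n';
  return 0;
}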