From ca10b0db944fe02ddb2e7dd12e400feba543a3ba Mon Sep 17 00:00:00 2001
From: cbigger <53627581+cbigger@users.noreply.github.com>
Date: Fri, 9 Jun 2023 17:05:26 -0400
Subject: [PATCH] Update _llama_cpp.py

Fixes a small method-call error I ran into when running your combined
guidance + llama_cpp implementation. I believe the LlamaCppSession class
inherits from the LLMSession class, which has the methods `_cache_params`
and `_gen_key`.

Thank you for your constant work in the local LLM community; you are a
godsend.
---
 guidance/llms/_llama_cpp.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/guidance/llms/_llama_cpp.py b/guidance/llms/_llama_cpp.py
index dd0fa1813..c985af59e 100644
--- a/guidance/llms/_llama_cpp.py
+++ b/guidance/llms/_llama_cpp.py
@@ -283,7 +283,7 @@ async def __call__(self, prompt, stop=None, stop_regex=None, temperature=None, n
             token_healing = self.llm.token_healing
 
         # generate the cache key
-        key = self._cache_key(locals())
+        key = self._gen_key(locals())
 
         # set the stop patterns
         if stop is not None:
@@ -778,4 +778,4 @@ def __next__(self):
         if value is None:
             raise StopIteration()
         else:
-            return value
\ No newline at end of file
+            return value
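
For context, the sketch below is a minimal, hypothetical simplification (not
the actual guidance source) of the inheritance relationship the fix relies on:
the cache-key helper lives on the LLMSession base class as `_gen_key`, so the
old call to the nonexistent `self._cache_key(...)` raised an AttributeError,
while `self._gen_key(...)` resolves correctly through inheritance.

import asyncio


class LLMSession:
    """Stand-in for guidance's LLMSession base class (hypothetical)."""

    def _gen_key(self, args_dict):
        # Build a deterministic cache key from the call arguments,
        # skipping `self` so identical calls map to the same key.
        return "|".join(f"{k}={v}" for k, v in args_dict.items() if k != "self")


class LlamaCppSession(LLMSession):
    """Stand-in for guidance's LlamaCppSession subclass (hypothetical)."""

    async def __call__(self, prompt, stop=None, temperature=None):
        # Resolves via the base class, as in the patched line:
        key = self._gen_key(locals())
        # The original call failed because no `_cache_key` method exists
        # anywhere in the hierarchy:
        #   key = self._cache_key(locals())  # AttributeError
        return key


# Prints a pipe-joined key such as "prompt=hello|stop=None|temperature=0.7"
print(asyncio.run(LlamaCppSession()("hello", temperature=0.7)))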