110 changes: 110 additions & 0 deletions transformerlab/models/lmstudiomodel.py
@@ -0,0 +1,110 @@
from transformerlab.models import basemodel

import os
import json
import errno
import shutil

_LM_MODEL_EXTS = (".gguf", ".safetensors", ".pt", ".bin")


async def list_models():
    try:
        models_dir = lmstudio_models_dir()
    except Exception as e:
        print("Failed to locate LM Studio models directory:")
        print(str(e))
        return []

    if not models_dir:
        return []

    models = []
    for root, _, files in os.walk(models_dir):
        for fname in files:
            if fname.lower().endswith(_LM_MODEL_EXTS):
                model_path = os.path.join(root, fname)
                models.append(LMStudioModel(model_path))

    return models


class LMStudioModel(basemodel.BaseModel):
    def __init__(self, model_path: str):
        filename = os.path.basename(model_path)
        super().__init__(model_id=filename)

        self.source = "lmstudio"
        self.name = f"{os.path.splitext(filename)[0]} (LM Studio)"
        self.source_id_or_path = os.path.abspath(model_path)
        self.model_filename = filename

    async def get_json_data(self):
        json_data = await super().get_json_data()

        ext = os.path.splitext(self.model_filename)[1].lower()
        if ext == ".gguf":
            json_data["architecture"] = "GGUF"
            json_data["formats"] = ["GGUF"]
        elif ext in (".safetensors", ".pt", ".bin"):
            json_data["architecture"] = "PyTorch"
            json_data["formats"] = ["safetensors" if ext == ".safetensors" else "pt"]
        else:
            json_data["architecture"] = ""
            json_data["formats"] = []

        json_data["source_id_or_path"] = self.source_id_or_path
        return json_data

    async def install(self):
        input_model_path = self.source_id_or_path
        if not input_model_path or not os.path.isfile(input_model_path):

Check failure: Code scanning / CodeQL
Uncontrolled data used in path expression (High)
This path depends on a user-provided value. (Reported twice for this location.)

Copilot Autofix (AI, 6 days ago)

To fix this vulnerability, the code must ensure that any user-supplied filename or path, when resolved, is guaranteed to remain within the intended LM Studio models directory. This should be enforced before any sensitive file system operation is performed.

Specifically, in the LMStudioModel __init__ method (or in install), after constructing the path but before assignment, the code should:

  • Normalize the constructed path (using os.path.abspath and os.path.normpath).
  • Check that it is contained within the expected models directory (lmstudio_models_dir()), using a strong prefix match (startswith only after normalization).
  • If this check fails, raise an exception or handle the import as invalid.

Changes to make:

  • In LMStudioModel.__init__, validate that the resolved absolute model path is inside the LM Studio models directory. If not, raise a ValueError.
  • This ensures that even if the user supplies a malicious path, they cannot escape the designated directory.

Implementation specifics:

  • Add a models_dir variable in __init__, resolve it via lmstudio_models_dir().
  • Use os.path.abspath(os.path.join(models_dir, model_path)) (or similar) to resolve any model_id as a file underneath the intended directory.
  • Normalize and check with startswith as in the recommendation.
  • Raise exception if check fails.

Suggested changeset 1: transformerlab/models/lmstudiomodel.py

Autofix patch. Run the following command in your local git repository to apply this patch:
cat << 'EOF' | git apply
diff --git a/transformerlab/models/lmstudiomodel.py b/transformerlab/models/lmstudiomodel.py
--- a/transformerlab/models/lmstudiomodel.py
+++ b/transformerlab/models/lmstudiomodel.py
@@ -31,12 +31,24 @@
 
 class LMStudioModel(basemodel.BaseModel):
     def __init__(self, model_path: str):
-        filename = os.path.basename(model_path)
+        # Validate that the user-supplied path is inside the LMStudio models directory
+        models_dir = lmstudio_models_dir()
+        if not models_dir:
+            raise ValueError("Could not locate LM Studio models directory.")
+        # Support only files underneath the models_dir
+        # This ensures no path traversal outside the safe root
+        abs_models_dir = os.path.abspath(models_dir)
+        abs_model_path = os.path.abspath(os.path.join(abs_models_dir, model_path))
+        if not abs_model_path.startswith(abs_models_dir + os.sep):
+            raise ValueError("Requested model path is outside the LM Studio models directory.")
+        if not os.path.isfile(abs_model_path):
+            raise FileNotFoundError(f"Model file not found: {abs_model_path}")
+        filename = os.path.basename(abs_model_path)
         super().__init__(model_id=filename)
 
         self.source = "lmstudio"
         self.name = f"{os.path.splitext(filename)[0]} (LM Studio)"
-        self.source_id_or_path = os.path.abspath(model_path)
+        self.source_id_or_path = abs_model_path
         self.model_filename = filename
 
     async def get_json_data(self):
EOF
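One note on the containment check in this patch: a raw startswith prefix comparison is easy to get subtly wrong (the trailing os.sep is load-bearing, and symlinks are not resolved). A minimal sketch of a stricter variant using os.path.realpath and os.path.commonpath; the helper name is illustrative and not part of the suggested patch:

import os


def is_within_directory(root: str, candidate: str) -> bool:
    # Resolve symlinks first, so a link inside root cannot point back outside it.
    root_real = os.path.realpath(root)
    candidate_real = os.path.realpath(candidate)
    # commonpath compares whole path components, so "/models-evil" is not
    # treated as being inside "/models" the way a string prefix test might be.
    return os.path.commonpath([root_real, candidate_real]) == root_real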
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), input_model_path)

        from lab.dirs import get_models_dir

        output_filename = self.id
        output_path = os.path.join(get_models_dir(), output_filename)

        if os.path.exists(output_path):

Check failure: Code scanning / CodeQL
Uncontrolled data used in path expression (High)
This path depends on a user-provided value.

Copilot Autofix (AI, 6 days ago)

To fix the uncontrolled path expression problem, we must ensure that the constructed output path (output_path) is strictly constrained to a subdirectory of the intended models directory. This is best done by:

  1. Sanitizing the user-controlled output_filename using a function like werkzeug.utils.secure_filename to strip/replace potentially dangerous characters.
  2. Resolving the final path (using os.path.abspath or os.path.realpath) and confirming that it starts with the trusted base directory returned by get_models_dir(). If it doesn't, we must reject the operation.
  3. Optionally, add a check to block absolute paths or path components containing directory traversal (..).

The main places to edit:

  • In transformerlab/models/lmstudiomodel.py, method install():
    • Sanitize output_filename with secure_filename.
    • After constructing output_path, normalize and check that it is within the directory returned by get_models_dir(). If not, raise an error.

New import(s) for secure_filename may be required.


Suggested changeset 1: transformerlab/models/lmstudiomodel.py

Autofix patch. Run the following command in your local git repository to apply this patch:
cat << 'EOF' | git apply
diff --git a/transformerlab/models/lmstudiomodel.py b/transformerlab/models/lmstudiomodel.py
--- a/transformerlab/models/lmstudiomodel.py
+++ b/transformerlab/models/lmstudiomodel.py
@@ -4,7 +4,7 @@
 import json
 import errno
 import shutil
-
+from werkzeug.utils import secure_filename
 _LM_MODEL_EXTS = (".gguf", ".safetensors", ".pt", ".bin")
 
 
@@ -63,13 +63,18 @@
 
         from lab.dirs import get_models_dir
 
-        output_filename = self.id
-        output_path = os.path.join(get_models_dir(), output_filename)
+        output_filename = secure_filename(self.id)
+        models_dir = get_models_dir()
+        output_path = os.path.join(models_dir, output_filename)
+        output_path = os.path.abspath(output_path)
+        # Ensure output_path is within models_dir
+        models_dir_abs = os.path.abspath(models_dir)
+        if not output_path.startswith(models_dir_abs + os.sep):
+            raise ValueError("Invalid model id/path.")
 
         if os.path.exists(output_path):
             raise FileExistsError(errno.EEXIST, "Model already exists", output_path)
         os.makedirs(output_path, exist_ok=True)
-
         link_name = os.path.join(output_path, output_filename)
         os.symlink(input_model_path, link_name)
 
EOF
            raise FileExistsError(errno.EEXIST, "Model already exists", output_path)
        os.makedirs(output_path, exist_ok=True)

Check failure: Code scanning / CodeQL
Uncontrolled data used in path expression (High)
This path depends on a user-provided value.

Copilot Autofix (AI, 6 days ago)

The best way to fix the problem is to validate that any untrusted path segment (here, output_filename derived from user input in self.id) does not contain directory traversal or absolute path elements before using it to construct file system paths. In particular:

  • Normalize the final constructed path (output_path) using os.path.normpath.
  • Verify that the resulting normalized path starts with the intended root directory (get_models_dir()), after normalization.
  • Optionally, ensure output_filename is a "secure" filename, e.g., using werkzeug.utils.secure_filename or by allowing only whitelisted model IDs.
  • If the path is not contained in the intended directory after normalization, abort the installation and report an error.

This affects the install method in LMStudioModel in transformerlab/models/lmstudiomodel.py, specifically around the lines where output_path is constructed and used. We will add path normalization, containment checking, and filename sanitization. We need to import werkzeug.utils.secure_filename at the top.

Suggested changeset 1: transformerlab/models/lmstudiomodel.py

Autofix patch. Run the following command in your local git repository to apply this patch:
cat << 'EOF' | git apply
diff --git a/transformerlab/models/lmstudiomodel.py b/transformerlab/models/lmstudiomodel.py
--- a/transformerlab/models/lmstudiomodel.py
+++ b/transformerlab/models/lmstudiomodel.py
@@ -4,7 +4,7 @@
 import json
 import errno
 import shutil
-
+from werkzeug.utils import secure_filename
 _LM_MODEL_EXTS = (".gguf", ".safetensors", ".pt", ".bin")
 
 
@@ -63,9 +63,15 @@
 
         from lab.dirs import get_models_dir
 
-        output_filename = self.id
-        output_path = os.path.join(get_models_dir(), output_filename)
-
+        # Sanitize and validate filename
+        output_filename = secure_filename(self.id)
+        if not output_filename:
+            raise ValueError("Invalid model filename from model id.")
+        models_root = os.path.abspath(get_models_dir())
+        output_path = os.path.normpath(os.path.join(models_root, output_filename))
+        # Ensure output_path is inside models_root after normalization
+        if not output_path.startswith(models_root + os.sep):
+            raise ValueError("Attempted path traversal detected in model id.")
         if os.path.exists(output_path):
             raise FileExistsError(errno.EEXIST, "Model already exists", output_path)
         os.makedirs(output_path, exist_ok=True)
EOF

        link_name = os.path.join(output_path, output_filename)
        os.symlink(input_model_path, link_name)

Check failure: Code scanning / CodeQL
Uncontrolled data used in path expression (High)
This path depends on a user-provided value. (Reported twice for this location.)

Copilot Autofix (AI, 6 days ago)

To fix this issue, we want to ensure that the input_model_path used in os.symlink is always inside the trusted LM Studio models directory, so that the path cannot be abused by a malicious user. This can be achieved by normalizing the input path and checking that it starts with the trusted models directory's absolute path. If it does not, an exception should be raised. This check should be performed inside the install method of LMStudioModel before os.symlink is called.

Specifically, in transformerlab/models/lmstudiomodel.py:

  • Inside LMStudioModel.install, after assigning input_model_path, retrieve the root trusted directory (lmstudio_models_dir()), use os.path.abspath + os.path.normpath on both the user-controlled path (input_model_path) and the root, and check that the user-controlled path starts with the root.
  • If the check fails, raise an exception and do not proceed.
  • Add an import of the utility if not already present.

No existing functionality will be broken, and legitimate users are not affected.

Suggested changeset 1: transformerlab/models/lmstudiomodel.py

Autofix patch. Run the following command in your local git repository to apply this patch:
cat << 'EOF' | git apply
diff --git a/transformerlab/models/lmstudiomodel.py b/transformerlab/models/lmstudiomodel.py
--- a/transformerlab/models/lmstudiomodel.py
+++ b/transformerlab/models/lmstudiomodel.py
@@ -61,6 +61,12 @@
         if not input_model_path or not os.path.isfile(input_model_path):
             raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), input_model_path)
 
+        # Validate that input_model_path is inside the trusted lmstudio models dir
+        trusted_root = os.path.abspath(os.path.normpath(lmstudio_models_dir()))
+        normalized_input_path = os.path.abspath(os.path.normpath(input_model_path))
+        if not (normalized_input_path.startswith(trusted_root + os.sep)):
+            raise PermissionError(f"Model file path {input_model_path} is outside of trusted models directory {trusted_root}.")
+
         from lab.dirs import get_models_dir
 
         output_filename = self.id
EOF

Check failure: Code scanning / CodeQL
Uncontrolled data used in path expression (High)
This path depends on a user-provided value.

Copilot Autofix (AI, 6 days ago)

To mitigate path traversal and arbitrary file writes, the best practice is to sanitize and constrain the filenames/paths derived from user input before using them in any filesystem operation. Specifically:

  1. Only allow simple, safe filenames for models (output_filename).
  2. Use werkzeug.utils.secure_filename() to strip dangerous characters and patterns.
  3. Optionally, check that the resulting path after normalization is within the intended workspace/models root directory.

For the provided code, this means:

  • In LMStudioModel.install, before using self.id (output_filename) to construct output_path and link_name, sanitize it via secure_filename.
  • Since secure_filename is only imported in transformerlab/routers/model.py, add an import in transformerlab/models/lmstudiomodel.py.
  • Replace the value of output_filename with the sanitized variant.

Files/regions/lines to change:

  • Add from werkzeug.utils import secure_filename near the top of transformerlab/models/lmstudiomodel.py.
  • In install, before using self.id, sanitize it: output_filename = secure_filename(self.id).
  • Use output_filename as usual below, now guaranteed to be safe.

Suggested changeset 1: transformerlab/models/lmstudiomodel.py

Autofix patch. Run the following command in your local git repository to apply this patch:
cat << 'EOF' | git apply
diff --git a/transformerlab/models/lmstudiomodel.py b/transformerlab/models/lmstudiomodel.py
--- a/transformerlab/models/lmstudiomodel.py
+++ b/transformerlab/models/lmstudiomodel.py
@@ -4,7 +4,7 @@
 import json
 import errno
 import shutil
-
+from werkzeug.utils import secure_filename
 _LM_MODEL_EXTS = (".gguf", ".safetensors", ".pt", ".bin")
 
 
@@ -63,7 +63,7 @@
 
         from lab.dirs import get_models_dir
 
-        output_filename = self.id
+        output_filename = secure_filename(self.id)
         output_path = os.path.join(get_models_dir(), output_filename)
 
         if os.path.exists(output_path):
EOF
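For reference, secure_filename flattens any path structure into a bare filename, which is why the patches above can join its result directly under the models directory. A quick illustrative snippet (assuming Werkzeug is installed):

from werkzeug.utils import secure_filename

print(secure_filename("../../etc/passwd"))  # 'etc_passwd' -- traversal collapsed
print(secure_filename("my model.gguf"))     # 'my_model.gguf' -- spaces replaced
print(secure_filename("../.."))             # ''  -- hence the empty-result check in the earlier patch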

        json_data = await self.get_json_data()

        model_description = {
            "model_id": self.id,
            "model_filename": output_filename,
            "name": self.name,
            "source": self.source,
            "json_data": {
                "uniqueID": self.id,
                "name": self.name,
                "model_filename": output_filename,
                "description": f"LM Studio model {self.source_id_or_path}",
                "source": self.source,
                "architecture": json_data["architecture"],
            },
        }

        model_info_file = os.path.join(output_path, "index.json")
        with open(model_info_file, "w") as f:

Check failure: Code scanning / CodeQL
Uncontrolled data used in path expression (High)
This path depends on a user-provided value.

Copilot Autofix (AI, 6 days ago)

To address this problem, we should sanitize or validate the use of user-controlled data in filesystem paths. The best general-purpose approach for this codebase is to ensure that the output_filename (derived from model_id) is a safe filename. This can be achieved by using werkzeug.utils.secure_filename, which strips dangerous characters and sub-paths (slashes, path traversal, etc.) from filenames, thereby preventing directory traversal and related issues.

The fix should be applied at the point where output_filename is derived (in LMStudioModel.install):

  • Import secure_filename from werkzeug.utils if not already done in this file.
  • Replace the assignment to output_filename with a version that passes self.id through secure_filename.

This ensures no untrusted or unsafe input is used as part of any file or directory name, mitigating the risk of path traversal regardless of any other possible input validation or lack thereof.

Suggested changeset 1: transformerlab/models/lmstudiomodel.py

Autofix patch. Run the following command in your local git repository to apply this patch:
cat << 'EOF' | git apply
diff --git a/transformerlab/models/lmstudiomodel.py b/transformerlab/models/lmstudiomodel.py
--- a/transformerlab/models/lmstudiomodel.py
+++ b/transformerlab/models/lmstudiomodel.py
@@ -4,7 +4,7 @@
 import json
 import errno
 import shutil
-
+from werkzeug.utils import secure_filename
 _LM_MODEL_EXTS = (".gguf", ".safetensors", ".pt", ".bin")
 
 
@@ -63,7 +63,7 @@
 
         from lab.dirs import get_models_dir
 
-        output_filename = self.id
+        output_filename = secure_filename(self.id)
         output_path = os.path.join(get_models_dir(), output_filename)
 
         if os.path.exists(output_path):
EOF
Member comment:
You can probably use this pattern anywhere that it is complaining about user-assigned values for paths.
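A sketch of what that shared pattern could look like when factored into one helper (hypothetical code, not part of this PR; the name and error type are illustrative):

import os


def resolve_under(root: str, untrusted: str) -> str:
    """Join untrusted onto root, refusing any result that escapes root."""
    root_abs = os.path.abspath(root)
    candidate = os.path.abspath(os.path.join(root_abs, untrusted))
    if candidate != root_abs and not candidate.startswith(root_abs + os.sep):
        raise ValueError(f"Path escapes {root_abs}: {untrusted!r}")
    return candidate

Call sites then reduce to one line each, e.g. output_path = resolve_under(get_models_dir(), self.id).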

            json.dump(model_description, f)


def lmstudio_models_dir():
    # Allow an explicit override, otherwise fall back to the default install location.
    try:
        lm_dir = os.environ["LMSTUDIO_MODELS"]
    except KeyError:
        lm_dir = os.path.join(os.path.expanduser("~"), ".lmstudio", "models")

    if os.path.isdir(lm_dir):
        return lm_dir

    # LM Studio is installed, but the models directory has not been created yet.
    if shutil.which("lmstudio"):
        return lm_dir

    return None
5 changes: 4 additions & 1 deletion transformerlab/models/model_helper.py
@@ -10,6 +10,7 @@
 from transformerlab.models import ollamamodel
 from transformerlab.models import huggingfacemodel
 from transformerlab.models import localmodel
+from transformerlab.models import lmstudiomodel

 import traceback

@@ -49,7 +50,7 @@ def list_model_sources():
     Supported strings that can be passed as model_source
     to the functions that follow.
     """
-    return ["huggingface", "ollama"]
+    return ["huggingface", "ollama", "lmstudio"]


 def get_model_by_source_id(model_source: str, model_source_id: str):
@@ -65,6 +66,8 @@ def get_model_by_source_id(model_source: str, model_source_id: str):
                 return ollamamodel.OllamaModel(model_source_id)
             case "huggingface":
                 return huggingfacemodel.HuggingFaceModel(model_source_id)
+            case "lmstudio":
Member comment:
I think you'll also have to add a case for "lmstudio" to the list_models_from_source function so they show up on the import modal.

+                return lmstudiomodel.LMStudioModel(model_source_id)
     except Exception:
         print(f"Caught exception getting model {model_source_id} from {model_source}:")
         traceback.print_exc()
18 changes: 18 additions & 0 deletions transformerlab/plugins/lmstudio_server/index.json
@@ -0,0 +1,18 @@
{
  "name": "LM Studio Server",
Member comment:
Let's pull this for now and discuss if we actually want to add a server or not.

"uniqueId": "lmstudio_server",
"description": "Google LM Studio loads models for inference using LM Studio for generation.",
"plugin-format": "python",
"type": "loader",
"version": "0.0.47",
"supports": ["chat", "completion", "embeddings"],
"files": ["main.py", "setup.sh"],
"parameters": {
"port": {
"title": "Server Port",
"type": "integer",
"default": 1234
}
},
"setup-script": "setup.sh"
}
80 changes: 80 additions & 0 deletions transformerlab/plugins/lmstudio_server/main.py
@@ -0,0 +1,80 @@
"""
LM Studio Server

This plugin integrates LM Studio models into TransformerLab, allowing users to utilize models stored in the LM Studio format.
"""

import argparse
import os
import subprocess
import json
import uuid
from hashlib import sha256

Check failure on line 12 in transformerlab/plugins/lmstudio_server/main.py
GitHub Actions / ruff: Ruff (F401)
transformerlab/plugins/lmstudio_server/main.py:12:21: F401 `hashlib.sha256` imported but unused
from pathlib import Path

Check failure on line 13 in transformerlab/plugins/lmstudio_server/main.py
GitHub Actions / ruff: Ruff (F401)
transformerlab/plugins/lmstudio_server/main.py:13:21: F401 `pathlib.Path` imported but unused
import sys
import lmstudio

Check failure on line 15 in transformerlab/plugins/lmstudio_server/main.py
GitHub Actions / ruff: Ruff (F401)
transformerlab/plugins/lmstudio_server/main.py:15:8: F401 `lmstudio` imported but unused
import time
import requests

worker_id = str(uuid.uuid4())[:8]

LMSTUDIO_STARTUP_TIMEOUT = 180 # seconds

try:
    from transformerlab.plugin import register_process
except ImportError:
    from transformerlab.plugin_sdk.transformerlab.plugin import register_process

parser = argparse.ArgumentParser()
parser.add_argument("--model-path", type=str)
parser.add_argument("--parameters", type=str, default="{}")

args, unknown = parser.parse_known_args()
# model_path can be a hugging face ID or a local file in Transformer Lab
# But LM Studio models are always stored as a local path because
# we are using a specific LM Studio model file
if os.path.exists(args.model_path):
    model_path = args.model_path
else:
    raise FileNotFoundError(
        f"The specified LM Studio model '{args.model_path}' was not found. Please select a valid LM Studio model file to proceed."
    )

llmlab_root_dir = os.getenv("LLM_LAB_ROOT_PATH")

parameters = json.loads(args.parameters)
# Strip the app-side engine-selection keys; the server does not use them:
if "inferenceEngine" in parameters:
    del parameters["inferenceEngine"]

if "inferenceEngineFriendlyName" in parameters:
    del parameters["inferenceEngineFriendlyName"]

# Get plugin directory
real_plugin_dir = os.path.realpath(os.path.dirname(__file__))

port = int(parameters.get("port", 1234))
env = os.environ.copy()
env["LMSTUDIO_HOST"] = f"127.0.0.1:{port}"
print("Starting LM Studio server...", file=sys.stderr)

# Pass the port as separate argv tokens; a single "--port 1234" string would not be parsed as a flag.
process = subprocess.Popen(["lms", "server", "start", "--port", str(port)], env=env)

lmstudio_models_url = f"http://127.0.0.1:{port}/v1/models"
start_time = time.time()
while True:
    try:
        response = requests.get(lmstudio_models_url)
        if response.status_code == 200:
            print("LM Studio server is up and running.", file=sys.stderr)
            break
    except requests.ConnectionError:
        pass
    if time.time() - start_time > LMSTUDIO_STARTUP_TIMEOUT:
        print("Timeout waiting for LM Studio server to start.", file=sys.stderr)
        process.terminate()
        sys.exit(1)
    time.sleep(1)

register_process(process.pid)
1 change: 1 addition & 0 deletions transformerlab/plugins/lmstudio_server/setup.sh
@@ -0,0 +1 @@
uv pip install lmstudio