diff --git a/sdk/cs/src/FoundryLocalManager.cs b/sdk/cs/src/FoundryLocalManager.cs
index 737c965..8e5b206 100644
--- a/sdk/cs/src/FoundryLocalManager.cs
+++ b/sdk/cs/src/FoundryLocalManager.cs
@@ -51,23 +51,15 @@ public partial class FoundryLocalManager : IDisposable, IAsyncDisposable
 
     // Sees if the service is already running
     public bool IsServiceRunning => _serviceUri != null;
 
-    public static async Task<FoundryLocalManager> StartModelAsync(string aliasOrModelId, DeviceType? device = null, CancellationToken ct = default)
+    public async Task<ModelInfo> StartModelAsync(string aliasOrModelId, DeviceType? device = null, CancellationToken ct = default)
     {
-        var manager = new FoundryLocalManager();
-        try
-        {
-            await manager.StartServiceAsync(ct);
-            var modelInfo = await manager.GetModelInfoAsync(aliasOrModelId, device, ct)
+        await StartServiceAsync(ct);
+        var modelInfo = await GetModelInfoAsync(aliasOrModelId, device, ct)
            ?? throw new InvalidOperationException($"Model {aliasOrModelId} not found in catalog.");
-            await manager.DownloadModelAsync(modelInfo.ModelId, device: device, token: null, force: false,ct: ct);
-            await manager.LoadModelAsync(aliasOrModelId, device: device, ct: ct);
-            return manager;
-        }
-        catch
-        {
-            manager.Dispose();
-            throw;
-        }
+        await DownloadModelAsync(modelInfo.ModelId, device: device, token: null, force: false, ct: ct);
+        await LoadModelAsync(aliasOrModelId, device: device, ct: ct);
+        return modelInfo;
+    }
 
     public async Task StartServiceAsync(CancellationToken ct = default)
diff --git a/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs b/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs
index f8f4003..29f5236 100644
--- a/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs
+++ b/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs
@@ -521,6 +521,69 @@ public async Task LoadModelAsync_Succeeds_AndPassesEpOverrideWhenCudaPresent()
         Assert.Equal("model-4-generic-gpu:1", result.ModelId);
     }
 
+    [Fact]
+    public async Task 
StartModelAsync_Succeeds_WhenModelIsInCatalogModelsCache()
+    {
+        // Arrange
+        var modelId = "model1";
+        var model = new ModelInfo
+        {
+            ModelId = modelId,
+            Alias = "alias1",
+            Uri = "http://model",
+            ProviderType = "huggingface",
+            Runtime = new Runtime { DeviceType = DeviceType.GPU }
+        };
+
+        _mockHttp.When("/openai/models").Respond("application/json", $"[\"{modelId}\"]");
+        _mockHttp.When(HttpMethod.Get, $"/openai/load/{modelId}*").Respond("application/json", "{}");
+
+        typeof(FoundryLocalManager)
+            .GetField("_catalogModels", BindingFlags.NonPublic | BindingFlags.Instance)!
+            .SetValue(_manager, new List<ModelInfo> { model });
+
+        // Act
+        var result = await _manager.StartModelAsync(modelId);
+
+        // Assert
+        Assert.NotNull(result);
+        Assert.Equal(modelId, result.ModelId);
+    }
+
+    [Fact]
+    public async Task StartModelAsync_Succeeds_WhenModelIsDownloadedButNotInCatalogModelsCache()
+    {
+        // Arrange
+        var modelId = "model1";
+        var model = new ModelInfo
+        {
+            ModelId = modelId,
+            Alias = "alias1",
+            Uri = "http://model",
+            ProviderType = "huggingface",
+            Runtime = new Runtime { DeviceType = DeviceType.GPU }
+        };
+
+        var mockJsonResponse = "some log text... {\"success\": true, \"errorMessage\": null}";
+        _mockHttp.When("/openai/download").Respond("application/json", mockJsonResponse);
+        _mockHttp.When("/openai/models").Respond("application/json", $"[\"{modelId}\"]");
+        _mockHttp.When(HttpMethod.Get, $"/openai/load/{modelId}*").Respond("application/json", "{}");
+
+        var catalogJson = JsonSerializer.Serialize(new List<ModelInfo> { model });
+        _mockHttp.When(HttpMethod.Get, "/foundry/list").Respond("application/json", catalogJson);
+
+        typeof(FoundryLocalManager)
+            .GetField("_catalogModels", BindingFlags.NonPublic | BindingFlags.Instance)!
+            .SetValue(_manager, null);
+
+        // Act
+        var result = await _manager.StartModelAsync(modelId);
+
+        // Assert
+        Assert.NotNull(result);
+        Assert.Equal(modelId, result.ModelId);
+    }
+
     [Fact]
     public async Task LoadModelAsync_ThrowsIfNotInCache()
     {