1 change: 1 addition & 0 deletions OllamaKit
Submodule OllamaKit added at e0054c
365 changes: 365 additions & 0 deletions Sources/LocalLLM/LocalLLMManager.swift
@@ -0,0 +1,365 @@
//
// LocalLLMManager.swift
// Invisibility
//
// Central manager for local LLM providers (Ollama, LM Studio)
//

import Combine
import Foundation
import os

/// Central manager for local LLM providers
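///
/// A minimal usage sketch (the model ID "llama3" is illustrative; it assumes an
/// Ollama daemon is listening on the default port with that model already pulled):
///
/// ```swift
/// let manager = LocalLLMManager.shared
/// manager.enableOllama()              // localhost:11434, makes Ollama the active provider
/// manager.selectedModelId = "llama3"
/// ```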
@MainActor
public final class LocalLLMManager: ObservableObject {
// MARK: - Singleton

public static let shared = LocalLLMManager()

// MARK: - Properties

private let logger = Logger(subsystem: "Invisibility", category: "LocalLLMManager")

/// Ollama provider instance
@Published public private(set) var ollamaProvider: OllamaProvider

/// LM Studio provider instance
@Published public private(set) var lmStudioProvider: LMStudioProvider

/// Currently active provider type
@Published public var activeProviderType: LocalLLMProviderType? {
didSet {
saveSettings()
}
}

/// Currently selected model ID
@Published public var selectedModelId: String? {
didSet {
saveSettings()
}
}

/// All available models from enabled providers
@Published public private(set) var allAvailableModels: [LocalLLMModel] = []

/// Combined connection status for local LLMs
@Published public private(set) var isAnyProviderConnected: Bool = false

private var cancellables = Set<AnyCancellable>()
private static let settingsKey = "LocalLLMSettings"

// MARK: - Initialization

private init() {
// Load saved settings
let settings = Self.loadSettings()

// Initialize providers with saved configuration
ollamaProvider = OllamaProvider(configuration: settings.ollamaConfig)
lmStudioProvider = LMStudioProvider(configuration: settings.lmStudioConfig)
activeProviderType = settings.activeProviderType
selectedModelId = settings.selectedModelId

setupObservers()

// Check connections on init
Task {
await refreshConnections()
}
}

// MARK: - Setup

private func setupObservers() {
// Observe Ollama connection status
ollamaProvider.connectionStatusPublisher
.receive(on: DispatchQueue.main)
.sink { [weak self] _ in
self?.updateCombinedStatus()
}
.store(in: &cancellables)

// Observe LM Studio connection status
lmStudioProvider.connectionStatusPublisher
.receive(on: DispatchQueue.main)
.sink { [weak self] _ in
self?.updateCombinedStatus()
}
.store(in: &cancellables)

// Observe Ollama models
ollamaProvider.availableModelsPublisher
.receive(on: DispatchQueue.main)
.sink { [weak self] _ in
self?.updateAllModels()
}
.store(in: &cancellables)

// Observe LM Studio models
lmStudioProvider.availableModelsPublisher
.receive(on: DispatchQueue.main)
.sink { [weak self] _ in
self?.updateAllModels()
}
.store(in: &cancellables)
}

private func updateCombinedStatus() {
isAnyProviderConnected =
ollamaProvider.connectionStatus.isConnected ||
lmStudioProvider.connectionStatus.isConnected
}

private func updateAllModels() {
var models: [LocalLLMModel] = []

if ollamaProvider.configuration.isEnabled {
models.append(contentsOf: ollamaProvider.availableModels)
}

if lmStudioProvider.configuration.isEnabled {
models.append(contentsOf: lmStudioProvider.availableModels)
}

allAvailableModels = models
}

// MARK: - Provider Access

/// Get the currently active provider
public var activeProvider: LocalLLMProvider? {
guard let type = activeProviderType else { return nil }
return provider(for: type)
}

/// Get provider for a specific type
public func provider(for type: LocalLLMProviderType) -> LocalLLMProvider {
switch type {
case .ollama:
return ollamaProvider
case .lmStudio:
return lmStudioProvider
}
}

/// Get the currently selected model
public var selectedModel: LocalLLMModel? {
guard let modelId = selectedModelId else { return nil }
return allAvailableModels.first { $0.id == modelId }
}

// MARK: - Connection Management

/// Refresh connections for all enabled providers
public func refreshConnections() async {
logger.debug("Refreshing local LLM connections")

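// Run both connection checks concurrently; await both before returning.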
async let ollamaTask: () = refreshOllamaConnection()
async let lmStudioTask: () = refreshLMStudioConnection()

_ = await (ollamaTask, lmStudioTask)
}

/// Refresh Ollama connection
public func refreshOllamaConnection() async {
guard ollamaProvider.configuration.isEnabled else {
logger.debug("Ollama is disabled, skipping connection check")
return
}

let connected = await ollamaProvider.checkConnection()
logger.info("Ollama connection: \(connected ? "success" : "failed")")
}

/// Refresh LM Studio connection
public func refreshLMStudioConnection() async {
guard lmStudioProvider.configuration.isEnabled else {
logger.debug("LM Studio is disabled, skipping connection check")
return
}

let connected = await lmStudioProvider.checkConnection()
logger.info("LM Studio connection: \(connected ? "success" : "failed")")
}

// MARK: - Configuration

/// Update Ollama configuration
public func updateOllamaConfiguration(_ config: LocalLLMConfiguration) {
ollamaProvider.configuration = config

if config.isEnabled {
Task {
await refreshOllamaConnection()
}
}

updateAllModels()
saveSettings()
}

/// Update LM Studio configuration
public func updateLMStudioConfiguration(_ config: LocalLLMConfiguration) {
lmStudioProvider.configuration = config

if config.isEnabled {
Task {
await refreshLMStudioConnection()
}
}

updateAllModels()
saveSettings()
}

/// Enable/disable a provider
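///
/// For example, `setProviderEnabled(.ollama, enabled: true)` flips the flag on a
/// copy of the current configuration and routes it through
/// `updateOllamaConfiguration(_:)`, which also triggers a connection refresh.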
public func setProviderEnabled(_ type: LocalLLMProviderType, enabled: Bool) {
switch type {
case .ollama:
var config = ollamaProvider.configuration
config.isEnabled = enabled
updateOllamaConfiguration(config)
case .lmStudio:
var config = lmStudioProvider.configuration
config.isEnabled = enabled
updateLMStudioConfiguration(config)
}
}

// MARK: - Chat

/// Send a chat request using the active provider and selected model
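///
/// A sketch of consuming the stream (assumes `LocalLLMMessage` offers a
/// `role:`/`content:` initializer and that the caller keeps a `cancellables` set):
///
/// ```swift
/// LocalLLMManager.shared
///     .chat(messages: [LocalLLMMessage(role: .user, content: "Hello")])
///     .sink(
///         receiveCompletion: { completion in print("stream ended: \(completion)") },
///         receiveValue: { chunk in print(chunk) }
///     )
///     .store(in: &cancellables)
/// ```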
public func chat(
messages: [LocalLLMMessage],
options: LocalLLMOptions = .default
) -> AnyPublisher<LocalLLMChatResponseChunk, Error> {
guard let provider = activeProvider else {
return Fail(error: LocalLLMError.notConnected).eraseToAnyPublisher()
}

guard let modelId = selectedModelId else {
return Fail(error: LocalLLMError.modelNotFound("No model selected")).eraseToAnyPublisher()
}

let request = LocalLLMChatRequest(
model: modelId,
messages: messages,
options: options,
stream: true
)

return provider.chat(request: request)
}

/// Send a chat request with specific provider and model
public func chat(
provider type: LocalLLMProviderType,
model: String,
messages: [LocalLLMMessage],
options: LocalLLMOptions = .default,
stream: Bool = true
) -> AnyPublisher<LocalLLMChatResponseChunk, Error> {
// Qualify with `self.` so lookup resolves to the `provider(for:)` method,
// not the local constant being declared on this line.
let provider = self.provider(for: type)

let request = LocalLLMChatRequest(
model: model,
messages: messages,
options: options,
stream: stream
)

return provider.chat(request: request)
}

/// Cancel all ongoing requests
public func cancelAllRequests() {
ollamaProvider.cancelAllRequests()
lmStudioProvider.cancelAllRequests()
}

// MARK: - Settings Persistence
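// Settings are stored as JSON in UserDefaults under the "LocalLLMSettings" key.
// A representative payload (values illustrative; the shape of
// `activeProviderType` assumes LocalLLMProviderType encodes as its raw string):
//
//   {
//     "ollamaConfig":   { "host": "localhost", "port": 11434, "isEnabled": true },
//     "lmStudioConfig": { "host": "localhost", "port": 1234,  "isEnabled": false },
//     "activeProviderType": "ollama",
//     "selectedModelId": "llama3"
//   }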

private struct LocalLLMSettings: Codable {
var ollamaConfig: LocalLLMConfiguration
var lmStudioConfig: LocalLLMConfiguration
var activeProviderType: LocalLLMProviderType?
var selectedModelId: String?
}

private static func loadSettings() -> LocalLLMSettings {
guard let data = UserDefaults.standard.data(forKey: settingsKey),
let settings = try? JSONDecoder().decode(LocalLLMSettings.self, from: data)
else {
// Return default settings
return LocalLLMSettings(
ollamaConfig: LocalLLMConfiguration(
host: "localhost",
port: 11434,
isEnabled: false
),
lmStudioConfig: LocalLLMConfiguration(
host: "localhost",
port: 1234,
isEnabled: false
),
activeProviderType: nil,
selectedModelId: nil
)
}
return settings
}

private func saveSettings() {
let settings = LocalLLMSettings(
ollamaConfig: ollamaProvider.configuration,
lmStudioConfig: lmStudioProvider.configuration,
activeProviderType: activeProviderType,
selectedModelId: selectedModelId
)

if let data = try? JSONEncoder().encode(settings) {
UserDefaults.standard.set(data, forKey: Self.settingsKey)
logger.debug("Saved local LLM settings")
}
}
}

// MARK: - Convenience Extensions

public extension LocalLLMManager {
/// Check if local LLM is currently usable
var isLocalLLMAvailable: Bool {
isAnyProviderConnected && selectedModelId != nil
}

/// Get a display name for the current local LLM configuration
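/// Reads like "ollama: llama3" when a model is selected (the prefix comes from
/// the provider's `rawValue`, so its exact casing depends on that enum).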
var displayName: String {
guard let model = selectedModel else {
return "Local LLM (Not configured)"
}
return "\(model.provider.rawValue): \(model.name)"
}

/// Quick setup for Ollama with default settings
func enableOllama(host: String = "localhost", port: Int = 11434) {
let config = LocalLLMConfiguration(
host: host,
port: port,
isEnabled: true
)
updateOllamaConfiguration(config)
activeProviderType = .ollama
}

/// Quick setup for LM Studio with default settings
func enableLMStudio(host: String = "localhost", port: Int = 1234) {
let config = LocalLLMConfiguration(
host: host,
port: port,
isEnabled: true
)
updateLMStudioConfiguration(config)
activeProviderType = .lmStudio
}
}