mirror of
https://github.com/mii443/AzooKeyKanaKanjiConverter.git
synced 2025-12-03 02:58:27 +00:00
log file
This commit is contained in:
@@ -2,6 +2,28 @@ package import Foundation
|
||||
import SwiftUtils
|
||||
import EfficientNGram
|
||||
|
||||
/// Formats `items` into a timestamped log entry, echoes it to stdout and the
/// project `debug` channel, and appends it to a per-process temp log file.
/// - Parameters:
///   - items: Values to log; each is rendered with string interpolation.
///   - separator: Inserted between rendered items (default: single space).
///   - terminator: Appended after the message (default: newline).
private func debugLog(_ items: Any..., separator: String = " ", terminator: String = "\n") {
    let message = items.map { "\($0)" }.joined(separator: separator) + terminator
    let timestamp = ISO8601DateFormatter().string(from: Date())
    let logEntry = "[\(timestamp)] \(message)"

    // logEntry already carries the caller's terminator, so print adds none.
    print(logEntry, terminator: "")
    debug(message)

    let debugLogFile = FileManager.default.temporaryDirectory.appendingPathComponent("zenz_debug_\(ProcessInfo.processInfo.processIdentifier).log")
    guard let data = logEntry.data(using: .utf8) else { return }
    // Try to open for appending first. Attempting the open (instead of a
    // fileExists check followed by a create/open) removes the check-then-act
    // race where another thread creates or removes the file in between.
    if let fileHandle = try? FileHandle(forWritingTo: debugLogFile) {
        // defer guarantees the handle is closed on every exit path.
        defer { fileHandle.closeFile() }
        fileHandle.seekToEndOfFile()
        fileHandle.write(data)
    } else {
        // Open failed, so the file presumably does not exist yet: create it
        // seeded with this entry. Best-effort, matching the original intent.
        FileManager.default.createFile(atPath: debugLogFile.path, contents: data)
    }
}
|
||||
|
||||
@MainActor package final class Zenz {
|
||||
package var resourceURL: URL
|
||||
package let ngl: Int
|
||||
@@ -9,8 +31,7 @@ import EfficientNGram
|
||||
init(resourceURL: URL, ngl: Int = 0) throws {
|
||||
self.resourceURL = resourceURL
|
||||
self.ngl = ngl
|
||||
print("[Zenz] init called with ngl: \(ngl)")
|
||||
debug("Zenz.init called with ngl: \(ngl)")
|
||||
debugLog("[Zenz] init called with resourceURL: \(resourceURL), ngl: \(ngl)")
|
||||
do {
|
||||
#if canImport(Darwin)
|
||||
if #available(iOS 16, macOS 13, *) {
|
||||
@@ -23,8 +44,7 @@ import EfficientNGram
|
||||
// this is not percent-encoded
|
||||
self.zenzContext = try ZenzContext.createContext(path: resourceURL.path, ngl: ngl)
|
||||
#endif
|
||||
print("[Zenz] Loaded model \(resourceURL.lastPathComponent)")
|
||||
debug("Loaded model \(resourceURL.lastPathComponent)")
|
||||
debugLog("[Zenz] Loaded model \(resourceURL.lastPathComponent) successfully with ngl=\(ngl)")
|
||||
} catch {
|
||||
throw error
|
||||
}
|
||||
|
||||
@@ -9,6 +9,30 @@ import Algorithms
|
||||
import Foundation
|
||||
import EfficientNGram
|
||||
|
||||
/// Per-process debug log destination in the temp directory; the PID suffix
/// keeps concurrent processes from interleaving writes in one file.
private let debugLogFile = FileManager.default.temporaryDirectory.appendingPathComponent("zenz_debug_\(ProcessInfo.processInfo.processIdentifier).log")

/// Cached timestamp formatter. ISO8601DateFormatter is expensive to create,
/// and Apple documents it as thread-safe, so one shared instance suffices.
private let debugLogDateFormatter = ISO8601DateFormatter()

/// Formats `items` into a timestamped log entry, echoes it to stdout and the
/// project `debug` channel, and appends it to `debugLogFile`.
/// - Parameters:
///   - items: Values to log; each is rendered with string interpolation.
///   - separator: Inserted between rendered items (default: single space).
///   - terminator: Appended after the message (default: newline).
private func debugLog(_ items: Any..., separator: String = " ", terminator: String = "\n") {
    let message = items.map { "\($0)" }.joined(separator: separator) + terminator
    let timestamp = debugLogDateFormatter.string(from: Date())
    let logEntry = "[\(timestamp)] \(message)"

    // logEntry already carries the caller's terminator, so print adds none.
    print(logEntry, terminator: "")
    debug(message)

    guard let data = logEntry.data(using: .utf8) else { return }
    // Try to open for appending first. Attempting the open (instead of a
    // fileExists check followed by a create/open) removes the check-then-act
    // race where another thread creates or removes the file in between.
    if let fileHandle = try? FileHandle(forWritingTo: debugLogFile) {
        // defer guarantees the handle is closed on every exit path.
        defer { fileHandle.closeFile() }
        fileHandle.seekToEndOfFile()
        fileHandle.write(data)
    } else {
        // Open failed, so the file presumably does not exist yet: create it
        // seeded with this entry. Best-effort, matching the original intent.
        FileManager.default.createFile(atPath: debugLogFile.path, contents: data)
        print("[DEBUG] Created log file at: \(debugLogFile.path)")
    }
}
|
||||
|
||||
struct FixedSizeHeap<Element: Comparable> {
|
||||
private var size: Int
|
||||
private var heap: Heap<Element>
|
||||
@@ -102,12 +126,10 @@ final class ZenzContext {
|
||||
}
|
||||
|
||||
static func createContext(path: String, ngl: Int = 0) throws -> ZenzContext {
|
||||
print("[ZenzContext] createContext called with ngl: \(ngl)")
|
||||
debug("ZenzContext.createContext called with ngl: \(ngl)")
|
||||
debugLog("[ZenzContext] createContext called with path: \(path), ngl: \(ngl)")
|
||||
llama_backend_init()
|
||||
var model_params = llama_model_default_params()
|
||||
print("[ZenzContext] Default model_params.n_gpu_layers: \(model_params.n_gpu_layers)")
|
||||
debug("Default model_params.n_gpu_layers: \(model_params.n_gpu_layers)")
|
||||
debugLog("[ZenzContext] Default model_params.n_gpu_layers: \(model_params.n_gpu_layers)")
|
||||
|
||||
// IMPORTANT: Set fields in the same order as C struct definition
|
||||
// model_params.devices is already NULL by default
|
||||
@@ -118,23 +140,26 @@ final class ZenzContext {
|
||||
model_params.use_mmap = true
|
||||
|
||||
if ngl > 0 {
|
||||
print("[ZenzContext] Requesting \(ngl) layers to be offloaded to GPU")
|
||||
debug("Requesting \(ngl) layers to be offloaded to GPU")
|
||||
debugLog("[ZenzContext] Requesting \(ngl) layers to be offloaded to GPU")
|
||||
}
|
||||
print("[ZenzContext] After setting, model_params.n_gpu_layers: \(model_params.n_gpu_layers)")
|
||||
debug("After setting, model_params.n_gpu_layers: \(model_params.n_gpu_layers)")
|
||||
print("[ZenzContext] Loading model from: \(path)")
|
||||
debug("Loading model from: \(path)")
|
||||
// Try to verify the struct layout
|
||||
debugLog("[ZenzContext] After setting, model_params.n_gpu_layers: \(model_params.n_gpu_layers)")
|
||||
debugLog("[ZenzContext] Loading model from: \(path)")
|
||||
|
||||
// Log struct details
|
||||
withUnsafePointer(to: &model_params) { ptr in
|
||||
debug("model_params address: \(ptr)")
|
||||
debugLog("[ZenzContext] model_params address: \(ptr)")
|
||||
withUnsafeBytes(of: model_params) { bytes in
|
||||
let hexString = bytes.map { String(format: "%02X", $0) }.joined(separator: " ")
|
||||
debugLog("[ZenzContext] model_params bytes (first 32): \(String(hexString.prefix(95)))...")
|
||||
}
|
||||
}
|
||||
debugLog("[ZenzContext] Calling llama_model_load_from_file...")
|
||||
let model = llama_model_load_from_file(path, model_params)
|
||||
guard let model else {
|
||||
debug("Could not load model at \(path)")
|
||||
debugLog("[ZenzContext] ERROR: Could not load model at \(path)")
|
||||
throw ZenzError.couldNotLoadModel(path: path)
|
||||
}
|
||||
debug("Model loaded successfully")
|
||||
debugLog("[ZenzContext] Model loaded successfully")
|
||||
|
||||
let context = llama_init_from_model(model, ctx_params)
|
||||
guard let context else {
|
||||
|
||||
@@ -68,31 +68,52 @@ import EfficientNGram
|
||||
return (mode, baseModel, personalModel)
|
||||
}
|
||||
|
||||
/// Formats `items` into a timestamped log entry, echoes it to stdout and the
/// project `debug` channel, and appends it to a per-process temp log file.
/// - Parameters:
///   - items: Values to log; each is rendered with string interpolation.
///   - separator: Inserted between rendered items (default: single space).
///   - terminator: Appended after the message (default: newline).
private func debugLog(_ items: Any..., separator: String = " ", terminator: String = "\n") {
    let message = items.map { "\($0)" }.joined(separator: separator) + terminator
    let timestamp = ISO8601DateFormatter().string(from: Date())
    let logEntry = "[\(timestamp)] \(message)"

    // logEntry already carries the caller's terminator, so print adds none.
    print(logEntry, terminator: "")
    debug(message)

    let debugLogFile = FileManager.default.temporaryDirectory.appendingPathComponent("zenz_debug_\(ProcessInfo.processInfo.processIdentifier).log")
    guard let data = logEntry.data(using: .utf8) else { return }
    // Try to open for appending first. Attempting the open (instead of a
    // fileExists check followed by a create/open) removes the check-then-act
    // race where another thread creates or removes the file in between.
    if let fileHandle = try? FileHandle(forWritingTo: debugLogFile) {
        // defer guarantees the handle is closed on every exit path.
        defer { fileHandle.closeFile() }
        fileHandle.seekToEndOfFile()
        fileHandle.write(data)
    } else {
        // Open failed, so the file presumably does not exist yet: create it
        // seeded with this entry. Best-effort, matching the original intent.
        FileManager.default.createFile(atPath: debugLogFile.path, contents: data)
    }
}
|
||||
|
||||
/// Returns a `Zenz` model for `modelURL`, reusing the cached instance when
/// both the weight URL and the GPU-layer count (`ngl`) match; otherwise loads
/// a fresh model. Updates `zenzStatus` with the load result either way.
/// - Returns: The loaded model, or `nil` if construction threw.
package func getModel(modelURL: URL, ngl: Int = 0) -> Zenz? {
    print("[KanaKanjiConverter] getModel called with ngl: \(ngl)")
    debug("KanaKanjiConverter.getModel called with ngl: \(ngl)")
    debugLog("[KanaKanjiConverter] getModel called with modelURL: \(modelURL), ngl: \(ngl)")

    // Fast path: the cached model is only valid if both URL and ngl match,
    // since ngl affects how many layers were offloaded at load time.
    if let cached = self.zenz, cached.resourceURL == modelURL, cached.ngl == ngl {
        self.zenzStatus = "load \(modelURL.absoluteString)"
        print("[KanaKanjiConverter] Returning cached model with matching ngl: \(ngl)")
        debug("Returning cached model with matching ngl: \(ngl)")
        debugLog("[KanaKanjiConverter] Returning cached model with matching ngl: \(ngl)")
        return cached
    }

    // Slow path: (re)load the model from disk.
    do {
        print("[KanaKanjiConverter] Creating new model with ngl: \(ngl)")
        debug("Creating new model with ngl: \(ngl)")
        debugLog("[KanaKanjiConverter] Creating new model with ngl: \(ngl)")
        self.zenz = try Zenz(resourceURL: modelURL, ngl: ngl)
        self.zenzStatus = "load \(modelURL.absoluteString)"
        return self.zenz
    } catch {
        // Record the failure in the status string; callers treat nil as
        // "model unavailable".
        self.zenzStatus = "load \(modelURL.absoluteString) " + error.localizedDescription
        debugLog("[KanaKanjiConverter] ERROR creating model: \(error)")
        return nil
    }
}
|
||||
|
||||
public func predictNextCharacter(leftSideContext: String, count: Int, options: ConvertRequestOptions) -> [(character: Character, value: Float)] {
|
||||
debugLog("[KanaKanjiConverter] predictNextCharacter called with zenzaiMode.ngl: \(options.zenzaiMode.ngl)")
|
||||
guard let zenz = self.getModel(modelURL: options.zenzaiMode.weightURL, ngl: options.zenzaiMode.ngl) else {
|
||||
print("zenz-v2 model unavailable")
|
||||
debugLog("[KanaKanjiConverter] zenz-v2 model unavailable")
|
||||
return []
|
||||
}
|
||||
guard options.zenzaiMode.versionDependentMode.version == .v2 else {
|
||||
@@ -619,6 +640,9 @@ import EfficientNGram
|
||||
}
|
||||
|
||||
// FIXME: enable cache based zenzai
|
||||
if zenzaiMode.enabled {
|
||||
debugLog("[KanaKanjiConverter] convertToLattice: zenzaiMode enabled with ngl: \(zenzaiMode.ngl)")
|
||||
}
|
||||
if zenzaiMode.enabled, let model = self.getModel(modelURL: zenzaiMode.weightURL, ngl: zenzaiMode.ngl) {
|
||||
let (result, nodes, cache) = self.converter.all_zenzai(
|
||||
inputData,
|
||||
|
||||
Reference in New Issue
Block a user