+ );
+}
\ No newline at end of file
diff --git a/src/data/locales/da/Benchmark.json b/src/data/locales/da/Benchmark.json
new file mode 100644
index 0000000..738869b
--- /dev/null
+++ b/src/data/locales/da/Benchmark.json
@@ -0,0 +1,18 @@
+{
+ "title": "Electron BitNet Benchmarking Tool",
+ "description": "Benchmark din Microsoft BitNet 1-bit LLM-model nedenfor, idet du husker på, at større testvariabler tager længere tid at køre.",
+ "commandOptions": "Kommandoindstillinger",
+ "numberOfTokens": "Antal tokens",
+ "numberOfTokensInfo": "Angiv antallet af tokens, der skal genereres under benchmark.",
+ "model": "Model",
+ "modelInfo": "Indtast stien til den kvantiserede '{{fileFormat}}' modelfil genereret ved hjælp af Microsofts BitNet '{{script}}' script.",
+ "threads": "Tråde",
+ "threadsInfo": "Angiv antallet af tråde, der skal bruges til benchmark.",
+ "promptLength": "Spørglængde",
+ "promptLengthInfo": "Angiv længden af den prompt, der skal genereres tekst fra.",
+ "runBenchmark": "Kør Benchmark",
+ "stopBenchmark": "Stop Benchmark",
+ "log": "Benchmark log",
+ "license": "Licenseret under {{license}}",
+ "builtWith": "Bygget med"
+}
\ No newline at end of file
diff --git a/src/data/locales/da/Home.json b/src/data/locales/da/Home.json
index 6bbe5ad..72d4c8c 100644
--- a/src/data/locales/da/Home.json
+++ b/src/data/locales/da/Home.json
@@ -1,22 +1,22 @@
{
- "title": "Electron BitNet Inference Tool",
- "description": "Microsoft udgav bitnet.cpp som deres officielle inferensramme for 1-bit LLM'er (f.eks. BitNet b1.58), som kører på CPU'er; ",
- "commandOptions": "Kommandoindstillinger",
- "numberOfTokens": "Antal tokens at forudsige",
- "model": "Model",
- "threads": "Tråde",
- "contextSize": "Kontekststørrelse",
- "temperature": "Temperatur",
- "prompt": "Hurtig",
- "numberOfTokensInfo": "Dette er mængden af tokens (ord), der skal genereres ved at køre inferensrammen.",
- "modelInfo": "Indtast stien til den kvantiserede '{{fileFormat}}' modelfil genereret ved hjælp af Microsofts BitNet '{{script}}' script.",
- "threadsInfo": "Antallet af tråde, der skal bruges til at køre slutningen. ",
- "contextSizeInfo": "Størrelsen af promptkonteksten. ",
- "temperatureInfo": "En hyperparameter, der styrer tilfældigheden af den genererede tekst. ",
- "promptInfo": "Prompten til at generere tekst fra. ",
- "runInference": "Kør Inference",
- "stopInference": "Stop inferens",
- "response": "Svar",
- "license": "{{license}} Licenseret kode",
- "builtWith": "bygget med"
- }
\ No newline at end of file
+ "title": "Electron BitNet Inference Tool",
+ "description": "Microsoft frigav BitNet som deres officielle inferensramme for 1-bit LLM'er, der kører på CPU'er, prøv det nedenfor!",
+ "commandOptions": "Kommandoindstillinger",
+ "numberOfTokens": "Antal tokens at forudsige",
+ "model": "Model",
+ "threads": "Tråde",
+ "contextSize": "Kontekststørrelse",
+ "temperature": "Temperatur",
+ "prompt": "Hurtig",
+ "numberOfTokensInfo": "Dette er mængden af tokens (ord), der skal genereres ved at køre slutningsrammen.",
+ "modelInfo": "Indtast stien til den kvantiserede '{{fileFormat}}' modelfil genereret ved hjælp af Microsofts BitNet '{{script}}' script.",
+ "threadsInfo": "Antallet af tråde, der skal bruges til at køre slutningen, begrænset til antallet af tilgængelige tråde på CPU'en.",
+ "contextSizeInfo": "Størrelsen af promptkonteksten bestemmer, hvor meget af prompten, der tages i betragtning under inferens.",
+ "temperatureInfo": "Et hyperparameter, der styrer tilfældigheden af den genererede tekst, lavere værdier gør teksten mere deterministisk.",
+ "promptInfo": "Dette er den indledende tekst, som modellen vil bruge til at begynde at generere output.",
+ "runInference": "Kør Inference",
+ "stopInference": "Stop inferens",
+ "response": "Svar",
+ "license": "{{license}} Licenseret kode",
+ "builtWith": "bygget med"
+}
\ No newline at end of file
diff --git a/src/data/locales/da/PageHeader.json b/src/data/locales/da/PageHeader.json
index 622818b..bbf209d 100644
--- a/src/data/locales/da/PageHeader.json
+++ b/src/data/locales/da/PageHeader.json
@@ -11,7 +11,10 @@
"korean": "koreansk ({{locale}})",
"portuguese": "portugisisk ({{locale}})",
"thai": "Thai ({{locale}})",
+ "taiwanese": "taiwansk ({{locale}})",
"index": "Inferens Dashboard",
+ "benchmark": "Benchmark modeller",
+ "perplexity": "Beregn forvirring",
"back": "Gå tilbage",
"llmFunctionality": "LLM funktionalitet",
"about": "Om"
diff --git a/src/data/locales/da/Perplexity.json b/src/data/locales/da/Perplexity.json
new file mode 100644
index 0000000..df79993
--- /dev/null
+++ b/src/data/locales/da/Perplexity.json
@@ -0,0 +1,24 @@
+{
+ "title": "Electron BitNet Perplexity Tool",
+ "description": "Beregn forvirringen (modellens tillid til at forudsige det næste ord) af din BitNet-model nedenfor.",
+ "commandOptions": "Kommandoindstillinger",
+ "prompt": "Hurtig",
+ "promptInfo": "Dette er den indledende tekst, som modellen vil bruge til at begynde at generere output.",
+ "model": "Model",
+ "modelInfo": "Indtast stien til den kvantiserede '{{fileFormat}}' modelfil genereret ved hjælp af Microsofts '{{script}}' script.",
+ "threads": "Tråde",
+ "threadsInfo": "Antallet af tråde, der skal bruges til at køre forvirringsberegningen.",
+ "contextSize": "Kontekststørrelse",
+ "contextSizeInfo": "Størrelsen af promptkonteksten bestemmer, hvor meget af prompten, der tages i betragtning under forvirringsberegningen.",
+ "pplStride": "Forvirring skridt",
+ "pplStrideInfo": "Skridt for forvirring beregning.",
+ "pplOutputType": "Perplexity Output Type",
+ "pplOutputTypeInfo": "Outputtype til forvirringsberegning.",
+ "runPerplexity": "Beregn forvirring",
+ "stopPerplexity": "Stop beregningen",
+ "log": "Beregnet perplexitetsresultat",
+ "error": "Fejl",
+ "license": "{{license}} Licenseret kode",
+ "builtWith": "bygget med",
+ "insufficientPromptTokens": "Utilstrækkelige prompt-tokens, dobbelt kontekststørrelse i tokens er påkrævet for at fortsætte."
+ }
\ No newline at end of file
diff --git a/src/data/locales/de/Benchmark.json b/src/data/locales/de/Benchmark.json
new file mode 100644
index 0000000..7bc174d
--- /dev/null
+++ b/src/data/locales/de/Benchmark.json
@@ -0,0 +1,18 @@
+{
+ "title": "Electron BitNet Benchmarking-Tool",
+ "description": "Führen Sie unten einen Vergleich Ihres Microsoft BitNet 1-Bit-LLM-Modells durch. Beachten Sie dabei, dass die Ausführung größerer Testvariablen länger dauert.",
+ "commandOptions": "Befehlsoptionen",
+ "numberOfTokens": "Anzahl der Token",
+ "numberOfTokensInfo": "Geben Sie die Anzahl der Token an, die während des Benchmarks generiert werden sollen.",
+ "model": "Modell",
+ "modelInfo": "Geben Sie den Pfad zum quantisierten ' ein.{{fileFormat}}' Modelldatei, generiert mit Microsofts BitNet '{{script}}' Skript.",
+ "threads": "Themen",
+ "threadsInfo": "Geben Sie die Anzahl der Threads an, die für den Benchmark verwendet werden sollen.",
+ "promptLength": "Schnelle Länge",
+ "promptLengthInfo": "Geben Sie die Länge der Eingabeaufforderung an, aus der Text generiert werden soll.",
+ "runBenchmark": "Benchmark ausführen",
+ "stopBenchmark": "Benchmark stoppen",
+ "log": "Benchmark-Protokoll",
+ "license": "Lizenziert unter {{license}}",
+ "builtWith": "Gebaut mit"
+}
\ No newline at end of file
diff --git a/src/data/locales/de/Home.json b/src/data/locales/de/Home.json
index bd7b707..22024eb 100644
--- a/src/data/locales/de/Home.json
+++ b/src/data/locales/de/Home.json
@@ -1,22 +1,22 @@
{
- "title": "Electron BitNet-Inferenztool",
- "description": "Microsoft hat bitnet.cpp als offizielles Inferenz-Framework für 1-Bit-LLMs (z. B. BitNet b1.58) veröffentlicht, das auf CPUs läuft; ",
- "commandOptions": "Befehlsoptionen",
- "numberOfTokens": "Anzahl der vorherzusagenden Token",
- "model": "Modell",
- "threads": "Themen",
- "contextSize": "Kontextgröße",
- "temperature": "Temperatur",
- "prompt": "Prompt",
- "numberOfTokensInfo": "Dies ist die Menge an Token (Wörtern), die durch die Ausführung des Inferenz-Frameworks generiert werden sollen.",
- "modelInfo": "Geben Sie den Pfad zum quantisierten ' ein.{{fileFormat}}'Modelldatei, die mit dem BitNet von Microsoft generiert wurde'{{script}}' Skript.",
- "threadsInfo": "Die Anzahl der Threads, die zum Ausführen der Inferenz verwendet werden sollen. ",
- "contextSizeInfo": "Größe des Eingabeaufforderungskontexts. ",
- "temperatureInfo": "Ein Hyperparameter, der die Zufälligkeit des generierten Texts steuert. ",
- "promptInfo": "Die Eingabeaufforderung, aus der Text generiert werden soll. ",
- "runInference": "Inferenz ausführen",
- "stopInference": "Stoppen Sie die Schlussfolgerung",
- "response": "Antwort",
- "license": "{{license}} Lizenzierter Code",
- "builtWith": "gebaut mit"
- }
\ No newline at end of file
+ "title": "Electron BitNet-Inferenztool",
+ "description": "Microsoft hat BitNet als offizielles Inferenz-Framework für 1-Bit-LLMs veröffentlicht, das auf CPUs läuft. Probieren Sie es unten aus!",
+ "commandOptions": "Befehlsoptionen",
+ "numberOfTokens": "Anzahl der vorherzusagenden Token",
+ "model": "Modell",
+ "threads": "Themen",
+ "contextSize": "Kontextgröße",
+ "temperature": "Temperatur",
+ "prompt": "Prompt",
+ "numberOfTokensInfo": "Dies ist die Menge an Token (Wörtern), die durch die Ausführung des Inferenz-Frameworks generiert werden sollen.",
+ "modelInfo": "Geben Sie den Pfad zum quantisierten ' ein.{{fileFormat}}' Modelldatei, generiert mit Microsofts BitNet '{{script}}' Skript.",
+ "threadsInfo": "Die Anzahl der Threads, die zum Ausführen der Inferenz verwendet werden sollen, begrenzt durch die Anzahl der auf der CPU verfügbaren Threads.",
+ "contextSizeInfo": "Die Größe des Eingabeaufforderungskontexts bestimmt, wie viel von der Eingabeaufforderung bei der Inferenz berücksichtigt wird.",
+ "temperatureInfo": "Ein Hyperparameter, der die Zufälligkeit des generierten Texts steuert. Niedrigere Werte machen den Text deterministischer.",
+ "promptInfo": "Dies ist der Anfangstext, den das Modell verwendet, um mit der Generierung der Ausgabe zu beginnen.",
+ "runInference": "Inferenz ausführen",
+ "stopInference": "Stoppen Sie die Schlussfolgerung",
+ "response": "Antwort",
+ "license": "{{license}} Lizenzierter Code",
+ "builtWith": "gebaut mit"
+}
\ No newline at end of file
diff --git a/src/data/locales/de/PageHeader.json b/src/data/locales/de/PageHeader.json
index 67600b0..52fa360 100644
--- a/src/data/locales/de/PageHeader.json
+++ b/src/data/locales/de/PageHeader.json
@@ -11,7 +11,10 @@
"korean": "Koreanisch ({{locale}})",
"portuguese": "Portugiesisch ({{locale}})",
"thai": "Thailändisch ({{locale}})",
+ "taiwanese": "Taiwanesisch ({{locale}})",
"index": "Inferenz-Dashboard",
+ "benchmark": "Benchmark-Modelle",
+ "perplexity": "Berechnen Sie die Ratlosigkeit",
"back": "Geh zurück",
"llmFunctionality": "LLM-Funktionalität",
"about": "Um"
diff --git a/src/data/locales/de/Perplexity.json b/src/data/locales/de/Perplexity.json
new file mode 100644
index 0000000..345b3e5
--- /dev/null
+++ b/src/data/locales/de/Perplexity.json
@@ -0,0 +1,24 @@
+{
+ "title": "Electron BitNet Perplexity Tool",
+ "description": "Berechnen Sie unten die Perplexität (das Vertrauen des Modells bei der Vorhersage des nächsten Wortes) Ihres BitNet-Modells.",
+ "commandOptions": "Befehlsoptionen",
+ "prompt": "Prompt",
+ "promptInfo": "Dies ist der Anfangstext, den das Modell verwendet, um mit der Generierung der Ausgabe zu beginnen.",
+ "model": "Modell",
+ "modelInfo": "Geben Sie den Pfad zum quantisierten ' ein.{{fileFormat}}' Modelldatei, generiert mit Microsofts '{{script}}' Skript.",
+ "threads": "Themen",
+ "threadsInfo": "Die Anzahl der Threads, die zum Ausführen der Perplexitätsberechnung verwendet werden sollen.",
+ "contextSize": "Kontextgröße",
+ "contextSizeInfo": "Die Größe des Eingabeaufforderungskontexts bestimmt, wie viel von der Eingabeaufforderung bei der Perplexitätsberechnung berücksichtigt wird.",
+ "pplStride": "Ratlosigkeitsschritt",
+ "pplStrideInfo": "Schritt für Schritt zur Ratlosigkeitsberechnung.",
+ "pplOutputType": "Perplexity-Ausgabetyp",
+ "pplOutputTypeInfo": "Ausgabetyp für Ratlosigkeitsberechnung.",
+ "runPerplexity": "Berechnen Sie die Ratlosigkeit",
+ "stopPerplexity": "Berechnung stoppen",
+ "log": "Berechnetes Ratlosigkeitsergebnis",
+ "error": "Fehler",
+ "license": "{{license}} Lizenzierter Code",
+ "builtWith": "gebaut mit",
+ "insufficientPromptTokens": "Nicht genügend Eingabeaufforderungstoken. Zum Fortfahren ist die doppelte Kontextgröße in Token erforderlich."
+ }
\ No newline at end of file
diff --git a/src/data/locales/en/Benchmark.json b/src/data/locales/en/Benchmark.json
new file mode 100644
index 0000000..1e86853
--- /dev/null
+++ b/src/data/locales/en/Benchmark.json
@@ -0,0 +1,18 @@
+{
+ "title": "Electron BitNet Benchmarking Tool",
+ "description": "Benchmark your Microsoft BitNet 1-bit LLM model below, bearing in mind that larger test variables take longer to run.",
+ "commandOptions": "Command Options",
+ "numberOfTokens": "Number of Tokens",
+ "numberOfTokensInfo": "Specify the number of tokens to generate during the benchmark.",
+ "model": "Model",
+ "modelInfo": "Input the path to the quantized '{{fileFormat}}' model file generated using Microsoft's BitNet '{{script}}' script.",
+ "threads": "Threads",
+ "threadsInfo": "Specify the number of threads to use for the benchmark.",
+ "promptLength": "Prompt Length",
+ "promptLengthInfo": "Specify the length of the prompt to generate text from.",
+ "runBenchmark": "Run Benchmark",
+ "stopBenchmark": "Stop Benchmark",
+ "log": "Benchmark Log",
+ "license": "Licensed under {{license}}",
+ "builtWith": "Built with"
+}
\ No newline at end of file
diff --git a/src/data/locales/en/Home.json b/src/data/locales/en/Home.json
index 794a131..2db0896 100644
--- a/src/data/locales/en/Home.json
+++ b/src/data/locales/en/Home.json
@@ -1,6 +1,6 @@
{
"title": "Electron BitNet Inference Tool",
- "description": "Microsoft released bitnet.cpp as their official inference framework for 1-bit LLMs (e.g., BitNet b1.58) which runs on CPUs; enjoy!",
+ "description": "Microsoft released BitNet as their official inference framework for 1-bit LLMs which runs on CPUs, try it out below!",
"commandOptions": "Command Options",
"numberOfTokens": "Number of tokens to predict",
"model": "Model",
@@ -9,11 +9,11 @@
"temperature": "Temperature",
"prompt": "Prompt",
"numberOfTokensInfo": "This is the quantity of tokens (words) to generate from running the inference framework.",
- "modelInfo": "Input the path to the quantized '{{fileFormat}}' model file generated using the Microsoft's BitNet '{{script}}' script.",
- "threadsInfo": "The number of threads to use for running the inference. Default value is 2.",
- "contextSizeInfo": "Size of the prompt context. This determines how much of the prompt is considered during inference.",
- "temperatureInfo": "A hyperparameter that controls the randomness of the generated text. Lower values make the text more deterministic.",
- "promptInfo": "The prompt to generate text from. This is the initial text that the model will use to start generating the output.",
+ "modelInfo": "Input the path to the quantized '{{fileFormat}}' model file generated using Microsoft's BitNet '{{script}}' script.",
+ "threadsInfo": "The number of threads to use for running the inference, limited to the number of threads available on the CPU.",
+ "contextSizeInfo": "The size of the prompt context determines how much of the prompt is considered during inference.",
+ "temperatureInfo": "A hyperparameter that controls the randomness of the generated text, lower values make the text more deterministic.",
+ "promptInfo": "This is the initial text that the model will use to start generating the output.",
"runInference": "Run Inference",
"stopInference": "Stop Inference",
"response": "Response",
diff --git a/src/data/locales/en/PageHeader.json b/src/data/locales/en/PageHeader.json
index ac8f6e7..f853f52 100644
--- a/src/data/locales/en/PageHeader.json
+++ b/src/data/locales/en/PageHeader.json
@@ -11,7 +11,10 @@
"korean": "Korean ({{locale}})",
"portuguese": "Portuguese ({{locale}})",
"thai": "Thai ({{locale}})",
+ "taiwanese": "Taiwanese ({{locale}})",
"index": "Inference Dashboard",
+ "benchmark": "Benchmark Models",
+ "perplexity": "Calculate Perplexity",
"back": "Go back",
"llmFunctionality": "LLM Functionality",
"about": "About"
diff --git a/src/data/locales/en/Perplexity.json b/src/data/locales/en/Perplexity.json
new file mode 100644
index 0000000..ff8790c
--- /dev/null
+++ b/src/data/locales/en/Perplexity.json
@@ -0,0 +1,24 @@
+{
+ "title": "Electron BitNet Perplexity Tool",
+ "description": "Calculate the perplexity (the model's confidence in predicting the next word) of your BitNet model below.",
+ "commandOptions": "Command Options",
+ "prompt": "Prompt",
+ "promptInfo": "This is the initial text that the model will use to start generating the output.",
+ "model": "Model",
+ "modelInfo": "Input the path to the quantized '{{fileFormat}}' model file generated using Microsoft's '{{script}}' script.",
+ "threads": "Threads",
+ "threadsInfo": "The number of threads to use for running the perplexity calculation.",
+ "contextSize": "Context size",
+ "contextSizeInfo": "The size of the prompt context determines how much of the prompt is considered during perplexity calculation.",
+ "pplStride": "Perplexity Stride",
+ "pplStrideInfo": "Stride for perplexity calculation.",
+ "pplOutputType": "Perplexity Output Type",
+ "pplOutputTypeInfo": "Output type for perplexity calculation.",
+ "runPerplexity": "Calculate Perplexity",
+ "stopPerplexity": "Stop calculation",
+ "log": "Calculated Perplexity Result",
+ "error": "Error",
+ "license": "{{license}} Licensed code",
+ "builtWith": "built with",
+ "insufficientPromptTokens": "Insufficient prompt tokens, double the context size in tokens is required to proceed."
+}
\ No newline at end of file
diff --git a/src/data/locales/es/Benchmark.json b/src/data/locales/es/Benchmark.json
new file mode 100644
index 0000000..bff0bca
--- /dev/null
+++ b/src/data/locales/es/Benchmark.json
@@ -0,0 +1,18 @@
+{
+ "title": "Herramienta de evaluación comparativa Electron BitNet",
+ "description": "Compare su modelo LLM de 1 bit de Microsoft BitNet a continuación, teniendo en cuenta que las variables de prueba más grandes tardan más en ejecutarse.",
+ "commandOptions": "Opciones de comando",
+ "numberOfTokens": "Número de fichas",
+ "numberOfTokensInfo": "Especifique la cantidad de tokens que se generarán durante la prueba comparativa.",
+ "model": "Modelo",
+ "modelInfo": "Ingrese la ruta al 'cuantizado'{{fileFormat}}'archivo de modelo generado utilizando BitNet de Microsoft'{{script}}' guion.",
+ "threads": "Trapos",
+ "threadsInfo": "Especifique el número de subprocesos que se utilizarán para la prueba comparativa.",
+ "promptLength": "Longitud del mensaje",
+ "promptLengthInfo": "Especifique la longitud del mensaje a partir del cual generar texto.",
+ "runBenchmark": "Ejecutar punto de referencia",
+ "stopBenchmark": "Detener punto de referencia",
+ "log": "Registro de referencia",
+ "license": "Licenciado bajo {{license}}",
+ "builtWith": "Construido con"
+}
\ No newline at end of file
diff --git a/src/data/locales/es/Home.json b/src/data/locales/es/Home.json
index aeb1d1d..752c9fa 100644
--- a/src/data/locales/es/Home.json
+++ b/src/data/locales/es/Home.json
@@ -1,22 +1,22 @@
{
- "title": "Herramienta de inferencia Electron BitNet",
- "description": "Microsoft lanzó bitnet.cpp como su marco de inferencia oficial para LLM de 1 bit (por ejemplo, BitNet b1.58) que se ejecuta en CPU; ",
- "commandOptions": "Opciones de comando",
- "numberOfTokens": "Número de tokens para predecir",
- "model": "Modelo",
- "threads": "Trapos",
- "contextSize": "Tamaño del contexto",
- "temperature": "Temperatura",
- "prompt": "Inmediato",
- "numberOfTokensInfo": "Esta es la cantidad de tokens (palabras) que se generarán al ejecutar el marco de inferencia.",
- "modelInfo": "Ingrese la ruta al 'cuantizado'{{fileFormat}}'Archivo de modelo generado usando BitNet de Microsoft'{{script}}' guion.",
- "threadsInfo": "El número de subprocesos que se utilizarán para ejecutar la inferencia. ",
- "contextSizeInfo": "Tamaño del contexto del mensaje. ",
- "temperatureInfo": "Un hiperparámetro que controla la aleatoriedad del texto generado. ",
- "promptInfo": "El mensaje para generar texto. ",
- "runInference": "Ejecutar inferencia",
- "stopInference": "Detener la inferencia",
- "response": "Respuesta",
- "license": "{{license}} Código con licencia",
- "builtWith": "construido con"
- }
\ No newline at end of file
+ "title": "Herramienta de inferencia Electron BitNet",
+ "description": "Microsoft lanzó BitNet como su marco de inferencia oficial para LLM de 1 bit que se ejecuta en CPU. ¡Pruébelo a continuación!",
+ "commandOptions": "Opciones de comando",
+ "numberOfTokens": "Número de tokens para predecir",
+ "model": "Modelo",
+ "threads": "Trapos",
+ "contextSize": "Tamaño del contexto",
+ "temperature": "Temperatura",
+ "prompt": "Inmediato",
+ "numberOfTokensInfo": "Esta es la cantidad de tokens (palabras) que se generarán al ejecutar el marco de inferencia.",
+ "modelInfo": "Ingrese la ruta al 'cuantizado'{{fileFormat}}'archivo de modelo generado utilizando BitNet de Microsoft'{{script}}' guion.",
+ "threadsInfo": "La cantidad de subprocesos que se utilizarán para ejecutar la inferencia, limitada a la cantidad de subprocesos disponibles en la CPU.",
+ "contextSizeInfo": "El tamaño del contexto de la sugerencia determina qué parte de la sugerencia se considera durante la inferencia.",
+ "temperatureInfo": "Un hiperparámetro que controla la aleatoriedad del texto generado; los valores más bajos hacen que el texto sea más determinista.",
+ "promptInfo": "Este es el texto inicial que el modelo utilizará para comenzar a generar el resultado.",
+ "runInference": "Ejecutar inferencia",
+ "stopInference": "Detener la inferencia",
+ "response": "Respuesta",
+ "license": "{{license}} Código con licencia",
+ "builtWith": "construido con"
+}
\ No newline at end of file
diff --git a/src/data/locales/es/PageHeader.json b/src/data/locales/es/PageHeader.json
index d174033..9424aaf 100644
--- a/src/data/locales/es/PageHeader.json
+++ b/src/data/locales/es/PageHeader.json
@@ -11,7 +11,10 @@
"korean": "coreano ({{locale}})",
"portuguese": "portugués ({{locale}})",
"thai": "tailandés ({{locale}})",
+ "taiwanese": "taiwanés ({{locale}})",
"index": "Panel de inferencia",
+ "benchmark": "Modelos de referencia",
+ "perplexity": "Calcular la perplejidad",
"back": "Volver",
"llmFunctionality": "Funcionalidad del Máster en Derecho",
"about": "Acerca de"
diff --git a/src/data/locales/es/Perplexity.json b/src/data/locales/es/Perplexity.json
new file mode 100644
index 0000000..ac82a85
--- /dev/null
+++ b/src/data/locales/es/Perplexity.json
@@ -0,0 +1,24 @@
+{
+ "title": "Herramienta de perplejidad Electron BitNet",
+ "description": "Calcule la perplejidad (la confianza del modelo para predecir la siguiente palabra) de su modelo BitNet a continuación.",
+ "commandOptions": "Opciones de comando",
+ "prompt": "Inmediato",
+ "promptInfo": "Este es el texto inicial que el modelo utilizará para comenzar a generar el resultado.",
+ "model": "Modelo",
+ "modelInfo": "Ingrese la ruta al 'cuantizado'{{fileFormat}}'archivo de modelo generado usando Microsoft'{{script}}' guion.",
+ "threads": "Trapos",
+ "threadsInfo": "El número de subprocesos que se utilizarán para ejecutar el cálculo de perplejidad.",
+ "contextSize": "Tamaño del contexto",
+ "contextSizeInfo": "El tamaño del contexto del mensaje determina qué parte del mensaje se considera durante el cálculo de perplejidad.",
+ "pplStride": "Paso de perplejidad",
+ "pplStrideInfo": "Paso para el cálculo de la perplejidad.",
+ "pplOutputType": "Tipo de salida de perplejidad",
+ "pplOutputTypeInfo": "Tipo de salida para el cálculo de perplejidad.",
+ "runPerplexity": "Calcular la perplejidad",
+ "stopPerplexity": "Detener cálculo",
+ "log": "Resultado de perplejidad calculado",
+ "error": "Error",
+ "license": "{{license}} Código con licencia",
+ "builtWith": "construido con",
+ "insufficientPromptTokens": "Tokens de aviso insuficientes; se requiere duplicar el tamaño del contexto en tokens para continuar."
+ }
\ No newline at end of file
diff --git a/src/data/locales/fr/Benchmark.json b/src/data/locales/fr/Benchmark.json
new file mode 100644
index 0000000..5a36155
--- /dev/null
+++ b/src/data/locales/fr/Benchmark.json
@@ -0,0 +1,18 @@
+{
+ "title": "Outil d'analyse comparative Electron BitNet",
+ "description": "Comparez votre modèle LLM Microsoft BitNet 1 bit ci-dessous, en gardant à l'esprit que les variables de test plus importantes prennent plus de temps à s'exécuter.",
+ "commandOptions": "Options de commande",
+ "numberOfTokens": "Nombre de jetons",
+ "numberOfTokensInfo": "Spécifiez le nombre de jetons à générer lors du benchmark.",
+ "model": "Modèle",
+ "modelInfo": "Entrez le chemin vers le fichier de modèle quantifié '{{fileFormat}}' généré à l'aide du script '{{script}}' de BitNet de Microsoft.",
+ "threads": "Sujets",
+ "threadsInfo": "Spécifiez le nombre de threads à utiliser pour le test de performance.",
+ "promptLength": "Longueur de l'invite",
+ "promptLengthInfo": "Spécifiez la longueur de l'invite à partir de laquelle générer le texte.",
+ "runBenchmark": "Exécuter une analyse comparative",
+ "stopBenchmark": "Arrêter le benchmark",
+ "log": "Journal de référence",
+ "license": "Autorisé sous {{license}}",
+ "builtWith": "Construit avec"
+}
\ No newline at end of file
diff --git a/src/data/locales/fr/Home.json b/src/data/locales/fr/Home.json
index 2464b11..a1f8595 100644
--- a/src/data/locales/fr/Home.json
+++ b/src/data/locales/fr/Home.json
@@ -1,22 +1,22 @@
{
- "title": "Outil d'inférence Electron BitNet",
- "description": "Microsoft a publié bitnet.cpp comme cadre d'inférence officiel pour les LLM 1 bit (par exemple, BitNet b1.58) qui s'exécute sur des processeurs ; ",
- "commandOptions": "Options de commande",
- "numberOfTokens": "Nombre de jetons à prédire",
- "model": "Modèle",
- "threads": "Sujets",
- "contextSize": "Taille du contexte",
- "temperature": "Température",
- "prompt": "Rapide",
- "numberOfTokensInfo": "Il s'agit de la quantité de jetons (mots) à générer lors de l'exécution du cadre d'inférence.",
- "modelInfo": "Entrez le chemin vers le ' quantifié{{fileFormat}}'fichier modèle généré à l'aide de BitNet de Microsoft'{{script}}' scénario.",
- "threadsInfo": "Le nombre de threads à utiliser pour exécuter l’inférence. ",
- "contextSizeInfo": "Taille du contexte d’invite. ",
- "temperatureInfo": "Un hyperparamètre qui contrôle le caractère aléatoire du texte généré. ",
- "promptInfo": "L'invite à partir de laquelle générer du texte. ",
- "runInference": "Exécuter l'inférence",
- "stopInference": "Arrêter l'inférence",
- "response": "Réponse",
- "license": "{{license}} Code sous licence",
- "builtWith": "construit avec"
- }
\ No newline at end of file
+ "title": "Outil d'inférence Electron BitNet",
+ "description": "Microsoft a publié BitNet en tant que cadre d'inférence officiel pour les LLM 1 bit qui fonctionnent sur des processeurs, essayez-le ci-dessous !",
+ "commandOptions": "Options de commande",
+ "numberOfTokens": "Nombre de jetons à prédire",
+ "model": "Modèle",
+ "threads": "Sujets",
+ "contextSize": "Taille du contexte",
+ "temperature": "Température",
+ "prompt": "Rapide",
+ "numberOfTokensInfo": "Il s'agit de la quantité de jetons (mots) à générer lors de l'exécution du cadre d'inférence.",
+ "modelInfo": "Entrez le chemin vers le fichier de modèle quantifié '{{fileFormat}}' généré à l'aide du script '{{script}}' de BitNet de Microsoft.",
+ "threadsInfo": "Nombre de threads à utiliser pour exécuter l'inférence, limité au nombre de threads disponibles sur le processeur.",
+ "contextSizeInfo": "La taille du contexte d'invite détermine la quantité d'invite prise en compte lors de l'inférence.",
+ "temperatureInfo": "Hyperparamètre qui contrôle le caractère aléatoire du texte généré, des valeurs inférieures rendent le texte plus déterministe.",
+ "promptInfo": "Il s'agit du texte initial que le modèle utilisera pour commencer à générer la sortie.",
+ "runInference": "Exécuter l'inférence",
+ "stopInference": "Arrêter l'inférence",
+ "response": "Réponse",
+ "license": "{{license}} Code sous licence",
+ "builtWith": "construit avec"
+}
\ No newline at end of file
diff --git a/src/data/locales/fr/PageHeader.json b/src/data/locales/fr/PageHeader.json
index 5ef1614..fa428e7 100644
--- a/src/data/locales/fr/PageHeader.json
+++ b/src/data/locales/fr/PageHeader.json
@@ -9,9 +9,12 @@
"italian": "italien ({{locale}})",
"japanese": "Japonais ({{locale}})",
"korean": "coréen ({{locale}})",
- "portuguese": "portugais ({{locale}})",
+ "portuguese": "Portugais ({{locale}})",
"thai": "Thaï ({{locale}})",
+ "taiwanese": "Taïwanais ({{locale}})",
"index": "Tableau de bord d'inférence",
+ "benchmark": "Modèles de référence",
+ "perplexity": "Calculer la perplexité",
"back": "Retourner",
"llmFunctionality": "Fonctionnalité LLM",
"about": "À propos"
diff --git a/src/data/locales/fr/Perplexity.json b/src/data/locales/fr/Perplexity.json
new file mode 100644
index 0000000..5fe2e8f
--- /dev/null
+++ b/src/data/locales/fr/Perplexity.json
@@ -0,0 +1,24 @@
+{
+ "title": "Outil de perplexité Electron BitNet",
+ "description": "Calculez la perplexité (la confiance du modèle dans la prédiction du mot suivant) de votre modèle BitNet ci-dessous.",
+ "commandOptions": "Options de commande",
+ "prompt": "Rapide",
+ "promptInfo": "Il s'agit du texte initial que le modèle utilisera pour commencer à générer la sortie.",
+ "model": "Modèle",
+ "modelInfo": "Entrez le chemin vers le fichier de modèle quantifié '{{fileFormat}}' généré à l'aide du script '{{script}}' de Microsoft.",
+ "threads": "Sujets",
+ "threadsInfo": "Le nombre de threads à utiliser pour exécuter le calcul de perplexité.",
+ "contextSize": "Taille du contexte",
+ "contextSizeInfo": "La taille du contexte d'invite détermine la quantité d'invite prise en compte lors du calcul de la perplexité.",
+ "pplStride": "Pas de perplexité",
+ "pplStrideInfo": "Pas à pas pour le calcul de la perplexité.",
+ "pplOutputType": "Type de sortie Perplexité",
+ "pplOutputTypeInfo": "Type de sortie pour le calcul de perplexité.",
+ "runPerplexity": "Calculer la perplexité",
+ "stopPerplexity": "Arrêter le calcul",
+ "log": "Résultat de perplexité calculé",
+ "error": "Erreur",
+ "license": "{{license}} Code sous licence",
+ "builtWith": "construit avec",
+ "insufficientPromptTokens": "Jetons d'invite insuffisants ; le double de la taille du contexte en jetons est nécessaire pour continuer."
+ }
\ No newline at end of file
diff --git a/src/data/locales/it/Benchmark.json b/src/data/locales/it/Benchmark.json
new file mode 100644
index 0000000..4add48f
--- /dev/null
+++ b/src/data/locales/it/Benchmark.json
@@ -0,0 +1,18 @@
+{
+ "title": "Strumento di benchmarking Electron BitNet",
+ "description": "Confronta di seguito il tuo modello LLM Microsoft BitNet a 1 bit, tenendo presente che le variabili di test più grandi richiedono più tempo per l'esecuzione.",
+ "commandOptions": "Opzioni di comando",
+ "numberOfTokens": "Numero di token",
+ "numberOfTokensInfo": "Specificare il numero di token da generare durante il benchmark.",
+ "model": "Modello",
+ "modelInfo": "Inserisci il percorso del \"quantizzato\"{{fileFormat}}'file modello generato utilizzando BitNet di Microsoft'{{script}}' copione.",
+ "threads": "Discussioni",
+ "threadsInfo": "Specificare il numero di thread da utilizzare per il benchmark.",
+ "promptLength": "Lunghezza richiesta",
+ "promptLengthInfo": "Specificare la lunghezza della richiesta da cui generare il testo.",
+ "runBenchmark": "Esegui benchmark",
+ "stopBenchmark": "Ferma il benchmark",
+ "log": "Registro dei benchmark",
+ "license": "Concesso in licenza sotto {{license}}",
+ "builtWith": "Costruito con"
+}
\ No newline at end of file
diff --git a/src/data/locales/it/Home.json b/src/data/locales/it/Home.json
index dc355d8..6b90cce 100644
--- a/src/data/locales/it/Home.json
+++ b/src/data/locales/it/Home.json
@@ -1,22 +1,22 @@
{
- "title": "Strumento di inferenza BitNet di Electron",
- "description": "Microsoft ha rilasciato bitnet.cpp come framework di inferenza ufficiale per LLM a 1 bit (ad esempio BitNet b1.58) che funziona su CPU; ",
- "commandOptions": "Opzioni di comando",
- "numberOfTokens": "Numero di token da prevedere",
- "model": "Modello",
- "threads": "Discussioni",
- "contextSize": "Dimensione del contesto",
- "temperature": "Temperatura",
- "prompt": "Richiesta",
- "numberOfTokensInfo": "Questa è la quantità di token (parole) da generare dall'esecuzione del framework di inferenza.",
- "modelInfo": "Inserisci il percorso del \"quantizzato\"{{fileFormat}}'file modello generato utilizzando BitNet di Microsoft'{{script}}' copione.",
- "threadsInfo": "Il numero di thread da utilizzare per eseguire l'inferenza. ",
- "contextSizeInfo": "Dimensione del contesto del prompt. ",
- "temperatureInfo": "Un iperparametro che controlla la casualità del testo generato. ",
- "promptInfo": "La richiesta da cui generare il testo. ",
- "runInference": "Esegui l'inferenza",
- "stopInference": "Interrompi l'inferenza",
- "response": "Risposta",
- "license": "{{license}} Codice concesso in licenza",
- "builtWith": "costruito con"
- }
\ No newline at end of file
+ "title": "Strumento di inferenza BitNet di Electron",
+ "description": "Microsoft ha rilasciato BitNet come framework di inferenza ufficiale per LLM a 1 bit che funziona su CPU, provalo di seguito!",
+ "commandOptions": "Opzioni di comando",
+ "numberOfTokens": "Numero di token da prevedere",
+ "model": "Modello",
+ "threads": "Discussioni",
+ "contextSize": "Dimensione del contesto",
+ "temperature": "Temperatura",
+ "prompt": "Richiesta",
+ "numberOfTokensInfo": "Questa è la quantità di token (parole) da generare dall'esecuzione del framework di inferenza.",
 "modelInfo": "Inserisci il percorso del file modello quantizzato '{{fileFormat}}' generato utilizzando lo script '{{script}}' di BitNet di Microsoft.",
+ "threadsInfo": "Il numero di thread da utilizzare per eseguire l'inferenza, limitato al numero di thread disponibili sulla CPU.",
+ "contextSizeInfo": "La dimensione del contesto del prompt determina la parte del prompt che viene considerata durante l'inferenza.",
+ "temperatureInfo": "Un iperparametro che controlla la casualità del testo generato, valori più bassi rendono il testo più deterministico.",
+ "promptInfo": "Questo è il testo iniziale che il modello utilizzerà per iniziare a generare l'output.",
+ "runInference": "Esegui l'inferenza",
+ "stopInference": "Interrompi l'inferenza",
+ "response": "Risposta",
+ "license": "{{license}} Codice concesso in licenza",
+ "builtWith": "costruito con"
+}
\ No newline at end of file
diff --git a/src/data/locales/it/PageHeader.json b/src/data/locales/it/PageHeader.json
index 2577e2b..1dafdd7 100644
--- a/src/data/locales/it/PageHeader.json
+++ b/src/data/locales/it/PageHeader.json
@@ -11,7 +11,10 @@
"korean": "coreano ({{locale}})",
"portuguese": "portoghese ({{locale}})",
"thai": "tailandese ({{locale}})",
+ "taiwanese": "Taiwanese ({{locale}})",
"index": "Dashboard di inferenza",
+ "benchmark": "Modelli di riferimento",
+ "perplexity": "Calcola la perplessità",
"back": "Torna indietro",
"llmFunctionality": "Funzionalità LLM",
"about": "Di"
diff --git a/src/data/locales/it/Perplexity.json b/src/data/locales/it/Perplexity.json
new file mode 100644
index 0000000..3019410
--- /dev/null
+++ b/src/data/locales/it/Perplexity.json
@@ -0,0 +1,24 @@
+{
+ "title": "Strumento per la perplessità di Electron BitNet",
+ "description": "Calcola la perplessità (la confidenza del modello nel prevedere la parola successiva) del tuo modello BitNet di seguito.",
+ "commandOptions": "Opzioni di comando",
+ "prompt": "Richiesta",
+ "promptInfo": "Questo è il testo iniziale che il modello utilizzerà per iniziare a generare l'output.",
+ "model": "Modello",
 "modelInfo": "Inserisci il percorso del file modello quantizzato '{{fileFormat}}' generato utilizzando lo script '{{script}}' di Microsoft.",
+ "threads": "Discussioni",
+ "threadsInfo": "Il numero di thread da utilizzare per eseguire il calcolo della perplessità.",
+ "contextSize": "Dimensione del contesto",
+ "contextSizeInfo": "La dimensione del contesto del prompt determina quanta parte del prompt viene considerata durante il calcolo della perplessità.",
+ "pplStride": "Passo di perplessità",
+ "pplStrideInfo": "Passo per il calcolo delle perplessità.",
+ "pplOutputType": "Tipo di output perplessità",
+ "pplOutputTypeInfo": "Tipo di output per il calcolo della perplessità.",
+ "runPerplexity": "Calcola la perplessità",
+ "stopPerplexity": "Interrompere il calcolo",
+ "log": "Risultato della perplessità calcolata",
+ "error": "Errore",
+ "license": "{{license}} Codice concesso in licenza",
+ "builtWith": "costruito con",
+ "insufficientPromptTokens": "Token prompt insufficienti, per procedere è necessaria il doppio della dimensione del contesto in token."
+ }
\ No newline at end of file
diff --git a/src/data/locales/ja/Benchmark.json b/src/data/locales/ja/Benchmark.json
new file mode 100644
index 0000000..b07036a
--- /dev/null
+++ b/src/data/locales/ja/Benchmark.json
@@ -0,0 +1,18 @@
+{
+ "title": "Electron BitNet ベンチマーク ツール",
+ "description": "以下で Microsoft BitNet 1 ビット LLM モデルのベンチマークを行います。テスト変数が大きいほど実行に時間がかかることに留意してください。",
+ "commandOptions": "コマンドオプション",
+ "numberOfTokens": "トークンの数",
+ "numberOfTokensInfo": "ベンチマーク中に生成するトークンの数を指定します。",
+ "model": "モデル",
+ "modelInfo": "量子化された ' へのパスを入力します。{{fileFormat}}' Microsoft の BitNet を使用して生成されたモデル ファイル '{{script}}' スクリプト。",
+ "threads": "スレッド",
+ "threadsInfo": "ベンチマークに使用するスレッドの数を指定します。",
+ "promptLength": "プロンプトの長さ",
+ "promptLengthInfo": "テキストを生成するプロンプトの長さを指定します。",
+ "runBenchmark": "ベンチマークの実行",
+ "stopBenchmark": "停止ベンチマーク",
+ "log": "ベンチマークログ",
+ "license": "以下に基づいてライセンスを取得 {{license}}",
+ "builtWith": "で構築"
+}
\ No newline at end of file
diff --git a/src/data/locales/ja/Home.json b/src/data/locales/ja/Home.json
index b4fb479..0a2c5b1 100644
--- a/src/data/locales/ja/Home.json
+++ b/src/data/locales/ja/Home.json
@@ -1,22 +1,22 @@
{
- "title": "Electron BitNet 推論ツール",
- "description": "Microsoft は、CPU 上で動作する 1 ビット LLM (BitNet b1.58 など) の公式推論フレームワークとして bitnet.cpp をリリースしました。",
- "commandOptions": "コマンドオプション",
- "numberOfTokens": "予測するトークンの数",
- "model": "モデル",
- "threads": "スレッド",
- "contextSize": "コンテキストサイズ",
- "temperature": "温度",
- "prompt": "プロンプト",
- "numberOfTokensInfo": "これは、推論フレームワークの実行によって生成されるトークン (単語) の量です。",
- "modelInfo": "量子化された ' へのパスを入力します。{{fileFormat}}' Microsoft の BitNet を使用して生成されたモデル ファイル '{{script}}' スクリプト。",
- "threadsInfo": "推論の実行に使用するスレッドの数。",
- "contextSizeInfo": "プロンプトコンテキストのサイズ。",
- "temperatureInfo": "生成されるテキストのランダム性を制御するハイパーパラメーター。",
- "promptInfo": "テキストを生成するプロンプト。",
- "runInference": "推論の実行",
- "stopInference": "推論の停止",
- "response": "応答",
- "license": "{{license}} ライセンスコード",
- "builtWith": "で構築された"
- }
\ No newline at end of file
+ "title": "Electron BitNet 推論ツール",
+ "description": "Microsoft は、CPU 上で実行される 1 ビット LLM の公式推論フレームワークとして BitNet をリリースしました。以下で試してみてください。",
+ "commandOptions": "コマンドオプション",
+ "numberOfTokens": "予測するトークンの数",
+ "model": "モデル",
+ "threads": "スレッド",
+ "contextSize": "コンテキストサイズ",
+ "temperature": "温度",
+ "prompt": "プロンプト",
+ "numberOfTokensInfo": "これは、推論フレームワークの実行によって生成されるトークン (単語) の量です。",
+ "modelInfo": "量子化された ' へのパスを入力します。{{fileFormat}}' Microsoft の BitNet を使用して生成されたモデル ファイル '{{script}}' スクリプト。",
+ "threadsInfo": "推論の実行に使用するスレッドの数。CPU で使用可能なスレッドの数に制限されます。",
+ "contextSizeInfo": "プロンプト コンテキストのサイズによって、推論中にどの程度のプロンプトが考慮されるかが決まります。",
+ "temperatureInfo": "生成されるテキストのランダム性を制御するハイパーパラメータ。値が小さいほどテキストがより決定的になります。",
+ "promptInfo": "これは、モデルが出力の生成を開始するために使用する最初のテキストです。",
+ "runInference": "推論の実行",
+ "stopInference": "推論の停止",
+ "response": "応答",
+ "license": "{{license}} ライセンスコード",
+ "builtWith": "で構築された"
+}
\ No newline at end of file
diff --git a/src/data/locales/ja/PageHeader.json b/src/data/locales/ja/PageHeader.json
index 073390c..15a4b36 100644
--- a/src/data/locales/ja/PageHeader.json
+++ b/src/data/locales/ja/PageHeader.json
@@ -11,7 +11,10 @@
"korean": "韓国語 ({{locale}})",
"portuguese": "ポルトガル語 ({{locale}})",
"thai": "タイ語 ({{locale}})",
+ "taiwanese": "台湾語({{locale}})",
"index": "推論ダッシュボード",
+ "benchmark": "ベンチマークモデル",
 "perplexity": "パープレキシティを計算する",
"back": "戻る",
"llmFunctionality": "LLM の機能",
"about": "について"
diff --git a/src/data/locales/ja/Perplexity.json b/src/data/locales/ja/Perplexity.json
new file mode 100644
index 0000000..4db31d2
--- /dev/null
+++ b/src/data/locales/ja/Perplexity.json
@@ -0,0 +1,24 @@
+{
+ "title": "Electron BitNet パープレキシティ ツール",
+ "description": "以下の BitNet モデルのパープレキシティ (次の単語を予測するモデルの信頼度) を計算します。",
+ "commandOptions": "コマンドオプション",
+ "prompt": "プロンプト",
+ "promptInfo": "これは、モデルが出力の生成を開始するために使用する最初のテキストです。",
+ "model": "モデル",
+ "modelInfo": "量子化された ' へのパスを入力します。{{fileFormat}}' Microsoft の ' を使用して生成されたモデル ファイル{{script}}' スクリプト。",
+ "threads": "スレッド",
 "threadsInfo": "パープレキシティ計算の実行に使用するスレッドの数。",
+ "contextSize": "コンテキストサイズ",
+ "contextSizeInfo": "プロンプト コンテキストのサイズによって、パープレキシティの計算中にどの程度のプロンプトが考慮されるかが決まります。",
+ "pplStride": "パープレキシティ ストライド",
+ "pplStrideInfo": "パープレキシティ計算のストライド。",
+ "pplOutputType": "パープレキシティ出力タイプ",
 "pplOutputTypeInfo": "パープレキシティ計算の出力タイプ。",
 "runPerplexity": "パープレキシティを計算する",
 "stopPerplexity": "計算を停止する",
 "log": "計算されたパープレキシティの結果",
+ "error": "エラー",
+ "license": "{{license}} ライセンスコード",
+ "builtWith": "で構築された",
+ "insufficientPromptTokens": "プロンプト トークンが不十分です。続行するには、トークンのコンテキスト サイズの 2 倍が必要です。"
+ }
\ No newline at end of file
diff --git a/src/data/locales/ko/Benchmark.json b/src/data/locales/ko/Benchmark.json
new file mode 100644
index 0000000..f4ff90a
--- /dev/null
+++ b/src/data/locales/ko/Benchmark.json
@@ -0,0 +1,18 @@
+{
+ "title": "Electron BitNet 벤치마킹 도구",
+ "description": "아래의 Microsoft BitNet 1비트 LLM 모델을 벤치마킹하세요. 테스트 변수가 클수록 실행하는 데 시간이 더 오래 걸린다는 점을 염두에 두세요.",
+ "commandOptions": "명령 옵션",
+ "numberOfTokens": "토큰 수",
+ "numberOfTokensInfo": "벤치마크 중에 생성할 토큰 수를 지정합니다.",
+ "model": "모델",
 "modelInfo": "Microsoft의 BitNet '{{script}}' 스크립트를 사용하여 생성된 양자화된 '{{fileFormat}}' 모델 파일의 경로를 입력하세요.",
+ "threads": "스레드",
+ "threadsInfo": "벤치마크에 사용할 스레드 수를 지정합니다.",
+ "promptLength": "프롬프트 길이",
+ "promptLengthInfo": "텍스트를 생성할 프롬프트의 길이를 지정합니다.",
+ "runBenchmark": "벤치마크 실행",
+ "stopBenchmark": "벤치마크 중지",
+ "log": "벤치마크 로그",
+ "license": "아래 라이선스 {{license}}",
+ "builtWith": "다음으로 제작됨"
+}
\ No newline at end of file
diff --git a/src/data/locales/ko/Home.json b/src/data/locales/ko/Home.json
index 545fb03..fbeace3 100644
--- a/src/data/locales/ko/Home.json
+++ b/src/data/locales/ko/Home.json
@@ -1,22 +1,22 @@
{
- "title": "Electron BitNet 추론 도구",
- "description": "Microsoft는 CPU에서 실행되는 1비트 LLM(예: BitNet b1.58)에 대한 공식 추론 프레임워크로 bitnet.cpp를 출시했습니다. ",
- "commandOptions": "명령 옵션",
- "numberOfTokens": "예측할 토큰 수",
- "model": "모델",
- "threads": "스레드",
- "contextSize": "컨텍스트 크기",
- "temperature": "온도",
- "prompt": "즉각적인",
- "numberOfTokensInfo": "추론 프레임워크를 실행하여 생성되는 토큰(단어)의 수량입니다.",
- "modelInfo": "양자화된 '에 대한 경로를 입력합니다.{{fileFormat}}' Microsoft의 BitNet을 사용하여 생성된 모델 파일 '{{script}}' 스크립트.",
- "threadsInfo": "추론을 실행하는 데 사용할 스레드 수입니다. ",
- "contextSizeInfo": "프롬프트 컨텍스트의 크기입니다. ",
- "temperatureInfo": "생성된 텍스트의 무작위성을 제어하는 하이퍼파라미터입니다. ",
- "promptInfo": "텍스트를 생성할 프롬프트입니다. ",
- "runInference": "추론 실행",
- "stopInference": "추론 중지",
- "response": "응답",
- "license": "{{license}} 라이센스 코드",
- "builtWith": "으로 구축"
- }
\ No newline at end of file
+ "title": "Electron BitNet 추론 도구",
+ "description": "Microsoft는 CPU에서 실행되는 1비트 LLM에 대한 공식 추론 프레임워크로 BitNet을 출시했습니다. 아래에서 사용해 보세요!",
+ "commandOptions": "명령 옵션",
+ "numberOfTokens": "예측할 토큰 수",
+ "model": "모델",
+ "threads": "스레드",
+ "contextSize": "컨텍스트 크기",
+ "temperature": "온도",
+ "prompt": "즉각적인",
+ "numberOfTokensInfo": "추론 프레임워크를 실행하여 생성되는 토큰(단어)의 수량입니다.",
 "modelInfo": "Microsoft의 BitNet '{{script}}' 스크립트를 사용하여 생성된 양자화된 '{{fileFormat}}' 모델 파일의 경로를 입력하세요.",
+ "threadsInfo": "추론을 실행하는 데 사용할 스레드 수는 CPU에서 사용 가능한 스레드 수로 제한됩니다.",
+ "contextSizeInfo": "프롬프트 컨텍스트의 크기에 따라 추론 중에 고려되는 프롬프트의 양이 결정됩니다.",
+ "temperatureInfo": "생성된 텍스트의 무작위성을 제어하는 하이퍼 매개변수로, 값이 낮을수록 텍스트가 더 결정적으로 만들어집니다.",
+ "promptInfo": "이는 모델이 출력 생성을 시작하는 데 사용할 초기 텍스트입니다.",
+ "runInference": "추론 실행",
+ "stopInference": "추론 중지",
+ "response": "응답",
+ "license": "{{license}} 라이센스 코드",
+ "builtWith": "으로 구축"
+}
\ No newline at end of file
diff --git a/src/data/locales/ko/PageHeader.json b/src/data/locales/ko/PageHeader.json
index d792ddb..ddefef1 100644
--- a/src/data/locales/ko/PageHeader.json
+++ b/src/data/locales/ko/PageHeader.json
@@ -11,7 +11,10 @@
"korean": "한국인 ({{locale}})",
"portuguese": "포르투갈어({{locale}})",
"thai": "태국어({{locale}})",
+ "taiwanese": "대만어({{locale}})",
"index": "추론 대시보드",
+ "benchmark": "벤치마크 모델",
+ "perplexity": "당혹감을 계산하다",
"back": "돌아가기",
"llmFunctionality": "LLM 기능",
"about": "에 대한"
diff --git a/src/data/locales/ko/Perplexity.json b/src/data/locales/ko/Perplexity.json
new file mode 100644
index 0000000..c04fccb
--- /dev/null
+++ b/src/data/locales/ko/Perplexity.json
@@ -0,0 +1,24 @@
+{
+ "title": "Electron BitNet Perplexity 도구",
+ "description": "아래 BitNet 모델의 복잡성(다음 단어 예측에 대한 모델의 신뢰도)을 계산하세요.",
+ "commandOptions": "명령 옵션",
+ "prompt": "즉각적인",
+ "promptInfo": "이는 모델이 출력 생성을 시작하는 데 사용할 초기 텍스트입니다.",
+ "model": "모델",
 "modelInfo": "Microsoft의 '{{script}}' 스크립트를 사용하여 생성된 양자화된 '{{fileFormat}}' 모델 파일의 경로를 입력하세요.",
+ "threads": "스레드",
+ "threadsInfo": "복잡성 계산을 실행하는 데 사용할 스레드 수입니다.",
+ "contextSize": "컨텍스트 크기",
+ "contextSizeInfo": "프롬프트 컨텍스트의 크기에 따라 복잡성 계산 중에 고려되는 프롬프트의 양이 결정됩니다.",
 "pplStride": "퍼플렉서티 스트라이드",
 "pplStrideInfo": "퍼플렉서티 계산을 위한 스트라이드입니다.",
+ "pplOutputType": "Perplexity 출력 유형",
+ "pplOutputTypeInfo": "복잡성 계산을 위한 출력 유형입니다.",
+ "runPerplexity": "당혹감을 계산하다",
+ "stopPerplexity": "계산 중지",
+ "log": "계산된 혼란 결과",
+ "error": "오류",
+ "license": "{{license}} 라이센스 코드",
+ "builtWith": "으로 구축",
 "insufficientPromptTokens": "프롬프트 토큰이 부족합니다. 계속하려면 컨텍스트 크기의 두 배에 해당하는 토큰이 필요합니다."
+ }
\ No newline at end of file
diff --git a/src/data/locales/pt/Benchmark.json b/src/data/locales/pt/Benchmark.json
new file mode 100644
index 0000000..234585d
--- /dev/null
+++ b/src/data/locales/pt/Benchmark.json
@@ -0,0 +1,18 @@
+{
+ "title": "Ferramenta de benchmarking Electron BitNet",
+ "description": "Compare seu modelo LLM de 1 bit Microsoft BitNet abaixo, tendo em mente que variáveis de teste maiores demoram mais para serem executadas.",
+ "commandOptions": "Opções de comando",
+ "numberOfTokens": "Número de tokens",
+ "numberOfTokensInfo": "Especifique o número de tokens a serem gerados durante o benchmark.",
+ "model": "Modelo",
 "modelInfo": "Insira o caminho para o arquivo de modelo quantizado '{{fileFormat}}' gerado usando o script '{{script}}' do BitNet da Microsoft.",
+ "threads": "Tópicos",
+ "threadsInfo": "Especifique o número de threads a serem usados para o benchmark.",
+ "promptLength": "Comprimento do prompt",
+ "promptLengthInfo": "Especifique o comprimento do prompt para gerar o texto.",
+ "runBenchmark": "Executar referência",
+ "stopBenchmark": "Parar referência",
+ "log": "Registro de referência",
+ "license": "Licenciado sob {{license}}",
+ "builtWith": "Construído com"
+}
\ No newline at end of file
diff --git a/src/data/locales/pt/Home.json b/src/data/locales/pt/Home.json
index 7b4ec6b..93e84b3 100644
--- a/src/data/locales/pt/Home.json
+++ b/src/data/locales/pt/Home.json
@@ -1,22 +1,22 @@
{
- "title": "Ferramenta de inferência Electron BitNet",
- "description": "A Microsoft lançou o bitnet.cpp como sua estrutura de inferência oficial para LLMs de 1 bit (por exemplo, BitNet b1.58) que roda em CPUs; ",
- "commandOptions": "Opções de comando",
- "numberOfTokens": "Número de tokens para prever",
- "model": "Modelo",
- "threads": "Tópicos",
- "contextSize": "Tamanho do contexto",
- "temperature": "Temperatura",
- "prompt": "Incitar",
- "numberOfTokensInfo": "Esta é a quantidade de tokens (palavras) a serem gerados a partir da execução da estrutura de inferência.",
- "modelInfo": "Insira o caminho para o quantizado '{{fileFormat}}'arquivo de modelo gerado usando o BitNet da Microsoft'{{script}}'roteiro.",
- "threadsInfo": "O número de threads a serem usados para executar a inferência. ",
- "contextSizeInfo": "Tamanho do contexto de prompt. ",
- "temperatureInfo": "Um hiperparâmetro que controla a aleatoriedade do texto gerado. ",
- "promptInfo": "O prompt para gerar texto. ",
- "runInference": "Executar inferência",
- "stopInference": "Parar inferência",
- "response": "Resposta",
- "license": "{{license}} Código licenciado",
- "builtWith": "construído com"
- }
\ No newline at end of file
+ "title": "Ferramenta de inferência Electron BitNet",
+ "description": "A Microsoft lançou o BitNet como sua estrutura de inferência oficial para LLMs de 1 bit que roda em CPUs, experimente abaixo!",
+ "commandOptions": "Opções de comando",
+ "numberOfTokens": "Número de tokens para prever",
+ "model": "Modelo",
+ "threads": "Tópicos",
+ "contextSize": "Tamanho do contexto",
+ "temperature": "Temperatura",
+ "prompt": "Incitar",
+ "numberOfTokensInfo": "Esta é a quantidade de tokens (palavras) a serem gerados a partir da execução da estrutura de inferência.",
 "modelInfo": "Insira o caminho para o arquivo de modelo quantizado '{{fileFormat}}' gerado usando o script '{{script}}' do BitNet da Microsoft.",
+ "threadsInfo": "O número de threads a serem usados para executar a inferência, limitado ao número de threads disponíveis na CPU.",
+ "contextSizeInfo": "O tamanho do contexto do prompt determina quanto do prompt é considerado durante a inferência.",
+ "temperatureInfo": "Um hiperparâmetro que controla a aleatoriedade do texto gerado, valores mais baixos tornam o texto mais determinístico.",
+ "promptInfo": "Este é o texto inicial que o modelo usará para começar a gerar a saída.",
+ "runInference": "Executar inferência",
+ "stopInference": "Parar inferência",
+ "response": "Resposta",
+ "license": "{{license}} Código licenciado",
+ "builtWith": "construído com"
+}
\ No newline at end of file
diff --git a/src/data/locales/pt/PageHeader.json b/src/data/locales/pt/PageHeader.json
index a03b7c0..6120e29 100644
--- a/src/data/locales/pt/PageHeader.json
+++ b/src/data/locales/pt/PageHeader.json
@@ -11,7 +11,10 @@
"korean": "Coreano ({{locale}})",
"portuguese": "Português ({{locale}})",
"thai": "Tailandês ({{locale}})",
+ "taiwanese": "Taiwanês ({{locale}})",
"index": "Painel de inferência",
+ "benchmark": "Modelos de referência",
+ "perplexity": "Calcular Perplexidade",
"back": "Volte",
"llmFunctionality": "Funcionalidade LLM",
"about": "Sobre"
diff --git a/src/data/locales/pt/Perplexity.json b/src/data/locales/pt/Perplexity.json
new file mode 100644
index 0000000..431077d
--- /dev/null
+++ b/src/data/locales/pt/Perplexity.json
@@ -0,0 +1,24 @@
+{
+ "title": "Ferramenta de perplexidade Electron BitNet",
+ "description": "Calcule a perplexidade (a confiança do modelo em prever a próxima palavra) do seu modelo BitNet abaixo.",
+ "commandOptions": "Opções de comando",
+ "prompt": "Incitar",
+ "promptInfo": "Este é o texto inicial que o modelo usará para começar a gerar a saída.",
+ "model": "Modelo",
 "modelInfo": "Insira o caminho para o arquivo de modelo quantizado '{{fileFormat}}' gerado usando o script '{{script}}' da Microsoft.",
+ "threads": "Tópicos",
+ "threadsInfo": "O número de threads a serem usados para executar o cálculo de perplexidade.",
+ "contextSize": "Tamanho do contexto",
+ "contextSizeInfo": "O tamanho do contexto do prompt determina quanto do prompt é considerado durante o cálculo da perplexidade.",
+ "pplStride": "Passo de Perplexidade",
+ "pplStrideInfo": "Passo para cálculo de perplexidade.",
+ "pplOutputType": "Tipo de saída de perplexidade",
+ "pplOutputTypeInfo": "Tipo de saída para cálculo de perplexidade.",
+ "runPerplexity": "Calcular Perplexidade",
+ "stopPerplexity": "Parar cálculo",
+ "log": "Resultado de perplexidade calculada",
+ "error": "Erro",
+ "license": "{{license}} Código licenciado",
+ "builtWith": "construído com",
+ "insufficientPromptTokens": "Tokens de prompt insuficientes, é necessário o dobro do tamanho do contexto em tokens para prosseguir."
+ }
\ No newline at end of file
diff --git a/src/data/locales/th/Benchmark.json b/src/data/locales/th/Benchmark.json
new file mode 100644
index 0000000..f245691
--- /dev/null
+++ b/src/data/locales/th/Benchmark.json
@@ -0,0 +1,18 @@
+{
+ "title": "เครื่องมือเปรียบเทียบอิเล็กตรอน BitNet",
+ "description": "เปรียบเทียบโมเดล Microsoft BitNet 1-bit LLM ของคุณด้านล่างนี้ โดยคำนึงว่าตัวแปรทดสอบที่ใหญ่กว่าจะใช้เวลารันนานกว่า",
+ "commandOptions": "ตัวเลือกคำสั่ง",
+ "numberOfTokens": "จำนวนโทเค็น",
+ "numberOfTokensInfo": "ระบุจำนวนโทเค็นที่จะสร้างระหว่างการวัดประสิทธิภาพ",
+ "model": "แบบอย่าง",
+ "modelInfo": "ป้อนเส้นทางไปยัง quantized '{{fileFormat}}' ไฟล์โมเดลที่สร้างโดยใช้ BitNet ของ Microsoft '{{script}}' สคริปต์",
+ "threads": "กระทู้",
+ "threadsInfo": "ระบุจำนวนเธรดที่จะใช้สำหรับการวัดประสิทธิภาพ",
+ "promptLength": "ความยาวพร้อมท์",
+ "promptLengthInfo": "ระบุความยาวของพรอมต์เพื่อสร้างข้อความ",
+ "runBenchmark": "เรียกใช้เกณฑ์มาตรฐาน",
+ "stopBenchmark": "หยุดเกณฑ์มาตรฐาน",
+ "log": "บันทึกมาตรฐาน",
+ "license": "ได้รับอนุญาตภายใต้ {{license}}",
+ "builtWith": "สร้างด้วย"
+}
\ No newline at end of file
diff --git a/src/data/locales/th/Home.json b/src/data/locales/th/Home.json
index 0f6c725..37c0838 100644
--- a/src/data/locales/th/Home.json
+++ b/src/data/locales/th/Home.json
@@ -1,22 +1,22 @@
{
- "title": "เครื่องมืออนุมานอิเล็กตรอน BitNet",
- "description": "Microsoft เปิดตัว bitnet.cpp เป็นเฟรมเวิร์กการอนุมานอย่างเป็นทางการสำหรับ LLM 1 บิต (เช่น BitNet b1.58) ซึ่งทำงานบน CPU ",
- "commandOptions": "ตัวเลือกคำสั่ง",
- "numberOfTokens": "จำนวนโทเค็นที่จะทำนาย",
- "model": "แบบอย่าง",
- "threads": "กระทู้",
- "contextSize": "ขนาดบริบท",
- "temperature": "อุณหภูมิ",
- "prompt": "พรอมต์",
- "numberOfTokensInfo": "นี่คือปริมาณของโทเค็น (คำ) ที่จะสร้างจากการรันเฟรมเวิร์กการอนุมาน",
- "modelInfo": "ป้อนเส้นทางไปยัง quantized '{{fileFormat}}' ไฟล์โมเดลที่สร้างโดยใช้ BitNet ของ Microsoft '{{script}}' สคริปต์",
- "threadsInfo": "จำนวนเธรดที่จะใช้สำหรับการรันการอนุมาน ",
- "contextSizeInfo": "ขนาดของบริบทพร้อมท์ ",
- "temperatureInfo": "ไฮเปอร์พารามิเตอร์ที่ควบคุมการสุ่มของข้อความที่สร้างขึ้น ",
- "promptInfo": "พร้อมท์ให้สร้างข้อความจาก ",
- "runInference": "เรียกใช้การอนุมาน",
- "stopInference": "หยุดการอนุมาน",
- "response": "การตอบสนอง",
- "license": "{{license}} รหัสใบอนุญาต",
- "builtWith": "สร้างด้วย"
- }
\ No newline at end of file
+ "title": "เครื่องมืออนุมานอิเล็กตรอน BitNet",
+ "description": "Microsoft เปิดตัว BitNet เป็นเฟรมเวิร์กการอนุมานอย่างเป็นทางการสำหรับ LLM 1 บิตที่ทำงานบน CPU ลองใช้ด้านล่าง!",
+ "commandOptions": "ตัวเลือกคำสั่ง",
+ "numberOfTokens": "จำนวนโทเค็นที่จะทำนาย",
+ "model": "แบบอย่าง",
+ "threads": "กระทู้",
+ "contextSize": "ขนาดบริบท",
+ "temperature": "อุณหภูมิ",
+ "prompt": "พรอมต์",
+ "numberOfTokensInfo": "นี่คือปริมาณของโทเค็น (คำ) ที่จะสร้างจากการรันเฟรมเวิร์กการอนุมาน",
+ "modelInfo": "ป้อนเส้นทางไปยัง quantized '{{fileFormat}}' ไฟล์โมเดลที่สร้างโดยใช้ BitNet ของ Microsoft '{{script}}' สคริปต์",
+ "threadsInfo": "จำนวนเธรดที่จะใช้สำหรับการรันการอนุมาน ซึ่งจำกัดอยู่ที่จำนวนเธรดที่มีอยู่บน CPU",
+ "contextSizeInfo": "ขนาดของบริบทของพรอมต์จะกำหนดจำนวนพรอมต์ที่จะพิจารณาในระหว่างการอนุมาน",
+ "temperatureInfo": "ไฮเปอร์พารามิเตอร์ที่ควบคุมการสุ่มของข้อความที่สร้างขึ้น ค่าที่ต่ำกว่าจะทำให้ข้อความมีการกำหนดมากขึ้น",
+ "promptInfo": "นี่คือข้อความเริ่มต้นที่โมเดลจะใช้เพื่อเริ่มสร้างเอาต์พุต",
+ "runInference": "เรียกใช้การอนุมาน",
+ "stopInference": "หยุดการอนุมาน",
+ "response": "การตอบสนอง",
+ "license": "{{license}} รหัสใบอนุญาต",
+ "builtWith": "สร้างด้วย"
+}
\ No newline at end of file
diff --git a/src/data/locales/th/PageHeader.json b/src/data/locales/th/PageHeader.json
index 2e07911..88ea142 100644
--- a/src/data/locales/th/PageHeader.json
+++ b/src/data/locales/th/PageHeader.json
@@ -11,7 +11,10 @@
"korean": "เกาหลี ({{locale}}-",
"portuguese": "โปรตุเกส ({{locale}}-",
"thai": "แบบไทย ({{locale}}-",
+ "taiwanese": "ชาวไต้หวัน ({{locale}}-",
"index": "แดชบอร์ดการอนุมาน",
+ "benchmark": "โมเดลมาตรฐาน",
+ "perplexity": "คำนวณความฉงนสนเท่ห์",
"back": "กลับไป",
"llmFunctionality": "ฟังก์ชั่น LLM",
"about": "เกี่ยวกับ"
diff --git a/src/data/locales/th/Perplexity.json b/src/data/locales/th/Perplexity.json
new file mode 100644
index 0000000..c848610
--- /dev/null
+++ b/src/data/locales/th/Perplexity.json
@@ -0,0 +1,24 @@
+{
+ "title": "เครื่องมือความฉงนสนเท่ห์ของอิเล็กตรอน BitNet",
+ "description": "คำนวณความฉงนสนเท่ห์ (ความมั่นใจของโมเดลในการทำนายคำถัดไป) ของโมเดล BitNet ของคุณด้านล่าง",
+ "commandOptions": "ตัวเลือกคำสั่ง",
+ "prompt": "พรอมต์",
+ "promptInfo": "นี่คือข้อความเริ่มต้นที่โมเดลจะใช้เพื่อเริ่มสร้างเอาต์พุต",
+ "model": "แบบอย่าง",
+ "modelInfo": "ป้อนเส้นทางไปยัง quantized '{{fileFormat}}' ไฟล์โมเดลที่สร้างโดยใช้ของ Microsoft '{{script}}' สคริปต์",
+ "threads": "กระทู้",
+ "threadsInfo": "จำนวนเธรดที่จะใช้สำหรับการรันการคำนวณความงุนงง",
+ "contextSize": "ขนาดบริบท",
+ "contextSizeInfo": "ขนาดของบริบทพร้อมท์จะกำหนดจำนวนพร้อมท์ที่จะพิจารณาในระหว่างการคำนวณความสับสน",
+ "pplStride": "ก้าวย่างที่สับสน",
+ "pplStrideInfo": "ก้าวไปสู่การคำนวณความสับสน",
+ "pplOutputType": "ประเภทเอาต์พุตความฉงนสนเท่ห์",
+ "pplOutputTypeInfo": "ประเภทเอาต์พุตสำหรับการคำนวณความฉงนสนเท่ห์",
+ "runPerplexity": "คำนวณความฉงนสนเท่ห์",
+ "stopPerplexity": "หยุดการคำนวณ",
+ "log": "ผลลัพธ์ความฉงนสนเท่ห์ที่คำนวณได้",
+ "error": "ข้อผิดพลาด",
+ "license": "{{license}} รหัสใบอนุญาต",
+ "builtWith": "สร้างด้วย",
+ "insufficientPromptTokens": "โทเค็นพร้อมท์ไม่เพียงพอ ต้องใช้ขนาดบริบทเป็นสองเท่าในโทเค็นเพื่อดำเนินการต่อ"
+ }
\ No newline at end of file
diff --git a/src/data/locales/tw/Benchmark.json b/src/data/locales/tw/Benchmark.json
new file mode 100644
index 0000000..4a4f923
--- /dev/null
+++ b/src/data/locales/tw/Benchmark.json
@@ -0,0 +1,18 @@
+{
+ "title": "Electron BitNet 基準測試工具",
+ "description": "在下面對您的 Microsoft BitNet 1 位 LLM 模型進行基準測試,請記住,較大的測試變數需要更長的時間來運行。",
+ "commandOptions": "命令選項",
+ "numberOfTokens": "代幣數量",
+ "numberOfTokensInfo": "指定基準測試期間要生成的代幣數量。",
+ "model": "模型",
+ "modelInfo": "輸入使用 Microsoft 的 BitNet '{{script}}' 腳本生成的量化 '{{fileFormat}}' 模型文件的路徑。",
+ "threads": "線程",
+ "threadsInfo": "指定要用於基準測試的線程數。",
+ "promptLength": "提示長度",
+ "promptLengthInfo": "指定要從中生成文本的提示的長度。",
+ "runBenchmark": "運行基準測試",
+ "stopBenchmark": "停止基準測試",
+ "log": "基準測試日誌",
+ "license": "根據 {{license}} 獲得許可",
+ "builtWith": "內置"
+}
\ No newline at end of file
diff --git a/src/data/locales/tw/Home.json b/src/data/locales/tw/Home.json
new file mode 100644
index 0000000..fc06d11
--- /dev/null
+++ b/src/data/locales/tw/Home.json
@@ -0,0 +1,22 @@
+{
+ "title": "Electron BitNet 推理工具",
+ "description": "Microsoft 發佈了 BitNet 作為其在 CPU 上運行的 1 位 LLM 的官方推理框架,請在下面試用!",
+ "commandOptions": "命令選項",
+ "numberOfTokens": "要預測的代幣數量",
+ "model": "模型",
+ "threads": "線程",
+ "contextSize": "上下文大小",
+ "temperature": "溫度",
+ "prompt": "提示",
+ "numberOfTokensInfo": "這是通過運行推理框架生成的令牌(單詞)數量。",
+ "modelInfo": "輸入使用 Microsoft 的 BitNet '{{script}}' 腳本生成的量化 '{{fileFormat}}' 模型文件的路徑。",
+ "threadsInfo": "用於運行推理的線程數,限制為 CPU 上可用的線程數。",
+ "contextSizeInfo": "提示上下文的大小決定了在推理過程中考慮了多少提示。",
+ "temperatureInfo": "控制生成文本隨機性的超參數,較低的值會使文本更具確定性。",
+ "promptInfo": "這是模型將用於開始生成輸出的初始文本。",
+ "runInference": "運行推理",
+ "stopInference": "停止推理",
+ "response": "回應",
+ "license": "{{license}} 許可代碼",
+ "builtWith": "構建"
+}
\ No newline at end of file
diff --git a/src/data/locales/tw/PageHeader.json b/src/data/locales/tw/PageHeader.json
new file mode 100644
index 0000000..ad99061
--- /dev/null
+++ b/src/data/locales/tw/PageHeader.json
@@ -0,0 +1,21 @@
+{
+ "commandSearchPlaceholder": "鍵入命令或搜索...",
+ "noResultsFound": "未找到結果。",
+ "english": "英語 ({{locale}})",
+ "danish": "丹麥語({{locale}})",
+ "german": "德語({{locale}})",
+ "spanish": "西班牙語 ({{locale}})",
+ "french": "法語 ({{locale}})",
+ "italian": "意大利語({{locale}})",
+ "japanese": "日語 ({{locale}})",
+ "korean": "韓語 ({{locale}})",
+ "portuguese": "葡萄牙語({{locale}})",
+ "thai": "泰語({{locale}})",
+ "taiwanese": "臺灣語({{locale}})",
+ "index": "推理控制面板",
+ "benchmark": "基準測試模型",
+ "perplexity": "計算困惑度",
+ "back": "返回",
+ "llmFunctionality": "LLM 功能",
+ "about": "關於"
+}
\ No newline at end of file
diff --git a/src/data/locales/tw/Perplexity.json b/src/data/locales/tw/Perplexity.json
new file mode 100644
index 0000000..6a90144
--- /dev/null
+++ b/src/data/locales/tw/Perplexity.json
@@ -0,0 +1,24 @@
+{
+ "title": "Electron BitNet 困惑工具",
+ "description": "計算下面的 BitNet 模型的困惑度(模型預測下一個單詞的置信度)。",
+ "commandOptions": "命令選項",
+ "prompt": "迅速的",
+ "promptInfo": "這是模型將用於開始生成輸出的初始文本。",
+ "model": "模型",
+ "modelInfo": "輸入使用 Microsoft 的 '{{script}}' 腳本生成的量化 '{{fileFormat}}' 模型文件的路徑。",
+ "threads": "線程",
+ "threadsInfo": "用於運行困惑度計算的線程數。",
+ "contextSize": "上下文大小",
+ "contextSizeInfo": "提示上下文的大小決定了在困惑度計算過程中考慮了多少提示。",
+ "pplStride": "困惑步幅",
+ "pplStrideInfo": "步幅用於困惑度計算。",
+ "pplOutputType": "Perplexity 輸出類型",
+ "pplOutputTypeInfo": "用於困惑度計算的輸出類型。",
+ "runPerplexity": "計算困惑度",
+ "stopPerplexity": "停止計算",
+ "log": "計算的困惑度結果",
+ "error": "錯誤",
 "license": "{{license}} 授權代碼",
+ "builtWith": "建有",
+ "insufficientPromptTokens": "提示令牌不足,需要雙倍上下文大小的令牌才能繼續。"
+ }
\ No newline at end of file
diff --git a/src/lib/i18n.js b/src/lib/i18n.js
index 232e63f..2c6da65 100644
--- a/src/lib/i18n.js
+++ b/src/lib/i18n.js
@@ -2,10 +2,12 @@ import i18n from "i18next";
import { initReactI18next } from "react-i18next";
import { persistentAtom } from "@nanostores/persistent";
-const languages = ["en", "da", "de", "es", "fr", "it", "ja", "ko", "pt", "th"];
+const languages = ["en", "da", "de", "es", "fr", "it", "ja", "ko", "pt", "th", "tw"];
const pages = [
"Home",
- "PageHeader"
+ "PageHeader",
+ "Benchmark",
+ "Perplexity",
];
const locale = persistentAtom("locale", "en");
diff --git a/src/pages/benchmark.astro b/src/pages/benchmark.astro
new file mode 100644
index 0000000..2185b7f
--- /dev/null
+++ b/src/pages/benchmark.astro
@@ -0,0 +1,26 @@
+---
+import Layout from '../layouts/Layout.astro';
+import BenchmarkUI from '../components/BenchmarkUI.jsx';
+import PageHeader from '@/components/PageHeader';
+import '@/styles/globals.css'
+---
+
+
+
+
+
+
+
+
+
diff --git a/src/pages/index.astro b/src/pages/index.astro
index 39abca3..3fd28e6 100644
--- a/src/pages/index.astro
+++ b/src/pages/index.astro
@@ -5,7 +5,7 @@ import PageHeader from '@/components/PageHeader';
import '@/styles/globals.css'
---
-
+
diff --git a/src/pages/perplexity.astro b/src/pages/perplexity.astro
new file mode 100644
index 0000000..3c5d228
--- /dev/null
+++ b/src/pages/perplexity.astro
@@ -0,0 +1,26 @@
+---
+import Layout from '../layouts/Layout.astro';
+import PerplexityUI from '../components/PerplexityUI.jsx';
+import PageHeader from '@/components/PageHeader';
+import '@/styles/globals.css'
+---
+
+
+
+
+
+
+
+
+
diff --git a/src/preload.js b/src/preload.js
index b8e65ae..a182947 100644
--- a/src/preload.js
+++ b/src/preload.js
@@ -2,6 +2,9 @@ import { ipcRenderer, contextBridge } from "electron";
contextBridge.exposeInMainWorld("electron", {
openURL: async (target) => ipcRenderer.send("openURL", target),
+ openFileDialog: async () => ipcRenderer.invoke("openFileDialog"),
+ getMaxThreads: async () => ipcRenderer.invoke("getMaxThreads"),
+ //
onAiResponse: (func) => {
ipcRenderer.on("aiResponse", (event, data) => {
func(data);
@@ -19,5 +22,30 @@ contextBridge.exposeInMainWorld("electron", {
},
runInference: async (args) => ipcRenderer.send("runInference", args),
stopInference: async (args) => ipcRenderer.send("stopInference", args),
- openFileDialog: async () => ipcRenderer.invoke("openFileDialog"),
+ //
+ onBenchmarkLog: (func) => {
+ ipcRenderer.on("benchmarkLog", (event, data) => {
+ func(data);
+ });
+ },
+ onBenchmarkComplete: (func) => {
+ ipcRenderer.on("benchmarkComplete", (event) => {
+ func();
+ });
+ },
+ runBenchmark: async (args) => ipcRenderer.send("runBenchmark", args),
+ stopBenchmark: async (args) => ipcRenderer.send("stopBenchmark", args),
+ //
+ onPerplexityLog: (func) => {
+ ipcRenderer.on("perplexityLog", (event, data) => {
+ func(data);
+ });
+ },
+ onPerplexityComplete: (func) => {
+ ipcRenderer.on("perplexityComplete", (event) => {
+ func();
+ });
+ },
+ runPerplexity: async (args) => ipcRenderer.send("runPerplexity", args),
+ stopPerplexity: async (args) => ipcRenderer.send("stopPerplexity", args),
});