diff --git a/LLama.Examples/Examples/Runner.cs b/LLama.Examples/ExampleRunner.cs
similarity index 94%
rename from LLama.Examples/Examples/Runner.cs
rename to LLama.Examples/ExampleRunner.cs
index 54358ce70..869d1edf4 100644
--- a/LLama.Examples/Examples/Runner.cs
+++ b/LLama.Examples/ExampleRunner.cs
@@ -1,8 +1,7 @@
 using Spectre.Console;
+using LLama.Examples.Examples;
 
-namespace LLama.Examples.Examples;
-
-public class Runner
+public class ExampleRunner
 {
     private static readonly Dictionary<string, Func<Task>> Examples = new()
     {
@@ -26,7 +25,7 @@ public class Runner
         { "Batched Executor (Fork)", BatchedExecutorFork.Run },
         { "Batched Executor (Rewind)", BatchedExecutorRewind.Run },
         { "SK Kernel Memory.", KernelMemory.Run },
-        { "Exit", async () => Environment.Exit(0) }
+        { "Exit", () => { Environment.Exit(0); return Task.CompletedTask; } }
     };
 
     public static async Task Run()
diff --git a/LLama.Examples/Examples/BatchedExecutorFork.cs b/LLama.Examples/Examples/BatchedExecutorFork.cs
index b834190ce..b5cf6c43e 100644
--- a/LLama.Examples/Examples/BatchedExecutorFork.cs
+++ b/LLama.Examples/Examples/BatchedExecutorFork.cs
@@ -15,8 +15,7 @@ public class BatchedExecutorFork
 
     public static async Task Run()
     {
-        Console.Write("Please input your model path: ");
-        var modelPath = Console.ReadLine();
+        string modelPath = UserSettings.GetModelPath();
 
         var parameters = new ModelParams(modelPath);
         using var model = LLamaWeights.LoadFromFile(parameters);
diff --git a/LLama.Examples/Examples/BatchedExecutorRewind.cs b/LLama.Examples/Examples/BatchedExecutorRewind.cs
index 25195a56e..9a25b6e5f 100644
--- a/LLama.Examples/Examples/BatchedExecutorRewind.cs
+++ b/LLama.Examples/Examples/BatchedExecutorRewind.cs
@@ -16,8 +16,7 @@ public class BatchedExecutorRewind
 
     public static async Task Run()
     {
-        Console.Write("Please input your model path: ");
-        var modelPath = Console.ReadLine();
+        string modelPath = UserSettings.GetModelPath();
 
         var parameters = new ModelParams(modelPath);
         using var model = LLamaWeights.LoadFromFile(parameters);
diff --git a/LLama.Examples/Examples/ChatChineseGB2312.cs b/LLama.Examples/Examples/ChatChineseGB2312.cs
index 3a9fe6c79..a1d78b095 100644
--- a/LLama.Examples/Examples/ChatChineseGB2312.cs
+++ b/LLama.Examples/Examples/ChatChineseGB2312.cs
@@ -22,8 +22,7 @@ public static async Task Run()
             " to use https://huggingface.co/hfl/chinese-alpaca-2-7b-gguf/blob/main/ggml-model-q5_0.gguf, which has been verified by LLamaSharp developers.");
         Console.ForegroundColor = ConsoleColor.White;
 
-        Console.Write("Please input your model path: ");
-        var modelPath = Console.ReadLine();
+        string modelPath = UserSettings.GetModelPath();
 
         var parameters = new ModelParams(modelPath)
         {
diff --git a/LLama.Examples/Examples/ChatSessionStripRoleName.cs b/LLama.Examples/Examples/ChatSessionStripRoleName.cs
index 1246db59a..6b8b6187f 100644
--- a/LLama.Examples/Examples/ChatSessionStripRoleName.cs
+++ b/LLama.Examples/Examples/ChatSessionStripRoleName.cs
@@ -6,8 +6,7 @@ public class ChatSessionStripRoleName
 {
     public static async Task Run()
     {
-        Console.Write("Please input your model path: ");
-        var modelPath = Console.ReadLine();
+        string modelPath = UserSettings.GetModelPath();
 
         var parameters = new ModelParams(modelPath)
         {
diff --git a/LLama.Examples/Examples/ChatSessionWithHistory.cs b/LLama.Examples/Examples/ChatSessionWithHistory.cs
index 98ba7d75e..17908908d 100644
--- a/LLama.Examples/Examples/ChatSessionWithHistory.cs
+++ b/LLama.Examples/Examples/ChatSessionWithHistory.cs
@@ -6,8 +6,7 @@ public class ChatSessionWithHistory
 {
     public static async Task Run()
     {
-        Console.Write("Please input your model path: ");
-        var modelPath = Console.ReadLine();
+        string modelPath = UserSettings.GetModelPath();
 
         var parameters = new ModelParams(modelPath)
         {
diff --git a/LLama.Examples/Examples/ChatSessionWithRoleName.cs b/LLama.Examples/Examples/ChatSessionWithRoleName.cs
index d6b0d98e8..de3314130 100644
--- a/LLama.Examples/Examples/ChatSessionWithRoleName.cs
+++ b/LLama.Examples/Examples/ChatSessionWithRoleName.cs
@@ -6,8 +6,7 @@ public class ChatSessionWithRoleName
 {
     public static async Task Run()
     {
-        Console.Write("Please input your model path: ");
-        var modelPath = Console.ReadLine();
+        string modelPath = UserSettings.GetModelPath();
 
         var parameters = new ModelParams(modelPath)
         {
diff --git a/LLama.Examples/Examples/CodingAssistant.cs b/LLama.Examples/Examples/CodingAssistant.cs
index b51b5f1eb..808c3904c 100644
--- a/LLama.Examples/Examples/CodingAssistant.cs
+++ b/LLama.Examples/Examples/CodingAssistant.cs
@@ -2,27 +2,27 @@
 {
     using LLama.Common;
     using System;
-    using System.Reflection;
 
     internal class CodingAssistant
     {
-        const string DefaultModelUri = "https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q4_K_S.gguf";
-
         // Source paper with example prompts:
        // https://doi.org/10.48550/arXiv.2308.12950
         const string InstructionPrefix = "[INST]";
         const string InstructionSuffix = "[/INST]";
-        const string SystemInstruction = "You're an intelligent, concise coding assistant. Wrap code in ``` for readability. Don't repeat yourself. Use best practice and good coding standards.";
-        private static string ModelsDirectory = Path.Combine(Directory.GetParent(Assembly.GetExecutingAssembly().Location)!.FullName, "Models");
+        const string SystemInstruction = "You're an intelligent, concise coding assistant. " +
+            "Wrap code in ``` for readability. Don't repeat yourself. " +
+            "Use best practice and good coding standards.";
 
         public static async Task Run()
         {
-            Console.Write("Please input your model path (if left empty, a default model will be downloaded for you): ");
-            var modelPath = Console.ReadLine();
-
-            if(string.IsNullOrWhiteSpace(modelPath) )
+            string modelPath = UserSettings.GetModelPath();
+            if (!modelPath.Contains("codellama", StringComparison.InvariantCultureIgnoreCase))
             {
-                modelPath = await GetDefaultModel();
+                Console.ForegroundColor = ConsoleColor.Yellow;
+                Console.WriteLine("WARNING: the model you selected is not a Code LLama model!");
+                Console.WriteLine("For this example we specifically recommend 'codellama-7b-instruct.Q4_K_S.gguf'");
+                Console.WriteLine("Press ENTER to continue...");
+                Console.ReadLine();
             }
 
             var parameters = new ModelParams(modelPath)
@@ -35,12 +35,14 @@ public static async Task Run()
 
             Console.ForegroundColor = ConsoleColor.Yellow;
             Console.WriteLine("The executor has been enabled. In this example, the LLM will follow your instructions." +
-                "\nIt's a 7B Code Llama, so it's trained for programming tasks like \"Write a C# function reading a file name from a given URI\" or \"Write some programming interview questions\"." +
+                "\nIt's a 7B Code Llama, so it's trained for programming tasks like \"Write a C# function reading " +
+                "a file name from a given URI\" or \"Write some programming interview questions\"." +
+ "\nWrite 'exit' to exit"); Console.ForegroundColor = ConsoleColor.White; - var inferenceParams = new InferenceParams() { - Temperature = 0.8f, + var inferenceParams = new InferenceParams() + { + Temperature = 0.8f, MaxTokens = -1, }; @@ -51,7 +53,7 @@ public static async Task Run() { Console.ForegroundColor = ConsoleColor.Green; - await foreach (var text in executor.InferAsync(instruction + System.Environment.NewLine, inferenceParams)) + await foreach (var text in executor.InferAsync(instruction + Environment.NewLine, inferenceParams)) { Console.Write(text); } @@ -61,34 +63,5 @@ public static async Task Run() instruction = Console.ReadLine() ?? "Ask me for instructions."; } } - - private static async Task GetDefaultModel() - { - var uri = new Uri(DefaultModelUri); - var modelName = uri.Segments[^1]; - await Console.Out.WriteLineAsync($"The following model will be used: {modelName}"); - var modelPath = Path.Combine(ModelsDirectory, modelName); - if(!Directory.Exists(ModelsDirectory)) - { - Directory.CreateDirectory(ModelsDirectory); - } - - if (File.Exists(modelPath)) - { - await Console.Out.WriteLineAsync($"Existing model found, using {modelPath}"); - } - else - { - await Console.Out.WriteLineAsync($"Model not found locally, downloading {DefaultModelUri}..."); - using var http = new HttpClient(); - await using var downloadStream = await http.GetStreamAsync(uri); - await using var fileStream = new FileStream(modelPath, FileMode.Create, FileAccess.Write); - await downloadStream.CopyToAsync(fileStream); - await Console.Out.WriteLineAsync($"Model downloaded and saved to {modelPath}"); - } - - - return modelPath; - } } } diff --git a/LLama.Examples/Examples/GetEmbeddings.cs b/LLama.Examples/Examples/GetEmbeddings.cs index 9eab7c072..9a816b054 100644 --- a/LLama.Examples/Examples/GetEmbeddings.cs +++ b/LLama.Examples/Examples/GetEmbeddings.cs @@ -6,9 +6,7 @@ public class GetEmbeddings { public static void Run() { - Console.ForegroundColor = ConsoleColor.White; - Console.Write("Please input your model path: "); - var modelPath = Console.ReadLine(); + string modelPath = UserSettings.GetModelPath(); Console.ForegroundColor = ConsoleColor.DarkGray; var @params = new ModelParams(modelPath) { EmbeddingMode = true }; diff --git a/LLama.Examples/Examples/GrammarJsonResponse.cs b/LLama.Examples/Examples/GrammarJsonResponse.cs index 0d914f316..647aa7bd3 100644 --- a/LLama.Examples/Examples/GrammarJsonResponse.cs +++ b/LLama.Examples/Examples/GrammarJsonResponse.cs @@ -7,11 +7,10 @@ public class GrammarJsonResponse { public static async Task Run() { - var gbnf = (await File.ReadAllTextAsync("Assets/json.gbnf")).Trim(); - var grammar = Grammar.Parse(gbnf, "root"); + string modelPath = UserSettings.GetModelPath(); - Console.Write("Please input your model path: "); - var modelPath = Console.ReadLine(); + var gbnf = File.ReadAllText("Assets/json.gbnf").Trim(); + var grammar = Grammar.Parse(gbnf, "root"); var parameters = new ModelParams(modelPath) { @@ -27,10 +26,10 @@ public static async Task Run() Console.ForegroundColor = ConsoleColor.White; using var grammarInstance = grammar.CreateInstance(); - var inferenceParams = new InferenceParams() - { - Temperature = 0.6f, - AntiPrompts = new List { "Question:", "#", "Question: ", ".\n" }, + var inferenceParams = new InferenceParams() + { + Temperature = 0.6f, + AntiPrompts = new List { "Question:", "#", "Question: ", ".\n" }, MaxTokens = 50, Grammar = grammarInstance }; diff --git a/LLama.Examples/Examples/InstructModeExecute.cs 
index 7e3e09720..54a9858d1 100644
--- a/LLama.Examples/Examples/InstructModeExecute.cs
+++ b/LLama.Examples/Examples/InstructModeExecute.cs
@@ -6,8 +6,8 @@ public class InstructModeExecute
 {
     public static async Task Run()
     {
-        Console.Write("Please input your model path: ");
-        var modelPath = Console.ReadLine();
+        string modelPath = UserSettings.GetModelPath();
+
         var prompt = File.ReadAllText("Assets/dan.txt").Trim();
 
         var parameters = new ModelParams(modelPath)
diff --git a/LLama.Examples/Examples/InteractiveModeExecute.cs b/LLama.Examples/Examples/InteractiveModeExecute.cs
index 0cff5ec94..40d84df8c 100644
--- a/LLama.Examples/Examples/InteractiveModeExecute.cs
+++ b/LLama.Examples/Examples/InteractiveModeExecute.cs
@@ -6,8 +6,8 @@ public class InteractiveModeExecute
 {
     public static async Task Run()
     {
-        Console.Write("Please input your model path: ");
-        var modelPath = Console.ReadLine();
+        string modelPath = UserSettings.GetModelPath();
+
         var prompt = (await File.ReadAllTextAsync("Assets/chat-with-bob.txt")).Trim();
 
         var parameters = new ModelParams(modelPath)
diff --git a/LLama.Examples/Examples/KernelMemory.cs b/LLama.Examples/Examples/KernelMemory.cs
index 7c40c1259..df250af7c 100644
--- a/LLama.Examples/Examples/KernelMemory.cs
+++ b/LLama.Examples/Examples/KernelMemory.cs
@@ -1,12 +1,6 @@
-using System;
-using System.Collections.Generic;
-using System.Linq;
-using System.Text;
-using System.Threading.Tasks;
-using LLamaSharp.KernelMemory;
+using LLamaSharp.KernelMemory;
 using Microsoft.KernelMemory;
 using Microsoft.KernelMemory.Configuration;
-using Microsoft.KernelMemory.Handlers;
 
 namespace LLama.Examples.Examples
 {
@@ -14,14 +8,18 @@ public class KernelMemory
     {
         public static async Task Run()
         {
-            Console.WriteLine("Example from: https://github.com/microsoft/kernel-memory/blob/main/examples/101-using-core-nuget/Program.cs");
-            Console.Write("Please input your model path: ");
-            var modelPath = Console.ReadLine();
+            string modelPath = UserSettings.GetModelPath();
+
+            Console.ForegroundColor = ConsoleColor.Yellow;
+            Console.WriteLine("This example is from : \n" +
+                "https://github.com/microsoft/kernel-memory/blob/main/examples/101-using-core-nuget/Program.cs");
+
             var searchClientConfig = new SearchClientConfig
             {
                 MaxMatchesCount = 1,
                 AnswerTokens = 100,
             };
+
             var memory = new KernelMemoryBuilder()
                 .WithLLamaSharpDefaults(new LLamaSharpConfig(modelPath)
                 {
diff --git a/LLama.Examples/Examples/LoadAndSaveSession.cs b/LLama.Examples/Examples/LoadAndSaveSession.cs
index 678d3eb97..fded50e03 100644
--- a/LLama.Examples/Examples/LoadAndSaveSession.cs
+++ b/LLama.Examples/Examples/LoadAndSaveSession.cs
@@ -1,5 +1,4 @@
-using DocumentFormat.OpenXml.Bibliography;
-using LLama.Common;
+using LLama.Common;
 
 namespace LLama.Examples.Examples
 {
@@ -7,8 +6,8 @@ public class SaveAndLoadSession
     {
         public static async Task Run()
        {
-            Console.Write("Please input your model path: ");
-            var modelPath = Console.ReadLine();
+            string modelPath = UserSettings.GetModelPath();
+
             var prompt = (await File.ReadAllTextAsync("Assets/chat-with-bob.txt")).Trim();
 
             var parameters = new ModelParams(modelPath)
diff --git a/LLama.Examples/Examples/LoadAndSaveState.cs b/LLama.Examples/Examples/LoadAndSaveState.cs
index d4874ff3c..75c597bfa 100644
--- a/LLama.Examples/Examples/LoadAndSaveState.cs
+++ b/LLama.Examples/Examples/LoadAndSaveState.cs
@@ -6,8 +6,8 @@ public class LoadAndSaveState
 {
     public static async Task Run()
     {
-        Console.Write("Please input your model path: ");
-        var modelPath = Console.ReadLine();
+        string modelPath = UserSettings.GetModelPath();
+
         var prompt = (await File.ReadAllTextAsync("Assets/chat-with-bob.txt")).Trim();
 
         var parameters = new ModelParams(modelPath)
@@ -21,9 +21,10 @@ public static async Task Run()
         var ex = new InteractiveExecutor(context);
 
         Console.ForegroundColor = ConsoleColor.Yellow;
-        Console.WriteLine("The executor has been enabled. In this example, the prompt is printed, the maximum tokens is set to 64 and the context size is 256. (an example for small scale usage)");
-        Console.ForegroundColor = ConsoleColor.White;
+        Console.WriteLine("The executor has been enabled. In this example, the prompt is printed, " +
+            "the maximum tokens is set to 64 and the context size is 256. (an example for small scale usage)");
 
+        Console.ForegroundColor = ConsoleColor.White;
         Console.Write(prompt);
 
         var inferenceParams = new InferenceParams() { Temperature = 0.6f, AntiPrompts = new List<string> { "User:" } };
diff --git a/LLama.Examples/Examples/QuantizeModel.cs b/LLama.Examples/Examples/QuantizeModel.cs
index 1a67f750e..233b59678 100644
--- a/LLama.Examples/Examples/QuantizeModel.cs
+++ b/LLama.Examples/Examples/QuantizeModel.cs
@@ -4,8 +4,7 @@ public class QuantizeModel
 {
     public static void Run()
     {
-        Console.Write("Please input your original model path: ");
-        var inputPath = Console.ReadLine();
+        string inputPath = UserSettings.GetModelPath();
 
         Console.Write("Please input your output model path: ");
         var outputPath = Console.ReadLine();
diff --git a/LLama.Examples/Examples/SemanticKernelChat.cs b/LLama.Examples/Examples/SemanticKernelChat.cs
index 16e57f37f..258ca86bd 100644
--- a/LLama.Examples/Examples/SemanticKernelChat.cs
+++ b/LLama.Examples/Examples/SemanticKernelChat.cs
@@ -8,9 +8,11 @@ public class SemanticKernelChat
 {
     public static async Task Run()
     {
-        Console.WriteLine("Example from: https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/KernelSyntaxExamples/Example17_ChatGPT.cs");
-        Console.Write("Please input your model path: ");
-        var modelPath = Console.ReadLine();
+        string modelPath = UserSettings.GetModelPath();
+
+        Console.ForegroundColor = ConsoleColor.Yellow;
+        Console.WriteLine("This example is from: \n" +
+            "https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/KernelSyntaxExamples/Example17_ChatGPT.cs");
 
         // Load weights into memory
         var parameters = new ModelParams(modelPath);
@@ -19,7 +21,8 @@ public static async Task Run()
 
         var chatGPT = new LLamaSharpChatCompletion(ex);
 
-        var chatHistory = chatGPT.CreateNewChat("This is a conversation between the assistant and the user. \n\n You are a librarian, expert about books. ");
+        var chatHistory = chatGPT.CreateNewChat("This is a conversation between the " +
+            "assistant and the user. \n\n You are a librarian, expert about books. ");
 
"); Console.WriteLine("Chat content:"); Console.WriteLine("------------------------"); @@ -33,7 +36,8 @@ public static async Task Run() await MessageOutputAsync(chatHistory); // Second user message - chatHistory.AddUserMessage("I love history and philosophy, I'd like to learn something new about Greece, any suggestion"); + chatHistory.AddUserMessage("I love history and philosophy, I'd like to learn " + + "something new about Greece, any suggestion"); await MessageOutputAsync(chatHistory); // Second bot assistant message diff --git a/LLama.Examples/Examples/SemanticKernelMemory.cs b/LLama.Examples/Examples/SemanticKernelMemory.cs index 980d74bbf..1c9471d86 100644 --- a/LLama.Examples/Examples/SemanticKernelMemory.cs +++ b/LLama.Examples/Examples/SemanticKernelMemory.cs @@ -1,9 +1,6 @@ using LLama.Common; -using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.Memory; using LLamaSharp.SemanticKernel.TextEmbedding; -using Microsoft.SemanticKernel.AI.Embeddings; -using Microsoft.SemanticKernel.Plugins.Memory; namespace LLama.Examples.Examples { @@ -13,10 +10,10 @@ public class SemanticKernelMemory public static async Task Run() { - var loggerFactory = ConsoleLogger.LoggerFactory; - Console.WriteLine("Example from: https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/KernelSyntaxExamples/Example14_SemanticMemory.cs"); - Console.Write("Please input your model path: "); - var modelPath = Console.ReadLine(); + string modelPath = UserSettings.GetModelPath(); + + Console.WriteLine("This example is from: \n" + + "https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/KernelSyntaxExamples/Example14_SemanticMemory.cs"); var seed = 1337u; // Load weights into memory diff --git a/LLama.Examples/Examples/SemanticKernelPrompt.cs b/LLama.Examples/Examples/SemanticKernelPrompt.cs index 40838e8be..fdf58b3ac 100644 --- a/LLama.Examples/Examples/SemanticKernelPrompt.cs +++ b/LLama.Examples/Examples/SemanticKernelPrompt.cs @@ -1,5 +1,4 @@ -using System.Security.Cryptography; -using LLama.Common; +using LLama.Common; using LLamaSharp.SemanticKernel.ChatCompletion; using Microsoft.SemanticKernel; using LLamaSharp.SemanticKernel.TextCompletion; @@ -12,9 +11,11 @@ public class SemanticKernelPrompt { public static async Task Run() { - Console.WriteLine("Example from: https://github.com/microsoft/semantic-kernel/blob/main/dotnet/README.md"); - Console.Write("Please input your model path: "); - var modelPath = Console.ReadLine(); + string modelPath = UserSettings.GetModelPath(); + + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine("This example is from: " + + "https://github.com/microsoft/semantic-kernel/blob/main/dotnet/README.md"); // Load weights into memory var parameters = new ModelParams(modelPath); diff --git a/LLama.Examples/Examples/StatelessModeExecute.cs b/LLama.Examples/Examples/StatelessModeExecute.cs index 47dd0bbd1..e46a024e4 100644 --- a/LLama.Examples/Examples/StatelessModeExecute.cs +++ b/LLama.Examples/Examples/StatelessModeExecute.cs @@ -7,8 +7,7 @@ public class StatelessModeExecute { public static async Task Run() { - Console.Write("Please input your model path: "); - var modelPath = Console.ReadLine(); + string modelPath = UserSettings.GetModelPath(); var parameters = new ModelParams(modelPath) { diff --git a/LLama.Examples/Examples/TalkToYourself.cs b/LLama.Examples/Examples/TalkToYourself.cs index 1e94bfe00..19931e0f5 100644 --- a/LLama.Examples/Examples/TalkToYourself.cs +++ b/LLama.Examples/Examples/TalkToYourself.cs @@ -1,5 +1,4 @@ 
-using System.Security.Cryptography;
-using System.Text;
+using System.Text;
 using LLama.Abstractions;
 using LLama.Common;
 
@@ -9,8 +8,7 @@ public class TalkToYourself
 {
     public static async Task Run()
     {
-        Console.Write("Please input your model path: ");
-        var modelPath = Console.ReadLine();
+        string modelPath = UserSettings.GetModelPath();
 
         // Load weights into memory
         var @params = new ModelParams(modelPath);
diff --git a/LLama.Examples/Program.cs b/LLama.Examples/Program.cs
index a8b9b39a3..0de4e6058 100644
--- a/LLama.Examples/Program.cs
+++ b/LLama.Examples/Program.cs
@@ -1,11 +1,20 @@
-using LLama.Examples.Examples;
-using LLama.Native;
+using LLama.Native;
 
-Console.WriteLine("======================================================================================================");
+Console.WriteLine(
+    """
+    ======================================================================================================
+     __       __                                     ____     __
+    /\ \     /\ \                                   /\  _`\  /\ \
+    \ \ \    \ \ \         __      ___ ___     __   \ \,\L\_\\ \ \___      __     _ __   _____
+     \ \ \  __\ \ \  __  /'__`\  /' __` __`\  /'__`\ \/_\__ \ \ \  _ `\  /'__`\  /\`'__\/\ '__`\
+      \ \ \L\ \\ \ \L\ \/\ \L\.\_ /\ \/\ \/\ \ /\ \L\.\_ /\ \L\ \\ \ \ \ \ /\ \L\.\_\ \ \/ \ \ \L\ \
+       \ \____/ \ \____/\ \__/.\_\\ \_\ \_\ \_\\ \__/.\_\\ `\____\\ \_\ \_\\ \__/.\_\\ \_\  \ \ ,__/
+        \/___/   \/___/  \/__/\/_/ \/_/\/_/\/_/ \/__/\/_/  \/_____/ \/_/\/_/ \/__/\/_/ \/_/   \ \ \/
+                                                                                                \ \_\
+                                                                                                 \/_/
+    ======================================================================================================
 
-Console.WriteLine(" __ __ ____ __ \r\n/\\ \\ /\\ \\ /\\ _`\\ /\\ \\ \r\n\\ \\ \\ \\ \\ \\ __ ___ ___ __ \\ \\,\\L\\_\\\\ \\ \\___ __ _ __ _____ \r\n \\ \\ \\ __\\ \\ \\ __ /'__`\\ /' __` __`\\ /'__`\\ \\/_\\__ \\ \\ \\ _ `\\ /'__`\\ /\\`'__\\/\\ '__`\\ \r\n \\ \\ \\L\\ \\\\ \\ \\L\\ \\/\\ \\L\\.\\_ /\\ \\/\\ \\/\\ \\ /\\ \\L\\.\\_ /\\ \\L\\ \\\\ \\ \\ \\ \\ /\\ \\L\\.\\_\\ \\ \\/ \\ \\ \\L\\ \\\r\n \\ \\____/ \\ \\____/\\ \\__/.\\_\\\\ \\_\\ \\_\\ \\_\\\\ \\__/.\\_\\\\ `\\____\\\\ \\_\\ \\_\\\\ \\__/.\\_\\\\ \\_\\ \\ \\ ,__/\r\n \\/___/ \\/___/ \\/__/\\/_/ \\/_/\\/_/\\/_/ \\/__/\\/_/ \\/_____/ \\/_/\\/_/ \\/__/\\/_/ \\/_/ \\ \\ \\/ \r\n \\ \\_\\ \r\n \\/_/ ");
-
-Console.WriteLine("======================================================================================================");
+    """);
 
 NativeLibraryConfig
     .Instance
@@ -13,6 +22,5 @@
     .WithLogs();
 
 NativeApi.llama_empty_call();
-Console.WriteLine();
-
-await Runner.Run();
\ No newline at end of file
+
+await ExampleRunner.Run();
\ No newline at end of file
diff --git a/LLama.Examples/RepoUtils.cs b/LLama.Examples/RepoUtils.cs
deleted file mode 100644
index 8e7283395..000000000
--- a/LLama.Examples/RepoUtils.cs
+++ /dev/null
@@ -1,40 +0,0 @@
-using Microsoft.Extensions.Logging;
-using System;
-using System.Collections.Generic;
-using System.Linq;
-using System.Text;
-using System.Threading.Tasks;
-
-namespace LLama.Examples
-{
-    /// <summary>
-    /// Basic logger printing to console
-    /// </summary>
-    internal static class ConsoleLogger
-    {
-        internal static ILogger Logger => LoggerFactory.CreateLogger<object>();
-
-        internal static ILoggerFactory LoggerFactory => s_loggerFactory.Value;
-
-        private static readonly Lazy<ILoggerFactory> s_loggerFactory = new(LogBuilder);
-
-        private static ILoggerFactory LogBuilder()
-        {
-            return Microsoft.Extensions.Logging.LoggerFactory.Create(builder =>
-            {
-                builder.SetMinimumLevel(LogLevel.Warning);
-
-                builder.AddFilter("Microsoft", LogLevel.Trace);
-                builder.AddFilter("Microsoft", LogLevel.Debug);
-                builder.AddFilter("Microsoft", LogLevel.Information);
-                builder.AddFilter("Microsoft", LogLevel.Warning);
-                builder.AddFilter("Microsoft", LogLevel.Error);
-
-                builder.AddFilter("Microsoft", LogLevel.Warning);
-                builder.AddFilter("System", LogLevel.Warning);
-
-                builder.AddConsole();
-            });
-        }
-    }
-}
diff --git a/LLama.Examples/UserSettings.cs b/LLama.Examples/UserSettings.cs
new file mode 100644
index 000000000..1a0bb36b8
--- /dev/null
+++ b/LLama.Examples/UserSettings.cs
@@ -0,0 +1,73 @@
+namespace LLama.Examples;
+
+internal static class UserSettings
+{
+    private static readonly string SettingsFilePath = Path.Join(AppContext.BaseDirectory, "DefaultModel.env");
+
+    private static string? ReadDefaultModelPath()
+    {
+        if (!File.Exists(SettingsFilePath))
+            return null;
+
+        string path = File.ReadAllText(SettingsFilePath).Trim();
+        if (!File.Exists(path))
+            return null;
+
+        return path;
+    }
+
+    private static void WriteDefaultModelPath(string path)
+    {
+        File.WriteAllText(SettingsFilePath, path);
+    }
+
+    public static string GetModelPath(bool alwaysPrompt = false)
+    {
+        string? defaultPath = ReadDefaultModelPath();
+        return defaultPath is null || alwaysPrompt
+            ? PromptUserForPath()
+            : PromptUserForPathWithDefault(defaultPath);
+    }
+
+    private static string PromptUserForPath()
+    {
+        while (true)
+        {
+            Console.ForegroundColor = ConsoleColor.White;
+            Console.Write("Please input your model path: ");
+            string? path = Console.ReadLine();
+
+            if (File.Exists(path))
+            {
+                WriteDefaultModelPath(path);
+                return path;
+            }
+
+            Console.WriteLine("ERROR: invalid model file path\n");
+        }
+    }
+
+    private static string PromptUserForPathWithDefault(string defaultPath)
+    {
+        while (true)
+        {
+            Console.ForegroundColor = ConsoleColor.White;
+            Console.WriteLine($"Default model: {defaultPath}");
+            Console.Write($"Please input a model path (or ENTER for default): ");
+            string? path = Console.ReadLine();
+
+            if (string.IsNullOrWhiteSpace(path))
+            {
+                return defaultPath;
+            }
+
+            if (File.Exists(path))
+            {
+                WriteDefaultModelPath(path);
+                return path;
+            }
+
+            Console.WriteLine("ERROR: invalid model file path\n");
+        }
+    }
+}