diff --git a/LLama.Examples/LLama.Examples.csproj b/LLama.Examples/LLama.Examples.csproj
index a8abe3ae5..865a63212 100644
--- a/LLama.Examples/LLama.Examples.csproj
+++ b/LLama.Examples/LLama.Examples.csproj
@@ -27,6 +27,7 @@
+
diff --git a/LLama.Examples/NewVersion/SemanticKernelMemory.cs b/LLama.Examples/NewVersion/SemanticKernelMemory.cs
new file mode 100644
index 000000000..6d501599a
--- /dev/null
+++ b/LLama.Examples/NewVersion/SemanticKernelMemory.cs
@@ -0,0 +1,172 @@
+using Microsoft.SemanticKernel.Memory;
+using Microsoft.SemanticKernel;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using LLama.Common;
+using LLamaSharp.SemanticKernel.TextEmbedding;
+using Microsoft.SemanticKernel.AI.Embeddings;
+
+namespace LLama.Examples.NewVersion
+{
+    public class SemanticKernelMemory
+    {
+        private const string MemoryCollectionName = "SKGitHub";
+
+        public static async Task Run()
+        {
+            var loggerFactory = ConsoleLogger.LoggerFactory;
+            Console.WriteLine("Example from: https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/KernelSyntaxExamples/Example14_SemanticMemory.cs");
+            Console.Write("Please input your model path: ");
+            var modelPath = Console.ReadLine();
+
+            var seed = 1337;
+            // Load weights into memory
+            var parameters = new ModelParams(modelPath)
+            {
+                Seed = seed,
+                EmbeddingMode = true
+            };
+
+            using var model = LLamaWeights.LoadFromFile(parameters);
+            var embedding = new LLamaEmbedder(model, parameters);
+
+            Console.WriteLine("====================================================");
+            Console.WriteLine("======== Semantic Memory (volatile, in RAM) ========");
+            Console.WriteLine("====================================================");
+
+            /* You can build your own semantic memory combining an Embedding Generator
+             * with a Memory storage that supports search by similarity (ie semantic search).
+             *
+             * In this example we use a volatile memory, a local simulation of a vector DB.
+             *
+             * You can replace VolatileMemoryStore with Qdrant (see QdrantMemoryStore connector)
+             * or implement your connectors for Pinecone, Vespa, Postgres + pgvector, SQLite VSS, etc.
+             */
+
+            var kernelWithCustomDb = Kernel.Builder
+                .WithLoggerFactory(ConsoleLogger.LoggerFactory)
+                .WithAIService<ITextEmbeddingGeneration>("local-llama-embed", new LLamaSharpEmbeddingGeneration(embedding), true)
+                .WithMemoryStorage(new VolatileMemoryStore())
+                .Build();
+
+            await RunExampleAsync(kernelWithCustomDb);
+        }
+
+        private static async Task RunExampleAsync(IKernel kernel)
+        {
+            await StoreMemoryAsync(kernel);
+
+            await SearchMemoryAsync(kernel, "How do I get started?");
+
+            /*
+            Output:
+
+            Query: How do I get started?
+
+            Result 1:
+              URL:     : https://github.com/microsoft/semantic-kernel/blob/main/README.md
+              Title    : README: Installation, getting started, and how to contribute
+
+            Result 2:
+              URL:     : https://github.com/microsoft/semantic-kernel/blob/main/samples/dotnet-jupyter-notebooks/00-getting-started.ipynb
+              Title    : Jupyter notebook describing how to get started with the Semantic Kernel
+
+            */
+
+            await SearchMemoryAsync(kernel, "Can I build a chat with SK?");
+
+            /*
+            Output:
+
+            Query: Can I build a chat with SK?
+
+            Result 1:
+              URL:     : https://github.com/microsoft/semantic-kernel/tree/main/samples/skills/ChatSkill/ChatGPT
+              Title    : Sample demonstrating how to create a chat skill interfacing with ChatGPT
+
+            Result 2:
+              URL:     : https://github.com/microsoft/semantic-kernel/blob/main/samples/apps/chat-summary-webapp-react/README.md
+              Title    : README: README associated with a sample chat summary react-based webapp
+
+            */
+
+            await SearchMemoryAsync(kernel, "Jupyter notebook");
+
+            await SearchMemoryAsync(kernel, "README: README associated with a sample chat summary react-based webapp");
+
+            await SearchMemoryAsync(kernel, "Jupyter notebook describing how to pass prompts from a file to a semantic skill or function");
+        }
+
+        private static async Task SearchMemoryAsync(IKernel kernel, string query)
+        {
+            Console.WriteLine("\nQuery: " + query + "\n");
+
+            var memories = kernel.Memory.SearchAsync(MemoryCollectionName, query, limit: 10, minRelevanceScore: 0.5);
+
+            int i = 0;
+            await foreach (MemoryQueryResult memory in memories)
+            {
+                Console.WriteLine($"Result {++i}:");
+                Console.WriteLine("  URL:     : " + memory.Metadata.Id);
+                Console.WriteLine("  Title    : " + memory.Metadata.Description);
+                Console.WriteLine("  Relevance: " + memory.Relevance);
+                Console.WriteLine();
+            }
+
+            Console.WriteLine("----------------------");
+        }
+
+        private static async Task StoreMemoryAsync(IKernel kernel)
+        {
+            /* Store some data in the semantic memory.
+             *
+             * When using Azure Cognitive Search the data is automatically indexed on write.
+             *
+             * When using the combination of VolatileStore and Embedding generation, SK takes
+             * care of creating and storing the index
+             */
+
+            Console.WriteLine("\nAdding some GitHub file URLs and their descriptions to the semantic memory.");
+            var githubFiles = SampleData();
+            var i = 0;
+            foreach (var entry in githubFiles)
+            {
+                var result = await kernel.Memory.SaveReferenceAsync(
+                    collection: MemoryCollectionName,
+                    externalSourceName: "GitHub",
+                    externalId: entry.Key,
+                    description: entry.Value,
+                    text: entry.Value);
+
+                Console.WriteLine($"#{++i} saved.");
+                Console.WriteLine(result);
+            }
+
+            Console.WriteLine("\n----------------------");
+        }
+
+        private static Dictionary<string, string> SampleData()
+        {
+            return new Dictionary<string, string>
+            {
+                ["https://github.com/microsoft/semantic-kernel/blob/main/README.md"]
+                    = "README: Installation, getting started, and how to contribute",
+                ["https://github.com/microsoft/semantic-kernel/blob/main/dotnet/notebooks/02-running-prompts-from-file.ipynb"]
+                    = "Jupyter notebook describing how to pass prompts from a file to a semantic skill or function",
+                ["https://github.com/microsoft/semantic-kernel/blob/main/dotnet/notebooks//00-getting-started.ipynb"]
+                    = "Jupyter notebook describing how to get started with the Semantic Kernel",
+                ["https://github.com/microsoft/semantic-kernel/tree/main/samples/skills/ChatSkill/ChatGPT"]
+                    = "Sample demonstrating how to create a chat skill interfacing with ChatGPT",
+                ["https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/SemanticKernel/Memory/VolatileMemoryStore.cs"]
+                    = "C# class that defines a volatile embedding store",
+                ["https://github.com/microsoft/semantic-kernel/blob/main/samples/dotnet/KernelHttpServer/README.md"]
+                    = "README: How to set up a Semantic Kernel Service API using Azure Function Runtime v4",
+                ["https://github.com/microsoft/semantic-kernel/blob/main/samples/apps/chat-summary-webapp-react/README.md"]
+                    = "README: README associated with a sample chat summary react-based webapp",
+            };
+        }
+    }
+}
diff --git a/LLama.Examples/NewVersion/TestRunner.cs b/LLama.Examples/NewVersion/TestRunner.cs
index 833165106..07f614226 100644
--- a/LLama.Examples/NewVersion/TestRunner.cs
+++ b/LLama.Examples/NewVersion/TestRunner.cs
@@ -20,6 +20,7 @@ public static async Task Run()
Console.WriteLine("10: Constrain response to json format using grammar.");
Console.WriteLine("11: Semantic Kernel Prompt.");
Console.WriteLine("12: Semantic Kernel Chat.");
+ Console.WriteLine("13: Semantic Kernel Memory.");
while (true)
{
@@ -78,6 +79,10 @@ public static async Task Run()
{
await SemanticKernelChat.Run();
}
+ else if (choice == 13)
+ {
+ await SemanticKernelMemory.Run();
+ }
else
{
Console.WriteLine("Cannot parse your choice. Please select again.");
diff --git a/LLama.Examples/RepoUtils.cs b/LLama.Examples/RepoUtils.cs
new file mode 100644
index 000000000..8e7283395
--- /dev/null
+++ b/LLama.Examples/RepoUtils.cs
@@ -0,0 +1,40 @@
+using Microsoft.Extensions.Logging;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace LLama.Examples
+{
+    /// <summary>
+    /// Basic logger printing to console
+    /// </summary>
+ internal static class ConsoleLogger
+ {
+ internal static ILogger Logger => LoggerFactory.CreateLogger