diff --git a/.vscode/settings.json b/.vscode/settings.json
index 36060c712..4d647e835 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -20,5 +20,4 @@
"scheme": "file"
}
],
- "asciidoc.antora.enableAntoraSupport": true,
}
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/course.adoc b/asciidoc/courses/genai-workshop/course.adoc
new file mode 100644
index 000000000..3ba330b1b
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/course.adoc
@@ -0,0 +1,40 @@
+= Gen-AI - Hands-on Workshop
+:status: active
+:duration: 2 hours
+:caption: GenAI Beyond Chat with RAG, Knowledge Graphs and Python
+:usecase: blank-sandbox
+:key-points: Vector indexes and embeddings, Semantic search, Retrieval Augmented Generation (RAG), Knowledge graphs, Integrating Neo4j with Python and Langchain
+:repository: neo4j-graphacademy/genai-workshop
+
+== Course Description
+
+In this GenAI and Neo4j workshop, you will learn how Neo4j can support your GenAI projects.
+
+You will:
+
+* Use vector indexes and embeddings in Neo4j to perform similarity and keyword search
+* Use Python and Langchain to integrate with Neo4j and OpenAI
+* Learn about Large Language Models (LLMs), hallucination and integrating knowledge graphs
+* Explore Retrieval Augmented Generation (RAG) and its role in grounding LLM-generated content
+
+After completing this workshop, you will be able to explain the terms LLM, RAG, grounding, and knowledge graphs. You will also have the knowledge and skills to create simple LLM-based applications using Neo4j and Python.
+
+=== Prerequisites
+
+Before taking this course, you should have:
+
+* A basic understanding of Graph Databases and Neo4j
+* Knowledge of Python and the ability to read simple programs
+
+While not essential, we recommend completing the GraphAcademy link:/courses/neo4j-fundamentals/[Neo4j Fundamentals^] course.
+
+=== Duration
+
+{duration}
+
+== What you need
+
+To complete the practical tasks within this workshop, you will need:
+
+* Access to gitpod.io (you will need a GitHub, GitLab, or Bitbucket account) or a local Python environment
+* An OpenAI billing account and API key
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/1-getting-started/lesson.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/1-getting-started/lesson.adoc
new file mode 100644
index 000000000..469901a9e
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/1-getting-started/lesson.adoc
@@ -0,0 +1,83 @@
+= Getting Started
+:order: 1
+:type: lesson
+:lab: {repository-link}
+:disable-cache: true
+
+We have created a link:https://github.com/neo4j-graphacademy/genai-workshop[repository^] for this workshop.
+It contains the starter code and resources you need.
+
+A blank Neo4j Sandbox instance has also been created for you to use during this course.
+
+You can open a Neo4j Browser window throughout this course by clicking the link:#[Toggle Sandbox,role=classroom-sandbox-toggle] button in the bottom right-hand corner of the screen.
+
+== Get the code
+
+You can use Gitpod as an online IDE and workspace for this workshop.
+It will automatically clone the workshop repository and set up your environment.
+
+lab::Open `Gitpod workspace`[]
+
+[NOTE]
+You will need to log in with a GitHub, GitLab, or Bitbucket account.
+
+Alternatively, you can clone the repository and set up the environment yourself.
+
+[%collapsible]
+.Develop on your local machine
+====
+You will need link:https://python.org[Python] installed and the ability to install packages using `pip`.
+
+You may want to set up a virtual environment using link:https://docs.python.org/3/library/venv.html[`venv`^] or link:https://virtualenv.pypa.io/en/latest/[`virtualenv`^] to keep your dependencies separate from other projects.
+
+Clone the link:https://github.com/neo4j-graphacademy/genai-workshop[github.com/neo4j-graphacademy/genai-workshop] repository:
+
+[source,bash]
+----
+git clone https://github.com/neo4j-graphacademy/genai-workshop
+----
+
+Install the required packages using `pip`:
+
+[source,bash]
+----
+cd genai-workshop
+pip install -r requirements.txt
+----
+====
+
+== Set up the environment
+
+Create a copy of the `.env.example` file and name it `.env`.
+Fill in the required values.
+
+[source]
+.Create a .env file
+----
+include::{repository-raw}/main/.env.example[]
+----
+
+Add your OpenAI API key (`OPENAI_API_KEY`), which you can get from link:https://platform.openai.com[platform.openai.com].
+
+Update the Neo4j sandbox connection details:
+
+NEO4J_URI:: [copy]#bolt://{sandbox_ip}:{sandbox_boltPort}#
+NEO4J_USERNAME:: [copy]#{sandbox_username}#
+NEO4J_PASSWORD:: [copy]#{sandbox_password}#
+
+== Test your setup
+
+You can test your setup by running `test_environment.py`, which will attempt to connect to the Neo4j sandbox and the OpenAI API.
+
+You will see an `OK` message if you have set up your environment correctly. If any tests fail, check the contents of the `.env` file.
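+
+The repository's test script is the source of truth; as a rough sketch, a connectivity check like the following (assuming the `neo4j` driver, the `openai` v1 client, and `python-dotenv`) covers the same ground:
+
+[source, python]
+----
+# A sketch of a connectivity check - the repository's test_environment.py may differ.
+import os
+from dotenv import load_dotenv
+from neo4j import GraphDatabase
+from openai import OpenAI
+
+load_dotenv()
+
+# Verify the Neo4j sandbox connection details from the .env file
+driver = GraphDatabase.driver(
+    os.getenv("NEO4J_URI"),
+    auth=(os.getenv("NEO4J_USERNAME"), os.getenv("NEO4J_PASSWORD")),
+)
+driver.verify_connectivity()
+driver.close()
+
+# Verify the OpenAI API key by listing the available models
+llm = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+llm.models.list()
+
+print("OK")
+----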
+
+== Continue
+
+When you are ready, you can move on to the next task.
+
+read::Success - let's get started![]
+
+[.summary]
+== Summary
+
+You have set up your environment and are ready to start the workshop.
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/10-expand-graph/lesson.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/10-expand-graph/lesson.adoc
new file mode 100644
index 000000000..0b7391184
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/10-expand-graph/lesson.adoc
@@ -0,0 +1,44 @@
+= Expand the Graph
+:order: 10
+:type: challenge
+:optional: true
+:sandbox: true
+
+In this *optional* challenge, you can extend the graph with additional data.
+
+== All Courses
+
+Currently, the graph contains data from a single course, `llm-fundamentals`. You can download the link:https://data.neo4j.com/llm-vectors-unstructured/courses.zip[lesson files for all the courses^].
+
+. Download the content for all the courses - link:https://data.neo4j.com/llm-vectors-unstructured/courses.zip[data.neo4j.com/llm-vectors-unstructured/courses.zip^]
+. Update the graph with the new data
+. Explore the graph and find the connections between the courses
+
+== Additional metadata
+
+While the course content is unstructured, it contains metadata you can extract and include in the graph.
+
+Examples include:
+
+* The course title is the first level 1 heading in the file - `= Course Title`
+* Level 2 headings denote section titles - `== Section Title`
+* The lessons include parameters in the format `:parameter: value` at the top of the file, such as:
+** `:type:` - the type of lesson (e.g. `lesson`, `challenge`, `quiz`)
+** `:order:` - the order of the lesson in the module
+** `:optional:` - whether the lesson is optional
+
+Explore the course content and see what other data you can extract and include in the graph.
+
+When you are ready to move on, click Continue.
+
+== Continue
+
+When you are ready, you can move on to the next task.
+
+read::Move on[]
+
+[.summary]
+== Summary
+
+In this optional challenge, you extended the graph with additional data.
+
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/11-next-steps/lesson.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/11-next-steps/lesson.adoc
new file mode 100644
index 000000000..3b4ee705a
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/11-next-steps/lesson.adoc
@@ -0,0 +1,20 @@
+= Next steps
+:order: 11
+:type: lesson
+
+Congratulations on completing this workshop!
+
+You have:
+
+* Used vector indexes to search for similar data
+* Created embeddings and vector indexes
+* Built a graph of unstructured data using Python and Langchain
+
+You can learn more about Neo4j at link:https://graphacademy.neo4j.com[Neo4j GraphAcademy^].
+
+read::Finished[]
+
+[.summary]
+== Summary
+
+Congratulations on completing this workshop!
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/images/3d-vector.svg b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/images/3d-vector.svg
new file mode 100644
index 000000000..a4fe3a616
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/images/3d-vector.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/images/Apple-tech-or-fruit.png b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/images/Apple-tech-or-fruit.png
new file mode 100644
index 000000000..7816211c3
Binary files /dev/null and b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/images/Apple-tech-or-fruit.png differ
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/images/vector-distance.svg b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/images/vector-distance.svg
new file mode 100644
index 000000000..a29b92041
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/images/vector-distance.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/lesson.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/lesson.adoc
new file mode 100644
index 000000000..64cd4a6be
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/2-vectors/lesson.adoc
@@ -0,0 +1,72 @@
+= Semantic Search, Vectors, and Embeddings
+:order: 2
+:type: lesson
+
+Machine learning and natural language processing (NLP) often use vectors and embeddings to represent and understand data.
+
+== Semantic Search
+
+Semantic search aims to understand search phrases' intent and contextual meaning, rather than focusing on individual keywords.
+
+Traditional keyword search often depends on exact-match keywords or proximity-based algorithms that find similar words.
+
+For example, if you input "apple" in a traditional search, you might predominantly get results about the fruit.
+
+However, in a semantic search, the engine tries to gauge the context: Are you searching about the fruit, the tech company, or something else?
+
+image::images/Apple-tech-or-fruit.png[An apple in the middle with tech icons on the left and food icons on the right,width=700,align=center]
+
+
+== What are Vectors?
+
+A vector is simply a list of numbers.
+For example, the vector `[1, 2, 3]` is a list of three numbers and could represent a point in three-dimensional space.
+
+image::images/3d-vector.svg[A diagram showing a 3d representation of the x,y,z coordinates 1,1,1 and 1,2,3]
+
+You can use vectors to represent many different types of data, including text, images, and audio.
+
+Vectors with hundreds or thousands of dimensions are common in machine learning and natural language processing (NLP).
+
+== What are Embeddings?
+
+When referring to vectors in the context of machine learning and NLP, the term "embedding" is typically used.
+An embedding is a vector that represents the data in a useful way for a specific task.
+
+Each dimension in a vector can represent a particular semantic aspect of the word or phrase.
+When multiple dimensions are combined, they can convey the overall meaning of the word or phrase.
+
+For example, the word "apple" might be represented by an embedding with the following dimensions:
+
+* fruit
+* technology
+* color
+* taste
+* shape
+
+You can create embeddings in various ways, but one of the most common methods is to use a **large language model**.
+
+For example, the embedding for the word "apple" is `0.0077788467, -0.02306925, -0.007360777, -0.027743412, -0.0045747845, 0.01289164, -0.021863015, -0.008587573, 0.01892967, -0.029854324, -0.0027962727, 0.020108491, -0.004530236, 0.009129008,` ... and so on.
+
+== How are vectors used in semantic search?
+
+You can use the _distance_ or _angle_ between vectors to gauge the semantic similarity between words or phrases.
+
+image::images/vector-distance.svg[A 3 dimensional chart illustrating the distance between vectors. The vectors are for the words "apple" and "fruit",width=700,align=center]
+
+Words with similar meanings or contexts will have vectors that are close together, while unrelated words will be farther apart.
+
+This principle is employed in semantic search to find contextually relevant results for a user's query.
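+
+As an illustration (this is not part of the workshop code), the cosine similarity between two small vectors can be computed directly in Python:
+
+[source, python]
+----
+# Toy example: cosine similarity between two 3-dimensional vectors.
+# Real embeddings have hundreds or thousands of dimensions, but the maths is the same.
+from math import sqrt
+
+apple = [1.0, 2.0, 3.0]
+fruit = [1.2, 1.8, 3.1]
+
+dot = sum(a * b for a, b in zip(apple, fruit))
+norm_apple = sqrt(sum(a * a for a in apple))
+norm_fruit = sqrt(sum(b * b for b in fruit))
+
+similarity = dot / (norm_apple * norm_fruit)
+print(similarity)  # close to 1.0 - the vectors point in a similar direction
+----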
+
+== Continue
+
+When you are ready, you can move on to the next task.
+
+read::Move on[]
+
+[.summary]
+== Summary
+
+You learned about semantic search, vectors, and embeddings.
+
+Next, you will use a Neo4j vector index to find similar data.
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/3-search-vector/lesson.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/3-search-vector/lesson.adoc
new file mode 100644
index 000000000..79cab0d96
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/3-search-vector/lesson.adoc
@@ -0,0 +1,66 @@
+= Search using a Vector Index
+:order: 3
+:type: challenge
+:sandbox: true
+
+The Neo4j sandbox contains a sample of 1000 movies.
+Running the following Cypher query will return the titles and plots for the movies in the database:
+
+[source, cypher]
+----
+MATCH (m:Movie)
+RETURN m.title, m.plot
+----
+
+Review the movies and find a plot that you think looks interesting.
+
+You can adapt the query to only return a named movie by adding a filter:
+
+[source, cypher]
+----
+MATCH (m:Movie {title: "Toy Story"})
+RETURN m.title, m.plot
+----
+
+== Finding similar movies
+
+You can view the embedding for a movie plot by running the following query:
+
+[source, cypher]
+----
+MATCH (m:Movie {title: "Toy Story"})
+RETURN m.title, m.plotEmbedding
+----
+
+You can find similar movies using the embedding for the movie plot and a vector index.
+
+You can query the vector index to find similar movies by running the following query:
+
+[source, cypher]
+----
+MATCH (m:Movie {title: 'Toy Story'})
+
+CALL db.index.vector.queryNodes('moviePlots', 6, m.plotEmbedding)
+YIELD node, score
+
+RETURN node.title, node.plot, score
+----
+
+The `db.index.vector.queryNodes` procedure takes three arguments:
+
+* The name of the vector index to query - `moviePlots`
+* The number of results to return - `6`
+* The embedding to search for - `m.plotEmbedding`
+
+Experiment with the query to find similar movies to the one you searched earlier.
+
+== Continue
+
+When you are ready, you can move on to the next task.
+
+read::Move on[]
+
+[.summary]
+== Summary
+
+You learned how to use a vector index to find similar unstructured data.
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/3-search-vector/reset.cypher b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/3-search-vector/reset.cypher
new file mode 100644
index 000000000..0e71a0ba7
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/3-search-vector/reset.cypher
@@ -0,0 +1,27 @@
+LOAD CSV WITH HEADERS
+FROM 'https://data.neo4j.com/llm-vectors-unstructured/movies-plot-embedding.csv' AS row
+MERGE (m:Movie {movieId: toInteger(row.movieId)})
+SET
+m.tmdbId = toInteger(row.tmdbId),
+m.imdbId = toInteger(row.imdbId),
+m.released = row.released,
+m.title = row.title,
+m.year = toInteger(row.year),
+m.plot = row.plot,
+m.budget = toInteger(row.budget),
+m.imdbRating = toFloat(row.imdbRating),
+m.poster = row.poster,
+m.runtime = toInteger(row.runtime),
+m.imdbVotes = toInteger(row.imdbVotes),
+m.revenue = toInteger(row.revenue),
+m.url = row.url
+WITH m, row
+CALL db.create.setNodeVectorProperty(m, 'plotEmbedding', apoc.convert.fromJsonList(row.plotEmbedding));
+
+CREATE VECTOR INDEX moviePlots IF NOT EXISTS
+FOR (m:Movie)
+ON m.plotEmbedding
+OPTIONS {indexConfig: {
+ `vector.dimensions`: 1536,
+ `vector.similarity_function`: 'cosine'
+}};
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/4-embeddings/lesson.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/4-embeddings/lesson.adoc
new file mode 100644
index 000000000..06ac57901
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/4-embeddings/lesson.adoc
@@ -0,0 +1,86 @@
+= Creating Embeddings
+:order: 4
+:type: challenge
+:sandbox: true
+
+
+In this task, you will use Cypher and Python to create embeddings.
+
+== Find a movie plot
+
+In the previous task, you used a vector index to find similar movies.
+
+To find a movie with a plot you define, you need to create an embedding for your text before you can query the vector index.
+
+For example, to find a movie about "A mysterious spaceship lands Earth", you need to:
+
+. Create an embedding for the text "A mysterious spaceship lands Earth".
+. Pass the embedding to the `db.index.vector.queryNodes` procedure.
+
+You can generate a new embedding in Cypher using the link:https://neo4j.com/docs/cypher-manual/current/genai-integrations/#single-embedding[`genai.vector.encode`^] function.
+
+For example, you can use the `OpenAI` provider to generate an embedding, passing the API key as `token` in the configuration map:
+
+[source, cypher]
+----
+WITH genai.vector.encode("Text to create embeddings for", "OpenAI", { token: "sk-..." }) AS embedding
+RETURN embedding
+----
+
+[IMPORTANT]
+Remember to replace `sk-...` with your OpenAI API key.
+
+You can use the embedding to query the vector index to find similar movies.
+
+[source, cypher]
+----
+WITH genai.vector.encode(
+ "A mysterious spaceship lands Earth",
+ "OpenAI",
+ { token: "sk-..." }) AS myMoviePlot
+CALL db.index.vector.queryNodes('moviePlots', 6, myMoviePlot)
+YIELD node, score
+RETURN node.title, node.plot, score
+----
+
+Experiment with different movie plots and observe the results.
+
+=== OpenAI API
+
+You can also use the OpenAI API to create embeddings using Python.
+
+Open the `1-knowledge-graphs-vectors/create_embeddings.py` file in the code editor.
+
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/create_embeddings.py[]
+----
+
+Review the code before running it and note that:
+
+- `load_dotenv()` loads the environment variables from the `.env` file.
+- `OpenAI()` creates an instance of the OpenAI class.
+- `llm.embeddings.create()` creates an embedding for the input text using the `text-embedding-ada-002` model.
+- The response containing the embeddings is printed to the console.
+
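+The same steps, written out as a minimal standalone sketch (the included file above is the reference), look like this:
+
+[source, python]
+----
+# Sketch only - see create_embeddings.py in the repository for the actual code.
+from dotenv import load_dotenv
+from openai import OpenAI
+
+load_dotenv()  # loads OPENAI_API_KEY from the .env file
+
+llm = OpenAI()  # picks up OPENAI_API_KEY from the environment
+
+response = llm.embeddings.create(
+    input="Text to create embeddings for",
+    model="text-embedding-ada-002"
+)
+
+print(response.data[0].embedding)
+----
+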
+Run the code.
+You should see a list of numbers representing the embedding:
+
+[source]
+----
+[-0.028445715084671974, 0.009996716864407063, 0.0017208183417096734, -0.010130099952220917, ...]
+----
+
+== Continue
+
+When you are ready, you can move on to the next task.
+
+read::Move on[]
+
+[.summary]
+== Summary
+
+You learned how to create embeddings using Cypher and Python.
+
+In the next task, you will learn how to create a vector index on an embedding.
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/5-create-vector-index/images/babe-similar-posters.jpg b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/5-create-vector-index/images/babe-similar-posters.jpg
new file mode 100644
index 000000000..f551d595f
Binary files /dev/null and b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/5-create-vector-index/images/babe-similar-posters.jpg differ
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/5-create-vector-index/lesson.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/5-create-vector-index/lesson.adoc
new file mode 100644
index 000000000..a0ff16190
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/5-create-vector-index/lesson.adoc
@@ -0,0 +1,120 @@
+= Create a Vector Index
+:order: 5
+:type: challenge
+:sandbox: true
+
+Your next task is to create a vector index using Cypher.
+
+You previously used a vector index to find similar text; you can also use a vector index to find similar images.
+
+== Movie posters
+
+GraphAcademy has loaded a dataset of movie posters into the sandbox.
+Each movie has a URL to a poster image:
+
+[source, cypher]
+MATCH (m:Movie {title: "Toy Story"})
+RETURN m.title, m.poster
+
+image::https://image.tmdb.org/t/p/w440_and_h660_face/uXDfjJbdP4ijW5hWSBrPrlKpxab.jpg[Toy Story movie poster,width=250,align=center]
+
+The data also contains embeddings for each poster:
+
+[source, cypher]
+MATCH (m:Movie {title: "Toy Story"})
+RETURN m.title, m.posterEmbedding
+
+== Create a vector index
+
+To search the movie poster embeddings, you must create a vector index.
+Review the following Cypher to create the vector index before running it:
+
+[source, cypher]
+----
+CREATE VECTOR INDEX moviePosters IF NOT EXISTS
+FOR (m:Movie)
+ON m.posterEmbedding
+OPTIONS {indexConfig: {
+ `vector.dimensions`: 512,
+ `vector.similarity_function`: 'cosine'
+}}
+----
+
+You should note the following about the index:
+
+- It is named `moviePosters`
+- It is against the `posterEmbedding` property on `Movie` nodes
+- The vector has `512` dimensions
+- The function used to compare vectors is `cosine`
+
+[%collapsible]
+.More about dimensions
+====
+The model used to create the embeddings determines the number of dimensions in the vector.
+
+In this case, we used the link:https://openai.com/research/clip[OpenAI CLIP model^], which has 512 dimensions.
+
+We created the movie plot embeddings using link:https://platform.openai.com/docs/guides/embeddings/embedding-models[OpenAI's text-embedding-ada-002 model^], which has 1536 dimensions.
+====
+
+Run the Cypher to create the vector index.
+
+Check that you created the index successfully using the `SHOW VECTOR INDEXES` command.
+
+.Show Indexes
+[source,cypher]
+----
+SHOW VECTOR INDEXES
+----
+
+You should see a result similar to the following:
+
+.Show Indexes Result
+[%header]
+|===
+| id | name | state | populationPercent | type
+| 4 | "moviePosters" | "ONLINE" | 100.0 | "VECTOR"
+|===
+
+Once the `state` is listed as "ONLINE", the index will be ready to query.
+
+The `populationPercent` field indicates the proportion of node and property pairings that have been indexed.
+When the `populationPercent` is `100.0`, all the movie embeddings have been indexed.
+
+== Similar posters
+
+You can use the `db.index.vector.queryNodes` procedure to find similar movie posters.
+
+[source, cypher]
+----
+MATCH (m:Movie {title: "Babe"})
+
+CALL db.index.vector.queryNodes('moviePosters', 6, m.posterEmbedding)
+YIELD node, score
+
+RETURN node.title, node.poster, score;
+----
+
+image::images/babe-similar-posters.jpg[3 movie posters, Babe, Lassie, Before the Rain with similar images,width=700,align=center]
+
+Pick a different movie and write a similar Cypher query to find similar posters.
+
+You can view all the movie titles using this Cypher:
+
+[source, cypher]
+----
+MATCH (m:Movie)
+RETURN m.title
+----
+
+== Continue
+
+When you are ready, you can move on to the next task.
+
+read::Move on[]
+
+[.summary]
+== Summary
+
+You learned how to create a vector index in Neo4j.
+
+Next, you will learn how to model unstructured data as a graph.
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/5-create-vector-index/reset.cypher b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/5-create-vector-index/reset.cypher
new file mode 100644
index 000000000..27dbd8559
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/5-create-vector-index/reset.cypher
@@ -0,0 +1,19 @@
+LOAD CSV WITH HEADERS
+FROM 'https://data.neo4j.com/llm-vectors-unstructured/movies-poster-embedding.csv' AS row
+MERGE (m:Movie {movieId: toInteger(row.movieId)})
+SET
+m.tmdbId = toInteger(row.tmdbId),
+m.imdbId = toInteger(row.imdbId),
+m.released = row.released,
+m.title = row.title,
+m.year = toInteger(row.year),
+m.plot = row.plot,
+m.budget = toInteger(row.budget),
+m.imdbRating = toFloat(row.imdbRating),
+m.poster = row.poster,
+m.runtime = toInteger(row.runtime),
+m.imdbVotes = toInteger(row.imdbVotes),
+m.revenue = toInteger(row.revenue),
+m.url = row.url
+WITH m, row
+CALL db.create.setNodeVectorProperty(m, 'posterEmbedding', apoc.convert.fromJsonList(row.posterEmbedding));
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/6-unstructured-data/images/graphacademy-lessons-paragraph.svg b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/6-unstructured-data/images/graphacademy-lessons-paragraph.svg
new file mode 100644
index 000000000..8962d9db1
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/6-unstructured-data/images/graphacademy-lessons-paragraph.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/6-unstructured-data/lesson.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/6-unstructured-data/lesson.adoc
new file mode 100644
index 000000000..aac11fab8
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/6-unstructured-data/lesson.adoc
@@ -0,0 +1,43 @@
+= Unstructured data
+:order: 6
+:type: lesson
+
+Unstructured data refers to information that doesn't fit neatly into pre-defined structures and types.
+Examples include text files, emails, social media posts, videos, photos, audio files, and web pages.
+
+Unstructured data is often rich in information but challenging to analyze.
+
+== Vectors and Graphs
+
+Vectors and embeddings can represent unstructured data, making it easier to identify similarities and search for related data.
+
+Graphs are a powerful tool for representing and analyzing unstructured data.
+
+For example, you can use vectors to find the correct documentation to support a customer query and a graph to understand the relationships between different products and customer feedback.
+
+== Chunking
+
+When dealing with large amounts of data, breaking it into smaller, more manageable chunks is helpful. This process is called chunking.
+
+There are countless strategies for splitting data into chunks, and the best approach depends on the data and the problem you are trying to solve.
+
+Later in this workshop, you will import unstructured data from a GraphAcademy course and chunk it into paragraphs.
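+
+As a toy illustration (the workshop code uses Langchain rather than this), splitting text into paragraph chunks can be as simple as:
+
+[source, python]
+----
+# Toy example: split a document into paragraph chunks on blank lines.
+text = "First paragraph...\n\nSecond paragraph...\n\nThird paragraph..."
+
+chunks = [chunk.strip() for chunk in text.split("\n\n") if chunk.strip()]
+
+print(chunks)
+# ['First paragraph...', 'Second paragraph...', 'Third paragraph...']
+----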
+
+You can store embeddings for individual chunks and create relationships between chunks to capture context and relationships.
+
+When storing the course content, you will create a node for each `Paragraph` chunk and a relationship `CONTAINS` between the `Lesson` and `Paragraph` nodes.
+
+image::images/graphacademy-lessons-paragraph.svg[A graph showing Lesson and Paragraph nodes connected by a CONTAINS relationship,width=700,align=center]
+
+== Continue
+
+When you are ready, you can move on to the next task.
+
+read::Move on[]
+
+[.summary]
+== Summary
+
+You learned how you can store unstructured data in a graph.
+
+In the next task, you will use Python and Langchain to load, chunk, embed, and store unstructured data in Neo4j.
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/7-import-unstructured-data/lesson.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/7-import-unstructured-data/lesson.adoc
new file mode 100644
index 000000000..eaf5fb758
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/7-import-unstructured-data/lesson.adoc
@@ -0,0 +1,153 @@
+= Import Unstructured Data
+:order: 7
+:type: challenge
+:sandbox: true
+
+You will use Python and Langchain to chunk up course content and create embeddings for each chunk.
+You will then load the chunks into a Neo4j graph database.
+
+
+== The course content
+
+You will load the content from the course link:https://graphacademy.neo4j.com/courses/llm-fundamentals/[Neo4j & LLM Fundamentals^].
+
+The workshop repository you cloned contains the course data.
+
+Open the `1-knowledge-graphs-vectors/data` directory in your code editor.
+
+You should note the following structure:
+
+* `asciidoc` - contains the course content in asciidoc format
+** `courses` - the course content
+*** `llm-fundamentals` - the course name
+**** `modules` - contains numbered directories for each module
+***** `01-name` - the module name
+****** `lessons` - contains numbered directories for each lesson
+******* `01-name` - the lesson name
+******** `lesson.adoc` - the lesson content
+
+== Load the content and chunk it
+
+You will load the content and chunk it using Python and Langchain.
+
+Your code will split the lesson content into chunks of text, around 1500 characters long, each containing one or more paragraphs.
+Paragraphs in the content are separated by two newline characters (`\n\n`).
+
+Open the `1-knowledge-graphs-vectors/create_vector.py` file and review the program:
+
+[source,python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/create_vector.py[]
+----
+
+The program uses the link:https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.directory.DirectoryLoader.html[`DirectoryLoader`^] class to load the content from the `data/asciidoc` directory.
+
+Your task is to add the code to:
+
+. Create a link:https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.CharacterTextSplitter.html[`CharacterTextSplitter`^] object to split the content into chunks of text.
+. Use the `split_documents` method to split the documents into chunks of text based on the existence of `\n\n` and a chunk size of 1500 characters.
+
+Create the CharacterTextSplitter object to split the content into paragraphs (`\n\n`).
+
+[source,python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/create_vector.py[tag=splitter]
+----
+
+Split the documents into chunks of text.
+
+[source,python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/create_vector.py[tag=split]
+----
+
+[TIP]
+You can run your code now to see the chunks of text.
+
+[%collapsible]
+.More on splitting
+====
+The content isn't split simply on the separator (`\n\n`) or at a fixed number of characters.
+The process is more nuanced.
+Chunks are built up to the maximum size while still respecting the separator.
+
+In this example, the `split_documents` method does the following:
+
+. Splits the documents into paragraphs (using the `separator` - `\n\n`)
+. Combines the paragraphs into chunks of text that are up to 1500 characters (`chunk_size`)
+** if a single paragraph is longer than 1500 characters, the method will not split the paragraph but create a chunk larger than 1500 characters
+. Adds the last paragraph in a chunk to the start of the next chunk to create an overlap between chunks.
+** if the last paragraph in a chunk is more than 200 characters (`chunk_overlap`), it will *not* be added to the next chunk
+
+This process ensures that:
+
+* Chunks are never too small.
+* A paragraph is never split between chunks.
+* Chunks are significantly different, and the overlap doesn't result in a lot of repeated content.
+
+Investigate what happens when you modify the `separator`, `chunk_size` and `chunk_overlap` parameters.
+====
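+
+Putting those parameters together, a splitter configured like this (a sketch - the solution file included above is the reference) implements the behaviour described:
+
+[source, python]
+----
+# Sketch of a paragraph-based splitter using the parameters discussed above.
+from langchain.text_splitter import CharacterTextSplitter
+
+text_splitter = CharacterTextSplitter(
+    separator="\n\n",   # split on paragraph boundaries
+    chunk_size=1500,    # combine paragraphs into chunks of up to ~1500 characters
+    chunk_overlap=200,  # carry short trailing paragraphs into the next chunk
+)
+
+chunks = text_splitter.split_documents(docs)  # docs loaded by the DirectoryLoader
+----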
+
+== Create vector index
+
+Once you have chunked the content, you can use the Langchain link:https://python.langchain.com/docs/integrations/vectorstores/neo4jvector[`Neo4jVector`^] class to create embeddings, a vector index, and store the chunks in a Neo4j graph database.
+
+Modify your Python program to include the following code:
+
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/create_vector.py[tag=vector]
+----
+
+The code will create `Chunk` nodes with `text` and `embedding` properties and a vector index called `chunkVector`.
+You should be able to identify where you pass the `Chunk`, `text`, `embedding`, and `chunkVector` parameters.
+
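+For reference, a call of this shape (a sketch, not necessarily identical to the solution file) shows where those values are passed:
+
+[source, python]
+----
+# Sketch of the Neo4jVector call - the solution file is the reference.
+import os
+
+from langchain_community.vectorstores import Neo4jVector
+from langchain_openai import OpenAIEmbeddings
+
+neo4j_vector = Neo4jVector.from_documents(
+    chunks,                                   # the chunked documents
+    OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY")),
+    url=os.getenv("NEO4J_URI"),
+    username=os.getenv("NEO4J_USERNAME"),
+    password=os.getenv("NEO4J_PASSWORD"),
+    index_name="chunkVector",                 # the vector index
+    node_label="Chunk",                       # the node label
+    text_node_property="text",                # the text property
+    embedding_node_property="embedding",      # the embedding property
+)
+----
+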
+[%collapsible]
+.View the complete code
+====
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/create_vector.py[]
+----
+====
+
+Run the program to create the chunk nodes and vector index. It may take a minute or two to complete.
+
+== View chunks in the sandbox
+
+You can now view the chunks in the Neo4j sandbox.
+
+[source,cypher]
+----
+MATCH (c:Chunk) RETURN c LIMIT 25
+----
+
+You can also query the vector index to find similar chunks.
+For example, you can find lesson chunks relating to a specific question, "What does Hallucination mean?":
+
+[source,cypher]
+----
+WITH genai.vector.encode(
+ "What does Hallucination mean?",
+ "OpenAI",
+ { token: "sk-..." }) AS userEmbedding
+CALL db.index.vector.queryNodes('chunkVector', 6, userEmbedding)
+YIELD node, score
+RETURN node.text, score
+----
+
+[IMPORTANT]
+Remember to replace `sk-...` with your OpenAI API key.
+
+Experiment with different questions and see how the vector index can find similar chunks.
+
+== Continue
+
+When you are ready, you can move on to the next task.
+
+read::Move on[]
+
+[.summary]
+== Summary
+
+You learned to use Python and Langchain to load, chunk, and vectorize unstructured data into a Neo4j graph database.
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/8-knowledge-graph/images/generic-knowledge-graph.svg b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/8-knowledge-graph/images/generic-knowledge-graph.svg
new file mode 100644
index 000000000..1d672273d
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/8-knowledge-graph/images/generic-knowledge-graph.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/8-knowledge-graph/images/neo4j-google-knowledge-graph.svg b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/8-knowledge-graph/images/neo4j-google-knowledge-graph.svg
new file mode 100644
index 000000000..b3e6ed35c
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/8-knowledge-graph/images/neo4j-google-knowledge-graph.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/8-knowledge-graph/lesson.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/8-knowledge-graph/lesson.adoc
new file mode 100644
index 000000000..114677a71
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/8-knowledge-graph/lesson.adoc
@@ -0,0 +1,46 @@
+= Knowledge graph
+:order: 8
+:type: lesson
+
+Knowledge graphs are essential to many AI and machine learning applications.
+You can use a knowledge graph to give context and ground an LLM, giving it access to structured data beyond its initial training data.
+
+== What are Knowledge Graphs?
+
+Knowledge graphs are a specific implementation of a Graph Database, where information is captured and integrated from many different sources, representing the inherent knowledge of a particular domain.
+
+They provide a structured way to represent entities, their attributes, and their relationships, allowing for a comprehensive and interconnected understanding of the information within that domain.
+
+Knowledge graphs break down sources of information and integrate them, allowing you to see the relationships between the data.
+
+image::images/generic-knowledge-graph.svg[a diagram of an abstract knowledge graph showing how sources contain chunks of data about topics which can be related to other topics]
+
+You can tailor knowledge graphs for semantic search, data retrieval, and reasoning.
+
+You may not be familiar with the term knowledge graph, but you have probably used one. Search engines typically use knowledge graphs to provide information about people, places, and things.
+
+The following knowledge graph could represent Neo4j:
+
+image::images/neo4j-google-knowledge-graph.svg[An example of a knowledge graph of Neo4j showing the relationships between people, places, and things]
+
+This integration from diverse sources gives knowledge graphs a more holistic view and facilitates complex queries, analytics, and insights.
+
+[TIP]
+You can find more information about knowledge graphs, including white papers and free resources, at link:https://neo4j.com/use-cases/knowledge-graph[neo4j.com/use-cases/knowledge-graph^].
+
+Knowledge graphs can readily adapt and evolve as they grow, taking on new information and structure changes.
+
+In the next task, you will build a simple graph of the course data.
+
+== Continue
+
+When you are ready, you can move on to the next task.
+
+read::Move on[]
+
+[.summary]
+== Summary
+
+You learned about knowledge graphs and their benefits.
+
+In the next task, you will build a simple graph of the course data.
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/9-build-graph/images/course-graph-result.svg b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/9-build-graph/images/course-graph-result.svg
new file mode 100644
index 000000000..ca033110b
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/9-build-graph/images/course-graph-result.svg
@@ -0,0 +1,7 @@
+
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/9-build-graph/images/graphacademy-simple-graph.svg b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/9-build-graph/images/graphacademy-simple-graph.svg
new file mode 100644
index 000000000..131fc243e
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/9-build-graph/images/graphacademy-simple-graph.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/9-build-graph/lesson.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/9-build-graph/lesson.adoc
new file mode 100644
index 000000000..879af6f34
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/lessons/9-build-graph/lesson.adoc
@@ -0,0 +1,202 @@
+= Build Graph
+:order: 9
+:type: challenge
+:sandbox: true
+
+
+In the previous task, you used the `Neo4jVector` class to create nodes in the graph.
+Using `Neo4jVector` is an efficient and easy way to get started.
+
+To create a graph where you can also understand the relationships within the data, you must incorporate the metadata into the data model.
+
+In this lesson, you will create a graph of the course content using the `neo4j` Python driver and OpenAI API.
+
+== Data Model
+
+The data model you will create is a simplified version of the course content model you saw earlier.
+
+image::images/graphacademy-simple-graph.svg[Data model showing Course, Module, Lesson and Paragraph nodes and their relationships,width=700,align=center]
+
+The graph will contain the following nodes, properties, and relationships:
+
+- `Course`, `Module`, and `Lesson` nodes with a `name` property
+- A `url` property on `Lesson` nodes will hold the GraphAcademy URL for the lesson
+- `Paragraph` nodes will have `text` and `embedding` properties
+- The `HAS_MODULE`, `HAS_LESSON`, and `CONTAINS` relationships will connect the nodes
+
+You can extract the `name` properties and `url` metadata from the directory structure of the lesson files.
+For example, the first lesson of the Neo4j & LLM Fundamentals course has the following path:
+
+[source]
+----
+courses/llm-fundamentals/modules/1-introduction/lessons/1-neo4j-and-genai/lesson.adoc
+----
+
+You can extract the following metadata from the path:
+
+- `Course.name` - `llm-fundamentals`
+- `Module.name` - `1-introduction`
+- `Lesson.name` - `1-neo4j-and-genai`
+- `Lesson.url` - `graphacademy.neo4j.com/courses/{Course.name}/{Module.name}/{Lesson.name}`
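+
+As a sketch of the extraction (the solution code later in this lesson is the reference), the names can be derived by splitting the path:
+
+[source, python]
+----
+# Sketch only: derive course, module, and lesson names from a lesson file path.
+path = "courses/llm-fundamentals/modules/1-introduction/lessons/1-neo4j-and-genai/lesson.adoc"
+
+parts = path.split("/")
+
+course = parts[1]   # llm-fundamentals
+module = parts[3]   # 1-introduction
+lesson = parts[5]   # 1-neo4j-and-genai
+
+url = f"https://graphacademy.neo4j.com/courses/{course}/{module}/{lesson}"
+----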
+
+== Extracting the data
+
+Open the `1-knowledge-graphs-vectors/build_graph.py` file in your code editor.
+
+This starter code loads and chunks the course content.
+
+.Load and chunk the content
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/build_graph.py[]
+----
+
+For each chunk, you have to create an embedding of the text and extract the metadata.
+
+Create a function to create and return an embedding using the OpenAI API:
+
+.Create embeddings
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/build_graph.py[tag=get_embedding]
+----
+
+Create a second function, which will extract the data from the chunk:
+
+.Get course data
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/build_graph.py[tag=get_course_data]
+----
+
+The `get_course_data` function:
+
+. Splits the document source path to extract the `course`, `module`, and `lesson` names
+. Constructs the `url` using the extracted names
+. Extracts the `text` from the chunk
+. Creates an `embedding` using the `get_embedding` function
+. Returns a dictionary containing the extracted data
+
+== Create the graph
+
+To create the graph, you will need to:
+
+. Create an OpenAI object to generate the embeddings
+. Connect to the Neo4j database
+. Iterate through the chunks
+. Extract the course data from each chunk
+. Create the nodes and relationships in the graph
+
+Create the OpenAI object:
+
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/build_graph.py[tag=openai]
+----
+
+Connect to the Neo4j sandbox:
+
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/build_graph.py[tag=neo4j]
+----
+
+[TIP]
+.Test the connection
+====
+You could run your code now to check that you can connect to the OpenAI API and Neo4j sandbox.
+====
+
+To create the data in the graph, you will need a function that incorporates the course data into a Cypher statement and runs it in a transaction.
+
+.Create chunk function
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/build_graph.py[tag=create_chunk]
+----
+
+The `create_chunk` function will accept the `data` dictionary created by the `get_course_data` function.
+
+You should be able to identify the `$course`, `$module`, `$lesson`, `$url`, `$text`, and `$embedding` parameters in the Cypher statement.
+
+Iterate through the chunks and execute the `create_chunk` function:
+
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/build_graph.py[tag=create]
+----
+
+A new session is created for each chunk. The `execute_write` method calls the `create_chunk` function, passing the `data` dictionary created by the `get_course_data` function.
+
+Finally, close the driver.
+
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/build_graph.py[tag=close]
+----
+
+[%collapsible]
+.Click to view the complete code
+====
+[source, python]
+----
+include::{repository-raw}/main/1-knowledge-graphs-vectors/solutions/build_graph.py[]
+----
+====
+
+== Explore the graph
+
+Run the code to create the graph.
+It will take a minute or two to complete as it creates the embeddings for each paragraph.
+
+View the graph by running the following Cypher:
+
+[source, cypher]
+----
+MATCH (c:Course)-[:HAS_MODULE]->(m:Module)-[:HAS_LESSON]->(l:Lesson)-[:CONTAINS]->(p:Paragraph)
+RETURN *
+----
+
+image::images/course-graph-result.svg[Result from the Cypher, a graph showing Course, Module, Lesson and Chunk nodes and their relationships,width=700,align=center]
+
+
+You will need to create a vector index to query the paragraph embeddings.
+
+[source, cypher]
+.Create Vector Index
+----
+CREATE VECTOR INDEX paragraphs IF NOT EXISTS
+FOR (p:Paragraph)
+ON p.embedding
+OPTIONS {indexConfig: {
+ `vector.dimensions`: 1536,
+ `vector.similarity_function`: 'cosine'
+}}
+----
+
+You can use the vector index and the graph to find a lesson to help with specific questions:
+
+[source, cypher]
+.Find a lesson
+----
+WITH genai.vector.encode(
+ "How does RAG help ground an LLM?",
+ "OpenAI",
+ { token: "sk-..." }) AS userEmbedding
+CALL db.index.vector.queryNodes('paragraphs', 6, userEmbedding)
+YIELD node, score
+MATCH (l:Lesson)-[:CONTAINS]->(node)
+RETURN l.name, l.url, score
+----
+
+Explore the graph and see how the relationships between the nodes can bring additional meaning to the unstructured data.
+
+== Continue
+
+When you are ready, you can move on to the next task.
+
+read::Move on[]
+
+[.summary]
+== Summary
+
+You created a graph of the course content using the `neo4j` Python driver and OpenAI API.
\ No newline at end of file
diff --git a/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/module.adoc b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/module.adoc
new file mode 100644
index 000000000..021585163
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/1-knowledge-graphs-vectors/module.adoc
@@ -0,0 +1,20 @@
+= Knowledge Graphs, Unstructured Data, and Vectors
+:order: 1
+
+== Workshop Overview
+
+In this workshop, you will:
+
+* Use vector indexes to search for similar data
+* Create embeddings and vector indexes
+* Build a graph of unstructured data using Python and Langchain
+
+== Workshop Structure
+
+This workshop is hands-on, and you will be writing code.
+
+Each section will have a brief introduction, followed by a practical activity.
+
+If you are ready, let's get going!
+
+link:./1-getting-started/[Ready? Let's go →, role=btn]
diff --git a/asciidoc/courses/genai-workshop/modules/2-llm-rag-python-langchain/module.adoc b/asciidoc/courses/genai-workshop/modules/2-llm-rag-python-langchain/module.adoc
new file mode 100644
index 000000000..037b68676
--- /dev/null
+++ b/asciidoc/courses/genai-workshop/modules/2-llm-rag-python-langchain/module.adoc
@@ -0,0 +1,4 @@
+= LLMs, RAG, Python, and Langchain
+:order: 2
+
+COMING SOON
\ No newline at end of file