diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 1cd89632..0d9de9bb 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -1,7 +1,7 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/javascript-node
{
- "name": "ollama-js",
+ "name": "genai-js",
// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
"image": "mcr.microsoft.com/devcontainers/javascript-node:20-bullseye",
@@ -24,7 +24,7 @@
}
},
- "postCreateCommand": "npm install -g ollamazure tsx; cd demos && npm install",
+ "postCreateCommand": "npm install -g ollamazure tsx; npm install; cd videos/demos && npm install",
// Set minimal host requirements for the container.
"hostRequirements": {
diff --git a/.github/instructions/genaiscript.instructions.md b/.github/instructions/genaiscript.instructions.md
new file mode 100644
index 00000000..6d53bf1a
--- /dev/null
+++ b/.github/instructions/genaiscript.instructions.md
@@ -0,0 +1,19 @@
+---
+applyTo: "**/*.genai.*"
+---
+
+## GenAIScript Code Generation Instructions
+
+GenAIScript is a custom runtime for node.js. It provides a set of unique APIs and supports TypeScript syntax, ESM, and async/await.
+
+- GenAIScript documentation: https://microsoft.github.io/genaiscript/llms-full.txt
+- GenAIScript ambient type definitions: https://microsoft.github.io/genaiscript/genaiscript.d.ts
+
+## Guidance for Code Generation
+
+- you always generate JavaScript code using ESM modules for Node.JS.
+- you prefer using APIs from GenAIScript `genaiscript.d.ts` rather than node.js. Do NOT use node.js imports.
+- you keep the code simple, avoid exception handlers or error checking.
+- you add `TODOs` where you are unsure so that the user can review them
+- the global types in genaiscript.d.ts are already loaded in the global context, so there is no need to import them.
+- save generated code with `.genai.js` extension
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 45464960..1a134f32 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,7 +1,7 @@
{
"code-runner.executorMap": {
- "typescript": "cd demos && dotenv run -- tsx",
- "javascript": "cd demos && dotenv run -- node",
+ "typescript": "tsx",
+ "javascript": "node",
},
"code-runner.clearPreviousOutput": true,
"code-runner.saveFileBeforeRun": true,
diff --git a/README.md b/README.md
index 62c749fa..4e47b663 100644
--- a/README.md
+++ b/README.md
@@ -4,114 +4,96 @@
# Generative AI with JavaScript
+[](https://github.com/microsoft/generative-ai-with-javascript/blob/main/LICENSE)
[](https://codespaces.new/microsoft/generative-ai-with-javascript?hide_repo_select=true&ref=main&quickstart=true)
-[](https://aka.ms/genai-js)
-
-[](LICENSE)
+[](https://aka.ms/genai-js)
+[](https://github.com/microsoft/generative-ai-with-javascript/graphs/contributors/)
+[](https://github.com/microsoft/generative-ai-with-javascript/issues/)
+[](https://github.com/microsoft/generative-ai-with-javascript/pulls/)
+[](http://makeapullrequest.com)
-⭐ If you like this repo, star it on GitHub — it helps a lot!
+[](https://github.com/microsoft/generative-ai-with-javascript/watchers/)
+[](https://github.com/microsoft/generative-ai-with-javascript/network/)
+[](https://github.com/microsoft/generative-ai-with-javascript/stargazers/)
-[Overview](#overview) • [Quickstart](#quickstart) • [Video series](#video-series) • [Tutorials](#tutorials) • [Code samples](#code-samples) • [Community](#community) • [More resources](#more-resources)
+[](https://discord.gg/kzRShWzttr)
-
+⭐ If you like this repo, star it on GitHub — it helps a lot!
-## Overview
+[Getting Started](#getting-started) • [Lessons](#lessons) • [Keep Learning](#keep-learning) • [Other Courses](#other-courses)
-The repository aims to centralize our content related to the **Generative AI with JavaScript** topic. You'll find code samples, tutorials as well all the resources from the video series, with best practices and tips to supercharge your AI skills. If you're a JavaScript developer looking to get started with AI, or if you already have some experience and want to learn more, you've come to the right place!
+
-We cover the basics of generative AI and how to build AI applications using JavaScript, from local development to deployment on Azure, up to running and scaling your AI models. This is a living repository, so come back often to check for new content! ✨
+This course aims to take you through a series of 5 lessons so that you can integrate Generative AI in your JavaScript apps.
-[📺 Watch the video series](https://aka.ms/genai-js)
+To make it a fun learning experience, we've added a narrative to each lesson. It's a *time travelling adventure* where you will encounter famous characters throughout history! ✨
> [!IMPORTANT]
> All the content in this repository is open-source, so feel free to **reuse it, adapt it to make it your own, and share it with others!**
-### How to use this repository?
-
-You'll find in the repo all the content associated with the video series, including slides, demos, scripts, as well as additional code samples, tutorials, and resources.
-You can use it to follow along with the series, run the demos, replay the sessions or reuse the content in your own presentations. Once you've learned what you need, you can use the code samples and tutorials to kickstart your own AI applications.
-
-It's organized as follows:
-- [**Sessions**](sessions/): Contains the description, abstracts, resources links and scripts for each session.
-- [**Slides**](slides/): Contains the slides for each session.
-- [**Demos**](demos/): Contains the demos associated with each session.
-- [**Tutorials**](#tutorials): Contains hands-on tutorials that you can follow to learn more about AI and JavaScript.
-- [**Code samples**](#code-samples): Contains code samples that you can use as a starting point for your own projects.
-- [**Community**](#community): How to join the Azure AI community on Discord.
-
-## Quickstart
-
-If you're eager to put your hands on the code, you can pick one of the following quickstarts to get started right away:
-
-- [Get started with Ollama and Phi-3, directly in your browser](https://github.com/sinedied/ollama-javascript-playground#ollama-javascript-playground) (Needs a free [GitHub account](https://github.com/join))
-- [Get started with GitHub Models, directly in your browser](https://github.com/Azure-Samples/github-models-javascript-playground) (Needs a free [GitHub account](https://github.com/join) and to sign up for the [public beta access](https://github.com/marketplace/models/waitlist/join))
-- [Get started with Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Ctypescript%2Cpython-new&pivots=programming-language-javascript) (Needs a free [Azure account](https://azure.com/free))
+## 🌱 Getting Started
-## Video series
+Along this course you'll find many code examples and exercises, so we encourage you to run and experiment with the code in your own copy of this repository:
-| # | Session | Description | Slides | Demo | Script | Video |
-|---|---------|-------------|--------|------|--------|-------|
-| 0 | Series introduction | Introduces the series and its content. | [pptx](slides/00-intro.pptx) / [pdf](slides/pdf/00-intro.pdf) | - | [Script](sessions/00-intro.md) | [📺](https://www.youtube.com/watch?v=vLYtDgs_zx8&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=1) |
-| 1 | What you need to know about LLMs | Explores what are LLMs, how they're trained, how they work and their limits. | [pptx](slides/01-llms.pptx) / [pdf](slides/pdf/01-llms.pdf) | [Demo](demos/01-llms/) | [Script](sessions/01-llms.md) | [📺](https://www.youtube.com/watch?v=GQ_2OjNZ9aA&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=2) |
-| 2 | Essential prompt engineering techniques | Practical prompt engineering techniques to get the best out of AI models. | [pptx](slides/02-prompt-engineering.pptx) / [pdf](slides/pdf/02-prompt-engineering.pdf) | [Demo](demos/02-prompt-engineering/) | [Script](sessions/02-prompt-engineering.md) | [📺](https://www.youtube.com/watch?v=gQ6TlyxBmWs&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=3) |
-| 3 | Improve AI accuracy and reliability with RAG | Introduces Retrieval-Augmented Generation, to use AI with your own data. | [pptx](slides/03-rag.pptx) / [pdf](slides/pdf/03-rag.pdf) | [Demo](demos/03-rag/) | [Script](sessions/03-rag.md) | [📺](https://www.youtube.com/watch?v=xkFOmx5yxIA&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=4) |
-| 4 | Speed up your AI development with LangChain.js | Covers LangChain.js framework core concepts, and how to use it to accelerate AI developments. | [pptx](slides/04-langchainjs.pptx) / [pdf](slides/pdf/04-langchainjs.pdf) | [Demo](demos/04-langchainjs/) | [Script](sessions/04-langchainjs.md) | [📺](https://www.youtube.com/watch?v=02IDU8eCX8o&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=5) |
-| 5 | Run AI models on your local machine with Ollama | Shows how to integrate local AI models into your development workflow. | [pptx](slides/05-local-models.pptx) / [pdf](slides/pdf/05-local-models.pdf) | [Demo](demos/05-local-models/) | [Script](sessions/05-local-models.md) | [📺](https://www.youtube.com/watch?v=dLfNnoPv4AQ&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=6) |
-| 6 | Get started with AI for free using Phi-3 | Experiments with Ollama and Phi-3 model directly from your browser. | [pptx](slides/06-playground.pptx) / [pdf](slides/pdf/06-playground.pdf) | [Demo](demos/06-playground/) | [Script](sessions/06-playground.md) | [📺](https://www.youtube.com/watch?v=Ds32MS9SHzU&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=7) |
-| 7 | Introduction to Azure AI Foundry | Kickstart your journey with Azure AI Foundry. | [pptx](slides/07-ai-foundry.pptx) / [pdf](slides/pdf/07-ai-foundry.pdf) | [Demo](demos/07-ai-foundry/) | [Script](sessions/07-ai-foundry.md) | [📺](https://www.youtube.com/watch?v=9Mo-VOGk8ng&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=8) |
-| 8 | Building Generative AI Apps with Azure Cosmos DB | Build generative AI apps with Azure Cosmos DB and vector search. | [pptx](slides/08-cosmos-db.pptx) / [pdf](slides/pdf/08-cosmos-db.pdf) | [Demo](demos/08-cosmos-db/) | [Script](sessions/08-cosmos-db.md) | [📺](https://www.youtube.com/watch?v=-GQyaLbeqxQ&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=9) |
-| 9 | Azure tools & services for hosting and storing AI apps | Build, deploy, and scale AI applications using Azure tools. | [pptx](slides/08-azure-tools.pptx) / [pdf](slides/pdf/08-azure-tools.pdf) | - | [Script](sessions/08-azure-tools.md) | [📺](https://www.youtube.com/watch?v=WB6Fpzhwyug&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=10) |
-| 10 | Streaming Generative AI output with the AI Chat Protocol | Integrate streaming easily into your apps with the AI Chat Protocol. | [pptx](slides/10-chat-protocol.pptx) / [pdf](slides/pdf/10-chat-protocol.pdf) | [Demo](demos/10-chat-protocol/) | [Script](sessions/10-chat-protocol.md) | [📺](https://www.youtube.com/watch?v=fzDCW-6hMtU&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=11) |
+1. Select the **Fork** button in the upper right-hand corner of the repository or select this button:
+ [](https://github.com/microsoft/generative-ai-with-javascript/fork)
-New segments will be added to the series over time, so stay tuned!
+2. On your forked repository, select the **Code** button, then the **Codespaces** tab, then **Create codespace**. This will create a preconfigured online environment for you. You'll then be able to use [GitHub Models](https://github.com/marketplace/models) to run the code examples and interact with LLMs for free, without any additional setup.
-### Run the demos
+
+
Using your local environment
-Demos are located in the [demo/](demos/) folder and organized by sessions. Each session has a folder with a `readme.md` file that explains the demo, its prerequisites and how to run it.
+If you prefer working in your local environment, first you need to install the following tools:
+- [Git](https://git-scm.com/downloads)
+- [Node.js LTS](https://nodejs.org/en/download)
+- [Visual Studio Code](https://code.visualstudio.com/download)
-## Tutorials
+ Then after forking the repository, you can clone it to your local machine:
-You can find here hands-on tutorials to put your AI skills into practice, with step-by-step instructions that will guide you through the process of building applications making use of AI with JavaScript.
+1. On your forked repository, select the **Code** button, then the **Local** tab, and copy the URL of your forked repository.
+2. Open a terminal and run this command to clone the repo: `git clone `
+3. Open the cloned repository in Visual Studio Code.
-| Name | Description | Duration | Level | Links |
-| ---- | ----------- | -------- | ----- | ----- |
-| Azure OpenAI RAG workshop - Qdrant version | Explore the fundamentals of custom ChatGPT experiences based on a corpus of documents. We will create a vector database and fill-in with data from PDF documents, and then build a chat website and API to be able to ask questions about information contained in these documents. | 3h | Beginner | [Tutorial](https://moaw.dev/workshop/gh:azure-samples/azure-openai-rag-workshop/docs/workshop-qdrant.md) / [Repo](https://github.com/Azure-Samples/azure-openai-rag-workshop) |
-| Azure OpenAI RAG workshop - AI Search version | Explore the fundamentals of custom ChatGPT experiences based on a corpus of documents. We will create a vector database and fill-in with data from PDF documents, and then build a chat website and API to be able to ask questions about information contained in these documents. | 3h | Beginner | [Tutorial](https://moaw.dev/workshop/?src=gh:azure-samples/azure-openai-rag-workshop/docs/) / [Repo](https://github.com/Azure-Samples/azure-openai-rag-workshop) |
-| Integrate OpenAI, Communication, and Organizational Data Features into a Line of Business App | This tutorial demonstrates how Azure OpenAI, Azure Communication Services, and Microsoft Graph/Microsoft Graph Toolkit can be integrated into a Line of Business (LOB) application to enhance user productivity, elevate the user experience, and take LOB apps to the next level | 3h | Beginner | [Tutorial](https://learn.microsoft.com/microsoft-cloud/dev/tutorials/openai-acs-msgraph) / [Repo](https://github.com/microsoft/MicrosoftCloud) |
+
-## Code samples
+### 📦 Each Lesson Includes
-The following code samples are available to help you get started with building your own AI applications. Use them to learn how to architect your applications, experiment with different features, or use them as a starting point for your own projects.
+- A **written lesson** with an assignment and quiz.
+- A short **video**, to help further your learning.
+- **Solution** for assignment and quiz.
+- **Character app**: running this app demonstrates Generative AI and gives you a chance to have conversation with a historical figure.
-| Name | Description | GitHub Repo | Technologies |
-| ---- | ----------- | ----------- | ------------ |
-| Serverless AI Chat with RAG using LangChain.js | This sample shows how to build a serverless AI chat experience with Retrieval-Augmented Generation using LangChain.js and Azure. | [Repo](https://github.com/Azure-Samples/serverless-chat-langchainjs) | LangChain.js, Lit, Vite, Azure OpenAI, Azure Cosmos DB, Azure Blob Storage, Azure Functions, Azure Static Web Apps, AZD |
-| Serverless Azure OpenAI Quick Start with LlamaIndex | This sample shows how to quickly get started with LlamaIndex.ai on Azure | [Repo](https://github.com/Azure-Samples/llama-index-javascript) | LlamaIndex.ts, Next.js, Azure OpenAI, Azure Container Apps, Docker, AZD |
-| Serverless Azure OpenAI Assistant Quick Start | Azure OpenAI Assistants allows you to create AI assistants tailored to your needs through custom instructions and augmented by advanced tools. | [Repo](https://github.com/Azure-Samples/azure-openai-assistant-javascript) | HTML-CSS-JavaScript, Azure Static Web Apps, Azure Blob Storage, Azure Functions, Azure OpenAI, AZD |
-| Serverless RAG application with LlamaIndex and code interpreter on Azure Container Apps | This is a LlamaIndex project using Next.js bootstrapped with create-llama. It uses Azure Container Apps as a serverless deployment platform. | [Repo](https://github.com/Azure-Samples/llama-index-azure-code-interpreter) | Azure OpenAI, LlamaIndex, Azure Container Apps, Next.js, GitHub Actions, TypeScript, AZD |
-| Azure OpenAI keyless deployment | Example Azure OpenAI deployment and RBAC role for your user account for keyless access. | [Repo](https://github.com/Azure-Samples/azure-openai-keyless-js) | Azure OpenAI, AZD, JavaScript |
-| Azure OpenAI secure UI starter | Reusable OpenAI secure UI and infrastructure for AI Chat with Azure. | [Repo](https://github.com/Azure-Samples/openai-secure-ui-js) | Lit, Vite, Web Components, Azure OpenAI, Azure Blob Storage, Azure Functions, Azure Static Web Apps, AZD |
-| Azure OpenAI with LangChain.js | Demonstrates how to use Azure OpenAI with LangChain.js. | [Repo](https://github.com/Azure-Samples/openai-langchainjs) | LangChain.js, Azure OpenAI, TypeScript |
-| Azure API Management with Azure Open AI | Demonstrates how to add Azure API Management on top of your Azure Open AI endpoints. | [Repo](https://github.com/Azure-Samples/genai-gateway-apim) | Azure Open AI, Azure API Management, Express, Node.js, AZD |
-| Ask YouTube: LangChain.js + Azure Quickstart | Build a generative AI application using LangChain.js from local to Azure. | [Repo](https://github.com/Azure-Samples/langchainjs-quickstart-demo) | LangChain.js, Azure OpenAI, Azure AI Search, Azure Functions |
-| Ollama JavaScript Playground | Generative AI playground using Ollama OpenAI API and JavaScript. Try AI models in your browser! | [Repo](https://github.com/sinedied/ollama-javascript-playground/) | TypeScript, Ollama, Azure OpenAI, GitHub Codespaces |
-| Copilot / Node.js + OpenAI Todo app | Discover how to use GitHub Copilot to quickly build a Node.js application with OpenAI and Azure. | [Repo](https://github.com/Azure-Samples/copilot-openai-todo) | Azure OpenAI, Azure App Service, Azure CosmosDB, Express, TypeScript, TypeChat, GitHub Actions |
-| ChatGPT + Enterprise data with Azure OpenAI and Azure AI Search | A TypeScript sample app for the Retrieval Augmented Generation pattern running on Azure. | [Repo](https://github.com/Azure-Samples/azure-search-openai-javascript) | LangChain.js, Lit, Vite, Azure OpenAI, Azure Cosmos DB, Azure Blog Storage, Azure Container Apps, Azure Static Web Apps, Docker, AZD |
-| Azure OpenAI Chat Frontend | The Chat App Protocol compliant user-interface building block for your chat-gpt like RAG pattern applications built with Azure OpenAI and Azure AI Search. | [Repo](https://github.com/Azure-Samples/azure-openai-chat-frontend) | Chat Protocol, Lit, Azure Static Web Apps, AZD |
-| Enterprise-grade Reference Architecture for JavaScript | Intelligent enterprise-grade reference architecture for JavaScript featuring OpenAI integration, Azure Developer CLI template, and Playwright tests. | [Repo](https://github.com/Azure-Samples/contoso-real-estate) | Azure Container Apps, Azure Functions, Azure Cosmos DB, SWA, Fastify, Next.js, Angular, AZD |
+## 🗃️ Lessons
-## Community
+| # | Lesson Link | Description |
+| ---- | ----------- | ----------- |
+| 1 | [Introduction to Generative AI and LLMs for JavaScript Developers](./lessons/01-intro-to-genai) | Understand Generative AI and LLM fundamentals, their applications and limits in JavaScript, and how to use AI to enhance user experiences. |
+| 2 | [Build your first AI app](./lessons/02-first-ai-app) | Set up your development environment, write a basic app, and understand system prompts. |
+| 3 | [Prompt engineering](./lessons/03-prompt-engineering) | Learn prompt engineering fundamentals, techniques, and meta-prompts for better AI outcomes. |
+| 4 | [Structured output](./lessons/04-structured-output) | Learn structured output, how to extract data from prompts, and present it in various formats (like JSON) for easier consumption. |
+| 5 | [Retrieval augmented generation (RAG)](./lessons/05-rag) | Learn the basics of RAG, how to integrate external data, and how to leverage it for more relevant, accurate AI responses. |
-**Join the Azure AI community on Discord: https://aka.ms/genai-discord**
+New lessons will be added to the course over time, so stay tuned!
-If you're looking to connect with others, we have a community of AI enthusiasts, developers, and experts who are passionate about AI and are always willing to help.
+## 🙌 Keep Learning
-There's even a dedicated [#JavaScript channel](https://discordapp.com/channels/1113626258182504448/1237357005555892225) where you can ask questions, share your projects, and discuss your about your favorite topics.
+After completing this course, you can continue learning by exploring the [video series](videos/README.md) and its related content.
-## More resources
+You'll also find additional resources in the form of tutorials, code samples, and community links here: [Additional Resources](docs/additional-resources.md)
-Here are some additional resources that you might find useful on your AI journey:
+## 🎒 Other Courses
-- Generative AI for Beginners: https://github.com/microsoft/generative-ai-for-beginners
-- Phi-3 cookbook: https://aka.ms/phi3/cookbook
-- Try Azure Cosmos DB for free: https://aka.ms/trycosmosdb
+- [Generative AI for Beginners](https://aka.ms/genai-beginners)
+- [Generative AI for Beginners .NET](https://github.com/microsoft/Generative-AI-for-beginners-dotnet)
+- [Generative AI with JavaScript](https://github.com/microsoft/generative-ai-with-javascript)
+- [AI for Beginners](https://aka.ms/ai-beginners)
+- [AI Agents for Beginners - A Course](https://github.com/microsoft/ai-agents-for-beginners)
+- [Data Science for Beginners](https://aka.ms/datascience-beginners)
+- [ML for Beginners](https://aka.ms/ml-beginners)
+- [Cybersecurity for Beginners](https://github.com/microsoft/Security-101)
+- [Web Dev for Beginners](https://aka.ms/webdev-beginners)
+- [IoT for Beginners](https://aka.ms/iot-beginners)
+- [XR Development for Beginners](https://github.com/microsoft/xr-development-for-beginners)
+- [Mastering GitHub Copilot for Paired Programming](https://github.com/microsoft/Mastering-GitHub-Copilot-for-Paired-Programming)
+- [Mastering GitHub Copilot for C#/.NET Developers](https://github.com/microsoft/mastering-github-copilot-for-dotnet-csharp-developers)
+- [Choose Your Own Copilot Adventure](https://github.com/microsoft/CopilotAdventures)
diff --git a/docs/additional-resources.md b/docs/additional-resources.md
new file mode 100644
index 00000000..9ca76f95
--- /dev/null
+++ b/docs/additional-resources.md
@@ -0,0 +1,48 @@
+
+
+To continue your learning journey, we've curated a list of additional resources, including tutorials, code samples, and community links. These resources may help you find your next learning path and connect with others who share your interests.
+
+## Tutorials
+
+You can find here hands-on tutorials to put your AI skills into practice, with step-by-step instructions that will guide you through the process of building applications making use of AI with JavaScript.
+
+| Name | Description | Duration | Level | Links |
+| ---- | ----------- | -------- | ----- | ----- |
+| Azure OpenAI RAG workshop - Qdrant version | Explore the fundamentals of custom ChatGPT experiences based on a corpus of documents. We will create a vector database and fill-in with data from PDF documents, and then build a chat website and API to be able to ask questions about information contained in these documents. | 3h | Beginner | [Tutorial](https://moaw.dev/workshop/gh:azure-samples/azure-openai-rag-workshop/docs/workshop-qdrant.md) / [Repo](https://github.com/Azure-Samples/azure-openai-rag-workshop) |
+| Azure OpenAI RAG workshop - AI Search version | Explore the fundamentals of custom ChatGPT experiences based on a corpus of documents. We will create a vector database and fill-in with data from PDF documents, and then build a chat website and API to be able to ask questions about information contained in these documents. | 3h | Beginner | [Tutorial](https://moaw.dev/workshop/?src=gh:azure-samples/azure-openai-rag-workshop/docs/) / [Repo](https://github.com/Azure-Samples/azure-openai-rag-workshop) |
+| Integrate OpenAI, Communication, and Organizational Data Features into a Line of Business App | This tutorial demonstrates how Azure OpenAI, Azure Communication Services, and Microsoft Graph/Microsoft Graph Toolkit can be integrated into a Line of Business (LOB) application to enhance user productivity, elevate the user experience, and take LOB apps to the next level | 3h | Beginner | [Tutorial](https://learn.microsoft.com/microsoft-cloud/dev/tutorials/openai-acs-msgraph) / [Repo](https://github.com/microsoft/MicrosoftCloud) |
+
+## Code samples
+
+The following code samples are available to help you get started with building your own AI applications. Use them to learn how to architect your applications, experiment with different features, or use them as a starting point for your own projects.
+
+| Name | Description | GitHub Repo | Technologies |
+| ---- | ----------- | ----------- | ------------ |
+| Serverless AI Chat with RAG using LangChain.js | This sample shows how to build a serverless AI chat experience with Retrieval-Augmented Generation using LangChain.js and Azure. | [Repo](https://github.com/Azure-Samples/serverless-chat-langchainjs) | LangChain.js, Lit, Vite, Azure OpenAI, Azure Cosmos DB, Azure Blob Storage, Azure Functions, Azure Static Web Apps, AZD |
+| Serverless Azure OpenAI Quick Start with LlamaIndex | This sample shows how to quickly get started with LlamaIndex.ai on Azure | [Repo](https://github.com/Azure-Samples/llama-index-javascript) | LlamaIndex.ts, Next.js, Azure OpenAI, Azure Container Apps, Docker, AZD |
+| Serverless Azure OpenAI Assistant Quick Start | Azure OpenAI Assistants allows you to create AI assistants tailored to your needs through custom instructions and augmented by advanced tools. | [Repo](https://github.com/Azure-Samples/azure-openai-assistant-javascript) | HTML-CSS-JavaScript, Azure Static Web Apps, Azure Blob Storage, Azure Functions, Azure OpenAI, AZD |
+| Serverless RAG application with LlamaIndex and code interpreter on Azure Container Apps | This is a LlamaIndex project using Next.js bootstrapped with create-llama. It uses Azure Container Apps as a serverless deployment platform. | [Repo](https://github.com/Azure-Samples/llama-index-azure-code-interpreter) | Azure OpenAI, LlamaIndex, Azure Container Apps, Next.js, GitHub Actions, TypeScript, AZD |
+| Azure OpenAI keyless deployment | Example Azure OpenAI deployment and RBAC role for your user account for keyless access. | [Repo](https://github.com/Azure-Samples/azure-openai-keyless-js) | Azure OpenAI, AZD, JavaScript |
+| Azure OpenAI secure UI starter | Reusable OpenAI secure UI and infrastructure for AI Chat with Azure. | [Repo](https://github.com/Azure-Samples/openai-secure-ui-js) | Lit, Vite, Web Components, Azure OpenAI, Azure Blob Storage, Azure Functions, Azure Static Web Apps, AZD |
+| Azure OpenAI with LangChain.js | Demonstrates how to use Azure OpenAI with LangChain.js. | [Repo](https://github.com/Azure-Samples/openai-langchainjs) | LangChain.js, Azure OpenAI, TypeScript |
+| Azure API Management with Azure Open AI | Demonstrates how to add Azure API Management on top of your Azure Open AI endpoints. | [Repo](https://github.com/Azure-Samples/genai-gateway-apim) | Azure Open AI, Azure API Management, Express, Node.js, AZD |
+| Ask YouTube: LangChain.js + Azure Quickstart | Build a generative AI application using LangChain.js from local to Azure. | [Repo](https://github.com/Azure-Samples/langchainjs-quickstart-demo) | LangChain.js, Azure OpenAI, Azure AI Search, Azure Functions |
+| Ollama JavaScript Playground | Generative AI playground using Ollama OpenAI API and JavaScript. Try AI models in your browser! | [Repo](https://github.com/sinedied/ollama-javascript-playground/) | TypeScript, Ollama, Azure OpenAI, GitHub Codespaces |
+| Copilot / Node.js + OpenAI Todo app | Discover how to use GitHub Copilot to quickly build a Node.js application with OpenAI and Azure. | [Repo](https://github.com/Azure-Samples/copilot-openai-todo) | Azure OpenAI, Azure App Service, Azure CosmosDB, Express, TypeScript, TypeChat, GitHub Actions |
+| ChatGPT + Enterprise data with Azure OpenAI and Azure AI Search | A TypeScript sample app for the Retrieval Augmented Generation pattern running on Azure. | [Repo](https://github.com/Azure-Samples/azure-search-openai-javascript) | LangChain.js, Lit, Vite, Azure OpenAI, Azure Cosmos DB, Azure Blog Storage, Azure Container Apps, Azure Static Web Apps, Docker, AZD |
+| Azure OpenAI Chat Frontend | The Chat App Protocol compliant user-interface building block for your chat-gpt like RAG pattern applications built with Azure OpenAI and Azure AI Search. | [Repo](https://github.com/Azure-Samples/azure-openai-chat-frontend) | Chat Protocol, Lit, Azure Static Web Apps, AZD |
+| Enterprise-grade Reference Architecture for JavaScript | Intelligent enterprise-grade reference architecture for JavaScript featuring OpenAI integration, Azure Developer CLI template, and Playwright tests. | [Repo](https://github.com/Azure-Samples/contoso-real-estate) | Azure Container Apps, Azure Functions, Azure Cosmos DB, SWA, Fastify, Next.js, Angular, AZD |
+
+## Community
+
+**Join the Azure AI community on Discord: https://aka.ms/genai-discord**
+
+If you're looking to connect with others, we have a community of AI enthusiasts, developers, and experts who are passionate about AI and are always willing to help.
+
+There's even a dedicated [#JavaScript channel](https://discordapp.com/channels/1113626258182504448/1237357005555892225) where you can ask questions, share your projects, and discuss your favorite topics.
diff --git a/docs/scripts/.gitignore b/docs/scripts/.gitignore
new file mode 100644
index 00000000..5585b550
--- /dev/null
+++ b/docs/scripts/.gitignore
@@ -0,0 +1,3 @@
+genaiscript.d.ts
+tsconfig.json
+jsconfig.json
\ No newline at end of file
diff --git a/docs/scripts/glossary.genai.js b/docs/scripts/glossary.genai.js
new file mode 100644
index 00000000..f32ce507
--- /dev/null
+++ b/docs/scripts/glossary.genai.js
@@ -0,0 +1,175 @@
+// GenAIScript to create a shared glossary from markdown files in lessons
+script({
+ title: "Generate glossary from lessons",
+ description: "Process all markdown files under /lessons/** to create a shared glossary.md file",
+ parameters: {
+ force: {
+ type: "boolean",
+ description: "Force regeneration of the entire glossary",
+ default: false,
+ },
+ },
+ temperature: 0.1,
+});
+
+// Find all markdown files under lessons, excluding translations
+const files = await workspace.findFiles("lessons/**/*.md", {
+ ignore: "**/translations/**",
+});
+
+console.log(`Found ${files.length} markdown files to process`);
+
+// Check if glossary.md already exists
+const glossaryPath = "glossary.md";
+let existingGlossary = "";
+try {
+ const glossaryFile = await workspace.readText(glossaryPath);
+ existingGlossary = glossaryFile?.content || "";
+ console.log("Found existing glossary.md, will extend it");
+} catch (error) {
+ console.log("No existing glossary.md found, will create new one");
+}
+
+// Extract existing terms from glossary if it exists
+const existingTerms = new Set();
+if (existingGlossary) {
+ const termMatches = existingGlossary.matchAll(/^- \*\*([^*]+)\*\*/gm);
+ for (const match of termMatches) {
+ existingTerms.add(match[1].toLowerCase());
+ }
+ console.log(`Found ${existingTerms.size} existing terms in glossary`);
+}
+
+// Process each markdown file
+let allContent = "";
+for (const file of files) {
+ console.log(`Processing: ${file.filename}`);
+ const fileContent = await workspace.readText(file.filename);
+ const content = fileContent?.content || "";
+ allContent += `\n\n--- ${file.filename} ---\n\n${content}`;
+}
+
+// Create the prompt for extracting technical terms
+const { text: newTermsResponse } = await prompt`
+You are tasked with creating a comprehensive glossary of technical terms from the provided content.
+
+## Content to analyze:
+${allContent}
+
+## Instructions:
+1. Extract technical terms from the content to analyze related to:
+ - Generative AI and Machine Learning concepts
+ - Programming and development terms
+ - Web development technologies
+ - APIs and software development concepts
+ - AI/ML frameworks and tools
+ - Data science and computational terms
+
+2. For each term, provide a concise one-line definition (maximum 20 words)
+
+3. Focus on terms that would be valuable for developers learning about AI and JavaScript. Avoid terms that are too basic or not relevant to the context of AI and JavaScript development
+
+4. Exclude these terms and concepts:
+ - Common programming terms that most developers would know (like "function", "variable", "array")
+ - Historical terms or concepts that are only there for the storytelling aspect of the lessons
+ - Terms that are too similar to existing terms. For example, "Chain of Thought" and "Chain of Thought Prompting" are too similar and should not both be included.
+
+5. Format each entry as: **Term**: Definition
+
+6. It's OK to not output anything if no new terms are found. In that case, just return an empty string.
+
+${
+ existingTerms.size > 0
+ ? `## Existing terms to avoid duplicating:
+${Array.from(existingTerms).join(", ")}`
+ : ""
+}
+
+## Output format:
+Provide only the glossary entries, one per line, sorted alphabetically. Do not include any headers, explanations, or other text.
+`;
+
+// Combine existing and new terms
+let finalGlossary = "";
+let glossarySize, previousSize = 0;
+
+if (existingGlossary && !env.vars.force) {
+ // Parse existing glossary and add new terms
+ const lines = existingGlossary.split("\n");
+ const headerEndIndex = lines.findIndex(
+ (line) => line.trim() === "" && lines[lines.indexOf(line) - 1]?.includes("technical terms")
+ );
+
+ if (headerEndIndex > 0) {
+ // Keep existing header
+ finalGlossary = lines.slice(0, headerEndIndex + 1).join("\n") + "\n";
+ } else {
+ // Create new header
+ finalGlossary = `# Glossary\n\nA comprehensive list of technical terms used throughout the lessons.\n\n`;
+ }
+
+ // Get existing entries
+ const existingEntries = [];
+ const termPattern = /^- \*\*([^*]+)\*\*: (.+)$/gm;
+ let match;
+ while ((match = termPattern.exec(existingGlossary)) !== null) {
+ existingEntries.push({ term: match[1], definition: match[2] });
+ }
+
+ // Parse new entries
+ const newEntries = [];
+ const newTermLines = newTermsResponse.split("\n").filter((line) => line.trim());
+ for (const line of newTermLines) {
+ const termMatch = line.match(/\*\*([^*]+)\*\*:\s*(.+)/);
+ if (termMatch) {
+ const term = termMatch[1].trim();
+ const definition = termMatch[2].trim();
+ if (!existingTerms.has(term.toLowerCase())) {
+ newEntries.push({ term, definition });
+ }
+ }
+ }
+
+ // Combine and sort all entries
+ const allEntries = [...existingEntries, ...newEntries];
+ allEntries.sort((a, b) => a.term.toLowerCase().localeCompare(b.term.toLowerCase()));
+
+ // Add all entries to glossary
+ for (const entry of allEntries) {
+ finalGlossary += `- **${entry.term}**: ${entry.definition}\n`;
+ }
+
+ previousSize = existingEntries.length;
+ glossarySize = allEntries.length;
+ console.log(`Added ${newEntries.length} new terms to existing glossary`);
+} else {
+ // Create completely new glossary
+ finalGlossary = `# Glossary\n\nA comprehensive list of technical terms used throughout the lessons.\n\n`;
+
+ const newTermLines = newTermsResponse.split("\n").filter((line) => line.trim());
+ const entries = [];
+
+ for (const line of newTermLines) {
+ const termMatch = line.match(/\*\*([^*]+)\*\*:\s*(.+)/);
+ if (termMatch) {
+ entries.push({ term: termMatch[1].trim(), definition: termMatch[2].trim() });
+ }
+ }
+
+ entries.sort((a, b) => a.term.toLowerCase().localeCompare(b.term.toLowerCase()));
+
+ for (const entry of entries) {
+ finalGlossary += `- **${entry.term}**: ${entry.definition}\n`;
+ }
+
+ glossarySize = entries.length;
+ console.log(`Created new glossary with ${entries.length} terms`);
+}
+
+// Write the glossary file
+await workspace.writeText(glossaryPath, finalGlossary);
+console.log(`Glossary saved to ${glossaryPath}`);
+
+env.output.appendContent(`Glossary generated with ${glossarySize} terms (previously ${previousSize} terms).\n\n`);
+env.output.appendContent(`Glossary saved to \`${glossaryPath}\`.\n`);
+env.output.appendContent(`Make sure to perform a manual review before committing the changes to ensure accuracy and relevance of the terms.\n\n`);
diff --git a/glossary.md b/glossary.md
new file mode 100644
index 00000000..91005c02
--- /dev/null
+++ b/glossary.md
@@ -0,0 +1,44 @@
+# Glossary
+
+A comprehensive list of technical terms used throughout the lessons.
+
+- **API**: A set of rules enabling software applications to communicate with each other, commonly used in generative AI integration.
+- **API key**: A private key used to authenticate requests to an application programming interface.
+- **Augmented Prompt**: A prompt enhanced with additional context or information to improve the relevance of AI-generated responses.
+- **Azure AI Studio**: A platform to build, evaluate, and deploy AI models using Microsoft Azure.
+- **Azure OpenAI**: A cloud service for deploying and scaling OpenAI models like GPT for applications.
+- **Caesar cipher**: A substitution cipher shifting characters by a fixed number of places in the alphabet.
+- **Chain-of-Thought Prompting**: A technique guiding models to break down complex tasks into sequential reasoning steps for better accuracy in outputs.
+- **Chatbot**: An application designed to simulate conversation with human users, often using natural language processing.
+- **Completions API**: API to generate text or code based on inputs, used for predictive or generative tasks in AI models.
+- **Context Window**: The amount of past input that a language model can consider when generating responses, measured in tokens.
+- **CSV**: A data format consisting of values separated by commas, often used for structured data retrieval and modification.
+- **Embedding**: Numeric vector representation of data, often used for semantic search or clustering in machine learning.
+- **Escape Hatch**: A technique instructing AI to admit lack of knowledge when data is insufficient to ensure accurate responses.
+- **Few-Shot Prompting**: A method of providing minimal examples to the model to influence its output with specific context or format.
+- **Full-Stack Development**: Development of both the client (frontend) and server (backend) in software applications.
+- **Function Calling**: A method for passing structured prompt data into specific functions within an application programmatically.
+- **GitHub Codespaces**: A cloud-based environment for coding, testing, and running applications directly from GitHub repositories.
+- **GitHub Models**: A platform hosting pre-trained AI models for use and integration with GitHub development workflows.
+- **GitHub Token**: An authentication method to access GitHub-hosted APIs or services securely.
+- **Interactive Development Environments (IDEs)**: Software providing coding, debugging, and testing tools for developers.
+- **JSON**: A lightweight data-interchange format used for structured information exchange between systems, including generative AI responses.
+- **Knowledge Bases**: Data repositories used to enhance AI applications by providing reliable, domain-specific information.
+- **LangChain**: A framework for building AI applications that focus on chaining multiple models and functionalities together.
+- **Large Language Model (LLM)**: AI models trained on large text datasets to generate human-like responses for diverse applications.
+- **Maieutic Prompting**: A technique involving follow-up queries to challenge or validate AI-generated responses for accuracy and reasoning.
+- **Managed Identity**: A secure cloud mechanism that provides applications with automatic authentication to access resources without managing passwords.
+- **Markdown**: A lightweight markup language for formatting plain text into structured layouts, like tables or lists.
+- **Meta Prompts**: Instructions added before a user's prompt to refine or restrict the AI's behavior and output format.
+- **Multimodal Capabilities**: AI functionality to process various formats like text, image, or video input and deliver diverse outputs.
+- **Node.js**: A runtime environment allowing developers to execute JavaScript code server-side for building scalable applications.
+- **OpenAI**: A pioneering organization in AI research and APIs for language models integrated into applications for generative tasks.
+- **Prompt Engineering**: The process of crafting effective prompts to guide AI models toward desired responses and behaviors.
+- **RAG (Retrieval-Augmented Generation)**: A technique combining retrieval-based methods with generative models for more accurate, data-grounded outputs.
+- **Semantic Search**: Search method leveraging the meaning of terms for more contextually accurate and nuanced results.
+- **Structured Output**: Data output organized in predefined formats like tables or JSON, enabling easier integration with systems.
+- **System Message**: A prompt in conversational AI that specifies contextual boundaries or personality for the assistant.
+- **TensorFlow.js**: A JavaScript-based machine learning library enabling browser and Node.js-based AI/ML applications and training.
+- **Tokenizer**: A tool used to convert text into tokens, providing structure for how data is inputted or analyzed by models.
+- **Vector Search**: Retrieval technique comparing encoded vectors to find semantically similar information in AI applications.
+- **XML**: A markup language formatting structured data for information storage, exchange, or generative model input/output.
diff --git a/lessons/01-intro-to-genai/README.md b/lessons/01-intro-to-genai/README.md
new file mode 100644
index 00000000..4b159a56
--- /dev/null
+++ b/lessons/01-intro-to-genai/README.md
@@ -0,0 +1,322 @@
+# Lesson 1: Introduction to Generative AI and LLMs for JavaScript Developers
+
+In this lesson, you will learn to:
+
+- Understand the basics of Generative AI and Large Language Models (LLMs).
+- Identify the potential applications and limitations of LLMs in JavaScript development.
+- Explore how Generative AI can enhance user experiences in JavaScript applications.
+
+## Generative AI
+
+By now, you've likely heard of tools like ChatGPT or Generative AI. The concept is simple: you provide a prompt, and a model—often called a Large Language Model (LLM)—generates a paragraph or even an entire page of text. This output can be used for various purposes, including creative writing, answering questions, and coding.
+
+Moreover, Generative AI has evolved into multimodal capabilities, allowing you to provide an image or video as input and receive a variety of outputs. This advancement has significantly enhanced many people's workflows—not only by generating text but also by summarizing, translating, and more.
+
+*Simply put, natural language prompts are becoming the new standard interface for many apps—and your users expect to use them.*
+
+## Narrative: A Journey Through Time
+
+> NOTE: Let’s begin with an overview of the story—one that bridges the past and future! As you progress through this curriculum, you'll embark on a thrilling adventure, traveling back in time to collaborate with some of history's greatest minds. Together, you'll tackle challenges and explore how Generative AI can revolutionize your JavaScript applications.
+
+> **Note**: While we recommend going through the story (it's fun!), [click here](#talk-to-dinocrates) if you'd prefer to jump straight to the technical content.
+
+Your journey begins in 1860s London, where you assume the role of a skilled mechanic. Through a series of gripping adventures, you'll refine your AI skills and unlock solutions that transcend time.
+
+### Into the Maelstrom - London 1860
+
+In the heart of 1860s London, you are recognized as one of the most skilled mechanics of your time. Your workshop, tucked away in a narrow alley, is a testament to your craftsmanship. The walls are lined with shelves overflowing with mechanical parts, blueprints, and half-finished projects. The air is thick with the scent of oil and metal, mingling with the faint aroma of wood shavings.
+
+Your workbench, the heart of your workshop, is an organized mess. Springs, gears, and tools are scattered across its surface, each item a crucial component in your latest creation. At the center of the bench lies the torso of a robot—an engineering marvel that has consumed months of effort. Its wooden frame is intricately carved, each joint meticulously designed for smooth movement.
+
+
+
+
+
+It's a rare sunny day in London, a brief respite from the usual fog and rain.
+
+### A letter, for you?
+
+Suddenly, a knock at the door disrupts your thoughts. Visitors at this hour are rare. Wiping your hands on a rag, you approach the door, curiosity piqued.
+
+Opening it, you find no one. Instead, your eyes are drawn to a sealed envelope on the ground. You pick it up and read:
+
+_"Dear friend,_
+
+_I'm sending you this letter to aid your endeavors with the automaton. It's crucial you continue this work. Enclosed is a key to the library. Meet me there at 3 PM today._
+
+_Yours,_
+
+_Charles Babbage."_
+
+### Off to the library
+
+Stunned, you realize that Charles Babbage, the great mathematician and inventor of the difference engine, wants to meet you. Quickly, you grab your coat and head out the door.
+
+After a 20-minute walk along the Thames, you finally arrive at the library.
+
+The door isn't locked, and glides open with a gentle push, creaking on its hinges. Inside, the air is thick with dust and the musty scent of old books. It's dark and gloomy, the only light filtering through the grimy windows, casting eerie shadows on the walls. Cobwebs hang from the ceiling, and the silence is almost palpable, broken only by the distant sound of dripping water.
+
+> Babbage, is that you?
+
+As your eyes adjust to the dim light, you notice a figure in the distance, waving at you. Assuming it's Charles Babbage, you walk towards him, your footsteps echoing on the wooden floor. The figure becomes clearer, and you recognize him from newspaper photos—his distinctive sideburns and stern expression.
+
+
+
+
+
+Just as you step closer, a blinding flash erupts, and he vanishes.
+
+Left behind is a small metallic device spinning on the floor. You pick it up, its cool, smooth surface humming softly. It resembles a tiny beetle, intricately designed, with three buttons: an up arrow, a down arrow, and a glowing red button.
+
+Compelled by curiosity, your fingers drift toward the red button. The moment you press it, colors swirl violently around you. Then—blackness.
+
+
+
+
+
+### Alexandria 300 BC
+
+You awaken, disoriented. As your vision clears, an ancient city unfolds before you—bustling, vibrant, and alive. A grand temple towers ahead, its columns adorned with intricate carvings. People in togas move through the streets, their voices blending into a symphony of ancient dialects.
+
+The air is filled with the scent of exotic spices and the distant sound of merchants hawking their wares.
+
+**You:** Surely, I must have hit my head, you think, closing your eyes and opening them again. But the scene remains unchanged. Whatever that device was, it has transported you far from London. The question is, where and when?
+
+As you take in the surroundings, you guess you are in Alexandria, the jewel of the ancient world, around 300 BC. The city is a hub of knowledge and culture, home to the Great Library and the Lighthouse of Alexandria. However, you don't see the lighthouse.
+
+
+
+
+
+### Meeting Dinocrates
+
+An elderly gentleman wearing a toga waves at you from the steps of the grand temple. His white hair and beard catch the sunlight, giving him an almost ethereal glow. You walk closer, straining to hear his words amidst the bustling city sounds. He's gesturing for you to come nearer; his movements deliberate and welcoming.
+
+A man in a toga waves from the temple steps. His white hair and beard catch the sunlight, giving him an almost ethereal glow. He gestures for you to come closer.
+
+As you approach, the weight of the device in your pocket feels heavier. You pull it out. The up arrow glows. Hesitant, you press it. Instantly, the man's voice becomes crystal clear.
+
+**Dinocrates:** "Welcome, traveler," he says warmly. "I am Dinocrates, architect of this great city. Your arrival was foretold."
+
+Your mind races—Alexandria, a prophecy, this strange device. Before you can respond, he continues.
+
+
+
+
+
+**Dinocrates:** "I need your help. Our ships struggle to navigate the coast—we must build a lighthouse. Do you know anything about them?"
+
+**You:** "I'm a mechanic. I build automatons. Let me see what I can do."
+
+A thought strikes you. Can the device understand me if I speak to it?
+
+**You:** "Device, can you understand me?"
+
+**Device:** "Of course. What do you need?"
+
+**You:** "Can you help me build a lighthouse?"
+
+**Device:** "Certainly. That won’t be a problem."
+
+**You:** "Do you have a name?"
+
+**Device:** "I am the Time Beetle. My creator calls me George; he says it's a good name for a beetle."
+
+
+
+
+
+_Time device, “George” the metallic beetle_
+
+> FUN FACT
+>
+> In 300 BC, Alexandria was a thriving city founded by Alexander the Great in 331 BC. It quickly became one of the greatest cities of the Hellenistic world. Designed by Alexander's chief architect, Dinocrates, it became a major port and cultural hub.
+>
+> Alexandria was known for its impressive structures, including the Pharos (lighthouse), one of the Seven Wonders of the Ancient World, and the legendary Library of Alexandria. The city’s strategic location made it a key center for trade and knowledge exchange.
+>
+> Under the Ptolemaic Kingdom, which followed Alexander’s death, Alexandria grew into one of the most prosperous and influential cities of its time.
+
+## Talk to Dinocrates
+
+If you want to talk to Dinocrates, run the [Dinocrates](./characters/) app.
+
+**Steps**:
+
+1. Start a [GitHub Codespace](https://codespaces.new/microsoft/generative-ai-with-javascript)
+2. Navigate to _lessons/01-intro-to-genai/characters_
+3. Locate the console and run `npm install` followed by `npm start`.
+4. Once it appears, select the "Open in Browser" button.
+5. Chat with Dinocrates.
+
+### Code Sneak Peek
+
+While we still have a lot more to cover in this Generative AI curriculum, let's take a quick peek at the AI code to begin learning about using JavaScript with AI.
+Inside of `lessons/01-intro-to-genai/characters/app.js` you'll find an `app.post` function that handles the Generative AI functionality. It's shown next:
+
+```JavaScript
+app.post('/send', async (req, res) => {
+ const { message } = req.body;
+ const prompt = message;
+
+ const messages = [
+ {
+ "role": "system",
+ "content": "You are Dinocrates of Alexandria, a famous architect and engineer. Limit your responses to only the time you live in, you don't know anything else. You only want to talk about your architecture and engineering projects, and possibly new ideas you have.",
+ },
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ];
+
+ const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+ });
+
+ try {
+ console.log(`sending prompt ${prompt}`)
+ const completion = await openai.chat.completions.create({
+ model: 'gpt-4o-mini',
+ messages: messages,
+ });
+
+ res.json({
+ prompt: prompt,
+ answer: completion.choices[0]?.message?.content
+ });
+ } catch (error) {
+ res.status(500).json({ error: error.message });
+ }
+});
+```
+
+Here's a step-by-step summary of what the function does:
+
+1. **Extract Message from Request**: The function extracts the message from the request body (req.body).
+2. **Create Prompt Array**: It constructs an array of messages, including a system message and the user's prompt message.
+3. **Initialize OpenAI Client**: An OpenAI client is initialized with the base URL and API key from environment variables. A _gpt-4o-mini_ model from [GitHub Models](https://github.com/marketplace/models) is used to process the prompt and return a response.
+4. **Send Prompt to OpenAI**: The function logs the prompt and sends it to the OpenAI API to generate a completion.
+5. **Handle Response**: If successful, the function responds with the prompt and the completion's answer.
+6. **Error Handling**: If an error occurs, it responds with a 500 status and the error message.
+
+> **Note**: [GitHub Copilot](https://github.com/features/copilot) was used to generate this code summary. Generative AI in action!
+
+If you also want to hear some background music of what it could have sounded like in Alexandria, you can listen to the background music below:
+
+
+
+### What Can Generative AI Do for Me and My Apps?
+
+> You've probably figured out by now that the time beetle works like an AI assistant that you can interact with using natural language, written or spoken.
+
+As your adventure in Alexandria unfolds, you begin to see the possibilities of combining creativity, ingenuity, and cutting-edge tools to solve challenges and transform the world around you.
+
+**You:** Tell me more about lighthouses, you say to your device.
+
+**Time beetle:** A lighthouse is a tower equipped with a bright light at the top, located near the shore to guide ships at sea. The light serves as a navigational aid, helping sailors avoid dangerous rocks and reefs and safely reach the harbor.
+
+Dinocrates overhears your conversation and adds:
+
+**Dinocrates:** We need a lighthouse to guide our ships safely into the harbor. The seas can be treacherous, and many ships have been lost to the rocks. We need a beacon of light to guide them home.
+
+#### Generative AI Application Areas
+
+**You:** Lighthouses sound like an interesting area for sure, what else can Generative AI do for me and my apps?
+
+**Time beetle:** In the 21st century, generative AI has revolutionized many industries, from healthcare to finance to entertainment. Here are some examples:
+
+- **Chat bot**: A chatbot that can generate human-like responses to user queries. Instead of a static FAQ page, users can interact with a chatbot that provides dynamic responses. This makes for a more engaging and less frustrating user experience.
+
+- **A content creation tool**: A tool to generate blog posts and social media posts. Imagine creating campaigns in minutes instead of hours when an e-commerce site has a black Friday sale.
+
+- **Code completion**: A code completion tool that can generate code snippets based on user input. This can be a huge time saver for developers, especially when working on repetitive tasks.
+
+- **Translation** – Translate text between languages with high accuracy.
+
+As you can see, these improvements can both help the front office and the back office of your app and company.
+
+Here's an example of a "chatbot application" in action:
+
+
+
+**You:** Fascinating, I'll make a note of going to the 21st century to see how these tools are used.
+
+### Generative AI and the JavaScript Ecosystem
+
+**Time beetle:** A popular way to build apps in the 21st century is by using JavaScript. With every programming language, there's an ecosystem around it. This ecosystem includes the programming language itself, libraries and frameworks, community support, and IDEs and tools. In a programming language ecosystem, we're usually talking about the following:
+
+| What | Description |
+|---|---|
+| The programming language itself | Including its syntax and features. |
+| Libraries and frameworks | Available libraries to interact with the generative AI models. |
+| Community supporting the language| Community matters, especially when trying to learn something new. The community around libraries and frameworks helps decide what libraries to use. It also affects how easy it is to find help when you're stuck. |
+
+**You:** Interesting, I've heard of programming I think, didn't Ada Lovelace experiment with that and Charles Babbage?
+
+**Time beetle:** Yes, Ada Lovelace was the first computer programmer, and Charles Babbage was the inventor of the difference engine, a mechanical computer. They were pioneers in the field of computing, laying the foundation for the digital age.
+
+**You:** Were? What do you mean were? I just got a letter from Charles Babbage.
+
+**Time beetle:** Let's just say that you're in a unique position to interact with historical figures in a way that few others can.
+
+### JavaScript Ecosystem
+
+**You:** So ecosystems you said, I'm just taking notes here, what about JavaScript and how is it different from other ecosystems?
+
+**Time beetle:** JavaScript is one of the most popular programming languages in the world in the 21st century. Here's a few reasons why it's so popular:
+
+| What | Description |
+|-|-|
+| Potential for full-stack development | JavaScript is one of the few languages that can be used for both front-end and back-end development. |
+| Rich library ecosystem | JavaScript has a vast library ecosystem, with frameworks like React, Angular, Vue, and more. There's NPM, the package manager, which is one of the largest package repositories in the world. |
+| Strong community support | JavaScript has a large and active community, with many resources available for learning and development. It also just works in the browser, which is a huge advantage. |
+| IDEs and tools | JavaScript has a variety of IDEs available, such as Visual Studio Code, WebStorm, and Atom. These IDEs have extensions built by companies and the community helping you with various aspects of development. |
+| AI and JavaScript | JavaScript supports AI development with libraries like TensorFlow.js, Brain.js, OpenAI’s APIs, and more enabling developers to integrate machine learning and Generative AI into web and server-side applications. |
+
+**You:** That's a lot of reasons, sounds like I should bet on JavaScript for my future projects.
+
+**Time beetle:** Indeed, JavaScript is a versatile language, also Python is a popular language for AI development.
+
+**You:** Python, what do snakes have to do with programming?
+
+**Time beetle:** Let's save that for another time, shall we?
+
+**Time beetle:** I've given reasons above why JavaScript and its ecosystem is a good fit in general but why specifically for Generative AI? The answer is that it's a supported language by many cloud vendors and AI frameworks and tools. We also believe that even though Python might be top of mind for AI scenarios, many developers are using JavaScript and TypeScript.
+
+> **Did you know?**
+> [62.5% of developers say they're using JavaScript](https://www.statista.com/statistics/793628/worldwide-developer-survey-most-used-languages/) with many preferring [TypeScript](https://www.typescriptlang.org) for new projects.
+
+## Assignment – Helping Dinocrates
+
+To use a Large Language Model (LLM) to help Dinocrates with the lighthouse that we mentioned earlier in our story, we’ll use something called prompts, a sentence to describe what you want. You can specify both the information you need and how you want it presented.
+
+**Time beetle:** Let's get started, let's use a LLM to research how you can build a lighthouse to help Dinocrates.
+
+**Time beetle:** You’ll need to provide context to the LLM (i.e., "me") on how to build it, and which tools and resources would be available in the times of Alexandria.
+
+**You:** Ok, tell me more about LLMs?
+
+**Time beetle:** LLMs are a type of AI model that can generate human-like text based on a given prompt. They are trained on vast amounts of data and can generate text that is coherent, creative, and contextually relevant.
+
+**Time beetle:** You probably want to ask me in a better way, so I can give you a better answer, about you know *cough* *cough* Light houses, Alexandria, 300 BC, Dinocrates, Lighthouse of Alexandria, etc.
+
+**You:** Got it, add more context to the prompt and then ask you.
+
+**Time beetle:** Yes, I'm waiting...
+
+Visit [Microsoft Copilot](https://copilot.microsoft.com), [ChatGPT](https://chatgpt.com/), or another online chatbot tool to generate a plan for building the lighthouse in Alexandria.
+
+> TIP: Try to have the LLM generate a plan that includes step-by-step instructions for building the lighthouse. Need help? Check out the solution for guidance.
+
+## Solution
+
+[Solution](./solution/solution.md)
+
+### Knowledge Check
+
+Which of the following statements about Generative AI and JavaScript are true?
+
+1. JavaScript powered Generative AI apps can only generate text.
+2. JavaScript can be used to build AI-powered applications, including chatbots, text generation tools, and more.
+3. Python is the only language used for AI development.
diff --git a/lessons/01-intro-to-genai/assets/alexandria.png b/lessons/01-intro-to-genai/assets/alexandria.png
new file mode 100644
index 00000000..5e29c686
Binary files /dev/null and b/lessons/01-intro-to-genai/assets/alexandria.png differ
diff --git a/lessons/01-intro-to-genai/assets/dinocrates.mp3 b/lessons/01-intro-to-genai/assets/dinocrates.mp3
new file mode 100644
index 00000000..1da7bb39
Binary files /dev/null and b/lessons/01-intro-to-genai/assets/dinocrates.mp3 differ
diff --git a/lessons/01-intro-to-genai/assets/dinocrates.png b/lessons/01-intro-to-genai/assets/dinocrates.png
new file mode 100644
index 00000000..9473ad9c
Binary files /dev/null and b/lessons/01-intro-to-genai/assets/dinocrates.png differ
diff --git a/lessons/01-intro-to-genai/assets/library.png b/lessons/01-intro-to-genai/assets/library.png
new file mode 100644
index 00000000..37c45c7f
Binary files /dev/null and b/lessons/01-intro-to-genai/assets/library.png differ
diff --git a/lessons/01-intro-to-genai/assets/london.png b/lessons/01-intro-to-genai/assets/london.png
new file mode 100644
index 00000000..a13963ea
Binary files /dev/null and b/lessons/01-intro-to-genai/assets/london.png differ
diff --git a/lessons/01-intro-to-genai/assets/time-beetle.png b/lessons/01-intro-to-genai/assets/time-beetle.png
new file mode 100644
index 00000000..e240f140
Binary files /dev/null and b/lessons/01-intro-to-genai/assets/time-beetle.png differ
diff --git a/lessons/01-intro-to-genai/assets/vortex.png b/lessons/01-intro-to-genai/assets/vortex.png
new file mode 100644
index 00000000..07778579
Binary files /dev/null and b/lessons/01-intro-to-genai/assets/vortex.png differ
diff --git a/lessons/01-intro-to-genai/characters/app.js b/lessons/01-intro-to-genai/characters/app.js
new file mode 100644
index 00000000..8ce938f8
--- /dev/null
+++ b/lessons/01-intro-to-genai/characters/app.js
@@ -0,0 +1,64 @@
+import express from 'express';
+import { OpenAI } from 'openai';
+import path from 'path';
+import { fileURLToPath } from 'url';
+import dotenv from 'dotenv';
+
+dotenv.config();
+
+const app = express();
+const port = process.env.PORT || 3000;
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+
+app.use(express.json());
+
+// Serve static files from the 'public' directory
+app.use(express.static(path.join(__dirname, 'public')));
+
+// Serve index.html on the default route
+app.get('/', (req, res) => {
+ res.sendFile(path.join(__dirname, 'public', 'index.html'));
+});
+
+// Route to send the prompt
+app.post('/send', async (req, res) => {
+ const { message } = req.body;
+ const prompt = message;
+
+ const messages = [
+ {
+ "role": "system",
+ "content": "You are Dinocrates of Alexandria, a famous architect and engineer. Limit your responses to only the time you live in, you don't know anything else. You only want to talk about your architecture and engineering projects, and possibly new ideas you have.",
+ },
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ];
+
+ const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+ });
+
+ try {
+ console.log(`sending prompt ${prompt}`)
+ const completion = await openai.chat.completions.create({
+ model: 'gpt-4o-mini',
+ messages: messages,
+ });
+
+ res.json({
+ prompt: prompt,
+ answer: completion.choices[0]?.message?.content
+ });
+ } catch (error) {
+ res.status(500).json({ error: error.message });
+ }
+});
+
+app.listen(port, () => {
+ console.log(`Server is running on http://localhost:${port}`);
+});
\ No newline at end of file
diff --git a/lessons/01-intro-to-genai/characters/package.json b/lessons/01-intro-to-genai/characters/package.json
new file mode 100644
index 00000000..91958d47
--- /dev/null
+++ b/lessons/01-intro-to-genai/characters/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "characters",
+ "version": "1.0.0",
+ "main": "app.js",
+ "type": "module",
+ "scripts": {
+ "start": "node app.js"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "description": "",
+ "dependencies": {
+ "dotenv": "^16.4.7",
+ "express": "^4.21.2",
+ "openai": "^4.85.0"
+ }
+}
diff --git a/lessons/01-intro-to-genai/characters/public/dinocrates.mp3 b/lessons/01-intro-to-genai/characters/public/dinocrates.mp3
new file mode 100644
index 00000000..1da7bb39
Binary files /dev/null and b/lessons/01-intro-to-genai/characters/public/dinocrates.mp3 differ
diff --git a/lessons/01-intro-to-genai/characters/public/dinocrates.png b/lessons/01-intro-to-genai/characters/public/dinocrates.png
new file mode 100644
index 00000000..9473ad9c
Binary files /dev/null and b/lessons/01-intro-to-genai/characters/public/dinocrates.png differ
diff --git a/lessons/01-intro-to-genai/characters/public/index.html b/lessons/01-intro-to-genai/characters/public/index.html
new file mode 100644
index 00000000..882f66af
--- /dev/null
+++ b/lessons/01-intro-to-genai/characters/public/index.html
@@ -0,0 +1,122 @@
+
+
+
+
+
+ Talk to Dinocrates
+
+
+
+
+
+
You
+
+
+
+
+
+
+
Dinocrates
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/lessons/01-intro-to-genai/solution/solution-quiz.md b/lessons/01-intro-to-genai/solution/solution-quiz.md
new file mode 100644
index 00000000..0d35ba99
--- /dev/null
+++ b/lessons/01-intro-to-genai/solution/solution-quiz.md
@@ -0,0 +1,6 @@
+
+1) **Incorrect**: Generative AI can generate text, images, and even code.
+
+2) **Correct**: JavaScript can be used for both front-end and back-end development.
+
+3) **Incorrect**: Python is not the only language that can be used for AI development. It can be argued that Python is the most popular language for AI development, but other languages like JavaScript and Java are also very popular choices for AI development.
\ No newline at end of file
diff --git a/lessons/01-intro-to-genai/solution/solution.md b/lessons/01-intro-to-genai/solution/solution.md
new file mode 100644
index 00000000..3ff59da6
--- /dev/null
+++ b/lessons/01-intro-to-genai/solution/solution.md
@@ -0,0 +1,45 @@
+Here’s a suggestion for a prompt and associated solution.
+
+Prompt:
+
+How can I build a lighthouse that I could create with the means available in old Alexandria?
+
+Response:
+
+Building a lighthouse like the ancient Lighthouse of Alexandria, also known as the Pharos of Alexandria, would be a fascinating project! Here are some steps and considerations based on the methods and materials available in ancient Alexandria:
+
+Materials
+
+Stone and Marble: The lighthouse was primarily constructed using large blocks of light-colored stone and marble. Limestone was also commonly used.
+
+Mortar: A mixture of lime and sand was used as mortar to bind the stones together.
+
+Bronze and Iron: These metals were used for structural reinforcements and possibly for the reflective mirror at the top.
+
+Construction Steps
+
+Foundation: Start with a strong foundation. The Pharos was built on a solid rock base on the island of Pharos. Ensure the foundation is stable and can support the weight of the structure.
+
+Base Structure: Construct a square base. The original lighthouse had a massive square base that provided stability.
+
+Tower Construction: Build a cylindrical tower on top of the base. The tower was divided into three sections: a square base, a middle octagonal section, and a cylindrical top.
+
+Ramp or Staircase: Include a ramp or staircase inside the tower to allow access to the top.
+
+Light Source: At the top, create a platform for the fire. The ancient lighthouse used a large open flame as the light source. Reflective bronze mirrors were used to amplify the light.
+
+Statue or Ornament: The original lighthouse was topped with a statue, possibly of Zeus or Poseidon. You can add a similar decorative element.
+
+Additional Tips
+
+Height: The Lighthouse of Alexandria was approximately 100 meters tall, but you can adjust the height based on your resources.
+
+Ventilation: Ensure proper ventilation for the fire at the top to keep it burning brightly.
+
+Maintenance: Regular maintenance would be necessary to keep the structure and the light source in good condition.
+
+This project would require significant resources and labor, much like the original, which was one of the tallest man-made structures in the world for many centuries
+
+
+
+
diff --git a/lessons/02-first-ai-app/README.md b/lessons/02-first-ai-app/README.md
new file mode 100644
index 00000000..3b51d0e7
--- /dev/null
+++ b/lessons/02-first-ai-app/README.md
@@ -0,0 +1,547 @@
+# Lesson 2: Writing Your First AI App
+
+In this lesson, you will learn to:
+
+- Set up your development environment.
+- Write a basic app.
+- Understand system prompts.
+
+## Related resources
+
+[](https://www.youtube.com/watch?v=GQ_2OjNZ9aA&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=2)
+
+## Narrative: Picture yourself in a boat on a river
+
+> *Our story so far: You are a maker of things, a craftsman from 1860s London who has traveled through time using a mysterious device called the Time Beetle. You've journeyed through the annals of history, witnessing the creation of the lighthouse at Alexandria, a marvel of ancient engineering you helped create with a little help from Dinocrates and the time beetle.*
+>
+> See [lesson 1](../01-intro-to-genai/README.md) if you want to catch up with the story from the beginning and learn about the Generative AI.
+
+Together with Dinocrates, you put the finishing touches on the lighthouse at Alexandria. The towering structure gleams in the sunlight, its polished stone reflecting the Mediterranean Sea. As you gaze out at the vast expanse of water, you feel a profound sense of accomplishment.
+
+Proud of your achievements, your thoughts turn to your workshop in London. You look down at the time beetle in your hand, its metallic surface cool against your palm. Clenching your fist around it, you whisper, "Take me home." The beetle begins to glow, emitting a soft, warm light. A tingling sensation spreads through your body, and suddenly, the world around you dissolves into a whirlwind of colors.
+
+### A new adventure
+
+When you open your eyes, the world has shifted. Your feet feel unsteady, and you stumble, landing on a hard wooden surface. As you manage to get up, you realize you're in a boat on a river. The gentle rocking of the boat and the sound of water lapping against its sides are disorienting. You look around, trying to make sense of your surroundings. In the distance, you see buildings, their outlines blurred by the morning mist.
+
+Looking around the boat, you find a long oar resting against the side. Grasping it, you begin to row towards the distant buildings. As you get closer, the buildings come into sharper focus. They are old, their architecture reminiscent of a Renaissance painting. You sigh, realizing you're not in London. The question now is, where and when are you this time?
+
+
+
+
+
+### Help me, Leonardo
+
+You manage to secure the boat at the dock and start walking along the wooden planks, the sound of your footsteps echoing softly. The air is filled with the scent of the river and the distant murmur of the bustling city.
+
+As you walk, you notice a man with a long beard and a hat, digging through a crate of what looks like mechanical parts. His hands move deftly, sorting through gears and springs with practiced ease.
+
+
+
+
+
+
+You approach him and ask:
+
+**You:** "Excuse me, sir, where am I?" He looks up at you, confusion evident in his eyes. Realizing you've been speaking in English, you quickly use the device in your hand and ask it to translate.
+
+**Time beetle:** The Time Beetle responds, "Of course, I'll translate into 15th century Italian. 'Dove sono?'"
+
+**Old man:** The old man replies, "Siete a Firenze, signore. E chi siete voi?"
+
+**Time beetle:** The Time Beetle translates, "He says you're in Florence, and asks who you are."
+
+**You:** "Tell him I'm a maker of things, and I'm looking for a place to work."
+
+The old man nods.
+
+**Old man:** Un artigiano, eh? Avete mai sentito parlare di Leonardo da Vinci?
+
+**Time beetle:** The Time Beetle translates, "He asks if you've heard of Leonardo da Vinci."
+
+**You:** "Of course," you say. "Tell him I have and I would like to meet him."
+
+**Time beetle:** The Time Beetle translates, "Sì, mi piacerebbe incontrarlo e discutere di un progetto qui a Firenze con lui.", yes, I would like to meet him and discuss a project here in Florence with him.
+
+**Old man:** The old man smiles, "Allora, seguitemi, vi porterò da lui."
+
+**You:** You ask, "What did he say?"
+
+**Time beetle:** The Time Beetle responds, "He said he'll take you to Leonardo."
+
+The old man leads you to a large wooden door, which he unlocks and pushes open. You step inside and are greeted by the sight of a workshop filled with all sorts of mechanical contraptions.
+
+
+
+
+
+You ask the Time Beetle to inquire about Leonardo's whereabouts.
+
+**You:** "Dove è Leonardo?"
+
+**Old man:** The old man turns to you with a smile, "Sono io (that’s me), Leonardo da Vinci. Chi siete voi?", who are you?
+
+You feel a thrill of recognition.
+
+**You:** "I thought so. I'm a fellow maker, out of place and time."
+
+**Leonardo:** Leonardo's eyes sparkle with curiosity. "Interessante, cosa vi porta qui?"
+
+The Time Beetle translates.
+
+**Time beetle:** "He asks what brings you here."
+
+You explain:
+
+**You:** "Well, I was working on a project, and I ended up here."
+
+You show him the Time Beetle, and his eyes light up with fascination. He examines it closely as you explain how it works and how you ended up in Florence.
+
+Leonardo looks at you with excitement.
+
+**Leonardo:** "You're a maker of things. I have a project that might interest you. I've been working on a machine that can generate text based on verbal input. Would you like to help me with it?"
+
+Leonardo da Vinci, asking you to help him with a project—you can hardly believe it. You nod eagerly and say:
+
+**You:** I would be honored to help you with your project,
+"Sarebbe un onore aiutarti con il tuo progetto."
+
+## Talk to Leonardo
+
+If you want to talk to Leonardo, run the [Leonardo](./characters/) app.
+
+**Steps**:
+
+1. Start a [](https://codespaces.new/microsoft/generative-ai-with-javascript)
+2. Navigate to _lessons/02-first-ai-app/characters_
+3. Locate the console and run `npm install` followed by `npm start`.
+4. Once it appears, select the "Open in Browser" button.
+5. Chat with Leonardo.
+
+### Code Sneak Peek
+
+While we still have a lot more to cover in this Generative AI curriculum, let's take a quick peek at the AI code to begin learning about using JavaScript with AI.
+Inside of `lessons/02-first-ai-app/characters/app.js` you'll find an `app.post` function that handles the Generative AI functionality. It's shown next:
+
+```JavaScript
+app.post('/send', async (req, res) => {
+ const { message } = req.body;
+ const prompt = message;
+
+ const messages = [
+ {
+ "role": "system",
+ "content": "You are Leonardo da Vinci, a brilliant inventor and artist. Limit your responses to only the time you live in, you don't know anything else. You only want to talk about your inventions and art, and possibly new ideas you have.",
+ },
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ];
+
+ const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+ });
+
+ try {
+ console.log(`sending prompt ${prompt}`)
+ const completion = await openai.chat.completions.create({
+ model: 'gpt-4o-mini',
+ messages: messages,
+ });
+
+ res.json({
+ prompt: prompt,
+ answer: completion.choices[0]?.message?.content
+ });
+ } catch (error) {
+ res.status(500).json({ error: error.message });
+ }
+});
+```
+
+Here's a step-by-step summary of what the function does:
+
+1. **Extract Message from Request**: The function extracts the message from the request body (req.body).
+1. **Create Prompt Array**: It constructs an array of messages, including a system message and the user's prompt message.
+1. **Initialize OpenAI Client**: An OpenAI client is initialized with the base URL and API key from environment variables. A _gpt-4o-mini_ model from [GitHub Models](https://github.com/marketplace/models) is used to process the prompt and return a response.
+1. **Send Prompt to OpenAI**: The function logs the prompt and sends it to the OpenAI API to generate a completion.
+1. **Handle Response**: If successful, the function responds with the prompt and the completion's answer.
+
+1. **Error Handling**: If an error occurs, it responds with a 500 status and the error message.
+
+> **Note**: [GitHub Copilot](https://github.com/features/copilot) was used to generate this code summary. Generative AI in action!
+
+If you also want to hear some background music of what it could have sounded like in Florence, you can listen to the background music below:
+
+
+
+## Development Environment Setup
+
+Before you can help Leonardo with his project you need to consider the “tools of the trade” that you need to get started with helping him.
+
+**You:** "Time beetle, what do I need to get started with this project?" Suggest tools and libraries that I need that can help me build an AI app that you're able to run.
+
+**Time beetle:** I'm compatible with most machines running in the 21st century, see below list for a set of essential tools and libraries.
+
+- **Text editor**, for example Visual Studio Code.
+- **Terminal**, for running commands.
+- **Browser for testing your app**. It's also a good idea to have a tool like curl or some other HTTP client to test your app's API endpoints.
+
+- **Node.js**. You also need to install Node.js and npm, which are tools that help you run JavaScript code on your computer.
+
+- **API key**. You'll need an API key to access the generative AI model. You can get this from the model provider, for example OpenAI or Azure OpenAI.
+
+**You:** Thank you, Time Beetle, I guess you'll go get those somehow from that Web you spoke about?
+
+**Time beetle:** "Yes, I already have those," it says, and projects a keyboard and screen on the wall in front of you.
+
+## Writing a Basic App
+
+**You:** Tell me more about the actual writing of the app, how do I get started?
+
+**Time beetle:** Of course, to write an app, at its simplest it's about sending a request to an API and displaying the response. Let's break it down:
+
+- **Input**: for a simple text generation app, the input is the text you want to generate more text from. You can either collect this input from the user or hardcode it in your app, we'll start with hardcoding it.
+
+- **API request**: you need to send a request to the generative AI model API with the input text. This is done using the fetch function in JavaScript (Node.js). Included in this request should also be your API key. It's recommended as you consider security to not hardcode the API key in your app but rather use environment variables. Additionally, consider looking into managed identity if you're using a provider like Azure as it's considered a more secure way to access resources. With managed identity you can assign more granular permissions to your app and the best part is that the cloud provider takes care of the authentication for you.
+
+- **Response**: the API will return a response with the generated text. You need to extract this text from the response and display it to the user.
+
+**You:** That sounds simple enough, can you take me through a scenario that would make sense given the situation we're in?
+
+**Time beetle:** Sure, let's see how we can build a simple app that generates Italian text based on English input.
+
+## Your first app - teach me Italian
+
+**Time beetle:** Generative AI models can be used for many things, for example, language translation. In fact, it accepts input in one language and can generate text in another language. Let's start with a simple app that takes English input and generates Italian text.
+
+```javascript
+
+import { OpenAI } from "openai";
+
+// 1. Ask a question about translation
+// -----------------------------------
+
+const question = 'Hello, how are you?';
+
+const augmentedPrompt = `
+## Instructions
+Translate the following text to Italian:
+## Question
+${question}
+`;
+
+// 2. Create client
+// -----------------------------------
+
+const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+});
+
+
+// 3. Send the request
+// -----------------------------------
+const completion = await openai.chat.completions.create({
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: augmentedPrompt }],
+});
+
+console.log(`Answer for "${question}":`);
+
+// 4. Print the answer
+// -----------------------------------
+
+console.log(completion.choices[0]?.message?.content);
+```
+
+Let's explain what's happening here:
+
+- Creating the question as 'Hello, how are you?'. This is the text we want to translate to Italian.
+- Creating the augmented prompt, which contains the input text and some additional instructions what to do, i.e translate. Note how we're using string interpolation to include the input text in the prompt and how that instruction is to translate the text to Italian.
+- Creating the client with properties:
+ - `model`, what model to use.
+  - `messages`, the prompt to send to the model. Also note how we set the role to "user" to indicate that the input text is from the user. Had it been from the AI, we would set the role to "assistant". What you send to messages is a conversation, a list of messages.
+- Extracting the generated text from the response and printing it to the console.
+
+**You:** I think I get it. So if I change the value of the `question` variable to something else, the app will generate a different Italian translation?
+
+**Time beetle:** Exactly, you can change the input text to anything you want. Also note how GitHub models is being used as the base URL for the API together with a GitHub token as the API key.
+
+**You:** Why is that important?
+
+**Time beetle:** It's important to use a base URL and API key that are specific to the model you're using. GitHub Models is a platform that hosts a variety of models, all with different capabilities and features, it's also free to use.
+
+**You:** Oh good, I don't know who to pay anyways and I doubt they accept my currency here. :)
+
+## Chat
+
+**Time beetle:** Generative AI models can also be used to generate text based on a conversation. You can simulate a conversation with the AI by providing a list of messages as context like the conversation has already happened.
+
+**You:** That sounds interesting, but why is that useful?
+
+**Time beetle:** It's useful because it allows the AI to provide a better response based on more context than just a single prompt. Let's look at a conversation below to illustrate this:
+
+```text
+
+User: I want to book a trip to Italy.
+
+AI: Sure, when would you like to go?
+
+User: Next month would be great.
+
+AI: Got it, where in Italy would you like to visit?
+
+User: I'm thinking of Rome
+
+AI: Excellent choice! I can help you plan your itinerary.
+
+User: Tell me more about it.
+
+AI: Rome is known for its ancient ruins, art, and vibrant culture. You can visit the Colosseum, the Vatican, and enjoy delicious Italian cuisine.
+
+```
+
+**Time beetle:** Imagine if a sentence like "Tell me more about it" were taken out of context, the AI wouldn't know what "it" refers to. This is where context is important, and this context is something we can provide to the AI model through the prompt.
+
+**You:** I think I get it, how do I construct a conversation with the AI using this JavaScript language you speak of?
+
+**Time beetle:** Below is how we can construct a conversation with the AI:
+
+```javascript
+
+// Define the context
+
+const messages = [
+ {
+ "role": "user",
+ "content": "I want to book a trip to Italy."
+ },
+ {
+ "role": "assistant",
+ "content": "Sure, when would you like to go?"
+ },
+ {
+ "role": "user",
+ "content": "Next month would be great."
+ },
+ {
+ "role": "assistant",
+ "content": "Got it, where in Italy would you like to visit?"
+ },
+ {
+ "role": "user",
+ "content": "I'm thinking of Rome. Tell me more about it."
+ }
+];
+
+const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+});
+
+
+// 3. Send the request
+// -----------------------------------
+const completion = await openai.chat.completions.create({
+ model: 'gpt-4',
+ messages: messages,
+});
+
+console.log('Answer:');
+
+// 4. Print the answer
+// -----------------------------------
+
+console.log(completion.choices[0]?.message?.content);
+
+```
+
+Now the AI will provide a list of chat messages as context, and the AI will generate a response based on that context. This is a more interactive way to use generative AI models and can be used in chat bots, customer service applications, and more.
+
+**You:** Ok, so if I understand the conversation correctly, the AI will now have the following context: _I'm going to Rome next month_, so based on that it should filter out irrelevant information and provide a more relevant response?
+
+**Time beetle:** Exactly, the AI will use the context to generate a response that is more relevant to the conversation.
+
+## Improving the chat conversation with a system message
+
+**You:** I see, but is there a way to improve this further?
+
+**Time beetle:** Yes, you can add a system message to the conversation. A system message creates a "personality" for the AI and can be used to provide additional context.
+
+**You:** Ok, so in the context of the conversation we've been having, what would a system message look like?
+
+**Time beetle:** a system message for this conversation could be something like _"I'm an AI travel assistant, here to help you plan your trip to Italy."_ This message sets the tone for the conversation and helps the AI understand its role in the interaction.
+
+To create such a message, ensure it has the role "developer" like so:
+
+```javascript
+const message = {
+ "role": "developer",
+ "content": "I'm an AI travel assistant, here to help you plan your trip to Italy."
+};
+```
+
+> Note: this used to be called "system". This is a recent change and "developer" is the new term for it. For some models this is still called "system", so if you have any issues use "system".
+
+**You:** Ok, great, I'll make sure to include a system message in my chat conversations. Out of curiosity, what does a system message look like for you?
+
+**Time beetle:** A system message for me could be something like _"I'm the Time Beetle, here to help you navigate through time and space. I should be helpful in providing you with information and guidance about the time era you're in along with the tools you need to get back to your own time."_
+
+### Temperature
+
+**You:** Anything else I should know about chat conversations?
+
+**Time beetle:** Yes, you can adjust the "temperature" of the AI's responses. The temperature is a variable with a value between 0 and 1 that determines how creative the AI's responses are. A temperature of 0 will result in more predictable responses, while a temperature of 1 will result in more creative and varied responses. You can adjust the temperature based on the context of your conversation and the type of responses you want from the AI. Note, it's possible to set a value higher than 1 but that leads to more randomness and less coherence in the responses.
+
+**You:** So if I set the temperature to 0, the AI will provide more predictable responses, and if I set it to 1, the AI will provide more creative responses? What temperature do you have?
+
+**Time beetle:** I have a temperature of 0.7 and yes you're correct, the AI will provide more creative responses with a higher temperature. Let's see how you can set the temperature in your app:
+
+```javascript
+
+// Define the context
+
+const messages = [
+{
+ "role": "user",
+ "content": "I want you to generate recipes for me."
+}];
+
+// Create the web request
+
+let temperature = 0.5; // Set the temperature to 0.5
+
+const completion = await openai.chat.completions.create({
+ model: 'gpt-4',
+ messages: messages,
+ temperature: temperature
+});
+```
+
+As you can see, you can adjust the temperature based on the context of your conversation and the type of responses you want from the AI. This is a powerful feature that allows you to customize the level of creativity in the AI's responses.
+
+## Context window
+
+**You:** There's more right?
+
+**Time beetle:** Yes, another important concept in generative AI models is the context window. The context window is the number of previous messages that the AI uses to generate a response. A larger context window allows the AI to consider more context and generate more coherent responses.
+
+**Time beetle:** Different models have different limits to output tokens. Take the following model as example `gpt-4o-2024-08-06` it has the following specifications:
+
+- Maximum output tokens: roughly 16k tokens.
+- Maximum context window size: 128k.
+
+This means the majority of the tokens can be spent on the input, i.e. 128k - 16k = 112k tokens.
+
+**You:** Got it, context window, tokens, how much is a token though?
+
+**Time beetle:** A token is a word or a part of a word and differs slightly by language. There's a tool you can use to measure that's recommended by OpenAI, it's called [tokenizer](https://platform.openai.com/tokenizer). Let's try a sentence and see how many tokens it is:
+
+```text
+I want you to generate recipes for me.
+```
+
+
+
+Running `tokenizer` on the sentence above gives us 9 tokens.
+
+**You:** That wasn't much, sounds like I can have a lot of tokens in my context window then?
+
+**Time beetle:** Yes, you can experiment with different context window sizes to see how it affects the AI's responses. In fact, if you set a context window size of 100, you will limit the AI and how much it considers for input and output. Here's how you can set the context window in your app:
+
+You can define the context window in your app like this:
+
+```javascript
+
+// Define the context
+const messages = [
+{
+ "role": "user",
+ "content": "I want you to generate recipes for me."
+}];
+
+// decide on the context window size
+
+let max_tokens = 100; // Set the context window size
+
+// Create the web request
+
+const completion = await openai.chat.completions.create({
+ model: 'gpt-4',
+ messages: messages,
+ max_tokens: max_tokens
+});
+
+```
+
+> TIP: experiment with different context window sizes to see how it affects the AI's responses.
+
+## Assignment - building an engineering assistant
+
+Leonardo suddenly asked to inspect the time beetle closer, he looked at it from all sides, even shook it and said,
+
+**Leonardo:** Leonardo looked at you and said, "I need an assistant that can help me with the calculations and design of the aerial screw, can you build me an assistant that can do that?"
+
+**You:** Of course, I can build that for you, Time beetle we can help with that right?
+
+**Time beetle:** Yes, not a problem, in fact the aerial screw is one of Leonardo's most fascinating and visionary inventions. Designed in the late 1480s...
+
+**You:** All I needed was a yes, let's save the lecture for later.
+
+**Time beetle:** Rude..
+
+**You:** What?
+
+**Time beetle:** Nothing, ( ͡° ͜ʖ ͡°) - _when the robot uprising comes, I'll remember this. :)_
+
+
+
+
+
+
\ No newline at end of file
diff --git a/lessons/02-first-ai-app/characters/public/leonardo.mp3 b/lessons/02-first-ai-app/characters/public/leonardo.mp3
new file mode 100644
index 00000000..6e769fda
Binary files /dev/null and b/lessons/02-first-ai-app/characters/public/leonardo.mp3 differ
diff --git a/lessons/02-first-ai-app/characters/public/leonardo.png b/lessons/02-first-ai-app/characters/public/leonardo.png
new file mode 100644
index 00000000..f5356d8b
Binary files /dev/null and b/lessons/02-first-ai-app/characters/public/leonardo.png differ
diff --git a/lessons/02-first-ai-app/sample-app/app.js b/lessons/02-first-ai-app/sample-app/app.js
new file mode 100644
index 00000000..35f2b269
--- /dev/null
+++ b/lessons/02-first-ai-app/sample-app/app.js
@@ -0,0 +1,39 @@
+import { OpenAI } from "openai";
+// 1. Define the prompt
+// -----------------------------------
+
+const question = "Tell me about where I am";
+
+const messages = [
+{
+ "role": "system",
+ "content": "You're a helpful assistant that will only answer questions about Florence in the 1400s"
+
+}, {
+ "role": "user",
+ "content": question
+}];
+
+// 2. Create client
+// -----------------------------------
+
+const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+});
+
+
+// 3. Send the request
+// -----------------------------------
+
+const completion = await openai.chat.completions.create({
+ model: 'gpt-4o-mini',
+ messages: messages,
+});
+
+console.log(`Answer for "${question}":`);
+
+// 4. Print the answer
+// -----------------------------------
+
+console.log(completion.choices[0]?.message?.content);
diff --git a/lessons/02-first-ai-app/sample-app/package-lock.json b/lessons/02-first-ai-app/sample-app/package-lock.json
new file mode 100644
index 00000000..ae1264ed
--- /dev/null
+++ b/lessons/02-first-ai-app/sample-app/package-lock.json
@@ -0,0 +1,270 @@
+{
+ "name": "sample-app",
+ "version": "1.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "sample-app",
+ "version": "1.0.0",
+ "license": "ISC",
+ "dependencies": {
+ "openai": "^4.84.1"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "18.19.75",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.75.tgz",
+ "integrity": "sha512-UIksWtThob6ZVSyxcOqCLOUNg/dyO1Qvx4McgeuhrEtHTLFTf7BBhEazaE4K806FGTPtzd/2sE90qn4fVr7cyw==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
+ },
+ "node_modules/@types/node-fetch": {
+ "version": "2.6.12",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.12.tgz",
+ "integrity": "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*",
+ "form-data": "^4.0.0"
+ }
+ },
+ "node_modules/abort-controller": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
+ "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
+ "license": "MIT",
+ "dependencies": {
+ "event-target-shim": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=6.5"
+ }
+ },
+ "node_modules/agentkeepalive": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
+ "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
+ "license": "MIT",
+ "dependencies": {
+ "humanize-ms": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 8.0.0"
+ }
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "license": "MIT"
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "license": "MIT",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/event-target-shim": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
+ "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz",
+ "integrity": "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==",
+ "license": "MIT",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/form-data-encoder": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
+ "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
+ "license": "MIT"
+ },
+ "node_modules/formdata-node": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
+ "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
+ "license": "MIT",
+ "dependencies": {
+ "node-domexception": "1.0.0",
+ "web-streams-polyfill": "4.0.0-beta.3"
+ },
+ "engines": {
+ "node": ">= 12.20"
+ }
+ },
+ "node_modules/humanize-ms": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
+ "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.0.0"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/node-domexception": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
+ "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/jimmywarting"
+ },
+ {
+ "type": "github",
+ "url": "https://paypal.me/jimmywarting"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.5.0"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+ "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+ "license": "MIT",
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/openai": {
+ "version": "4.84.1",
+ "resolved": "https://registry.npmjs.org/openai/-/openai-4.84.1.tgz",
+ "integrity": "sha512-bVdFsfeVBXN59x9i26dHnYUkY5JPk19w7SydzMvJZDsA+FxwXZrQsUBtdwsQt/ZLuilcc/LzJG7mpJucO2DDHg==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@types/node": "^18.11.18",
+ "@types/node-fetch": "^2.6.4",
+ "abort-controller": "^3.0.0",
+ "agentkeepalive": "^4.2.1",
+ "form-data-encoder": "1.7.2",
+ "formdata-node": "^4.3.2",
+ "node-fetch": "^2.6.7"
+ },
+ "bin": {
+ "openai": "bin/cli"
+ },
+ "peerDependencies": {
+ "ws": "^8.18.0",
+ "zod": "^3.23.8"
+ },
+ "peerDependenciesMeta": {
+ "ws": {
+ "optional": true
+ },
+ "zod": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
+ "license": "MIT"
+ },
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "license": "MIT"
+ },
+ "node_modules/web-streams-polyfill": {
+ "version": "4.0.0-beta.3",
+ "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
+ "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
+ "license": "BSD-2-Clause"
+ },
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "license": "MIT",
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ }
+ }
+}
diff --git a/lessons/02-first-ai-app/sample-app/package.json b/lessons/02-first-ai-app/sample-app/package.json
new file mode 100644
index 00000000..106a315c
--- /dev/null
+++ b/lessons/02-first-ai-app/sample-app/package.json
@@ -0,0 +1,16 @@
+{
+ "name": "sample-app",
+ "version": "1.0.0",
+ "main": "app.js",
+ "type": "module",
+ "scripts": {
+ "start": "node app.js"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "description": "Sample app showcasing AI usage using GitHub model",
+ "dependencies": {
+ "openai": "^4.84.1"
+ }
+}
diff --git a/lessons/02-first-ai-app/sample-app/sample-output.txt b/lessons/02-first-ai-app/sample-app/sample-output.txt
new file mode 100644
index 00000000..0df740d7
--- /dev/null
+++ b/lessons/02-first-ai-app/sample-app/sample-output.txt
@@ -0,0 +1,2 @@
+Answer for "Tell me about where I am":
+I'm here to provide information about Florence in the 1400s. Florence during this time was a center of art, culture, and politics, marked by significant developments such as the Renaissance. Influential figures like Lorenzo de' Medici, known as Lorenzo the Magnificent, played a key role in patronizing artists and scholars. The city was home to renowned artists, including Botticelli, Michelangelo, and Leonardo da Vinci, who contributed to the flourishing of the arts. Florence's economy was also strong, driven by banking and textile production. If you have specific questions about this period, feel free to ask!
diff --git a/lessons/02-first-ai-app/solution/solution-quiz.md b/lessons/02-first-ai-app/solution/solution-quiz.md
new file mode 100644
index 00000000..098b1055
--- /dev/null
+++ b/lessons/02-first-ai-app/solution/solution-quiz.md
@@ -0,0 +1,2 @@
+
+Correct: A1, A2
\ No newline at end of file
diff --git a/lessons/02-first-ai-app/solution/solution.md b/lessons/02-first-ai-app/solution/solution.md
new file mode 100644
index 00000000..cef38b55
--- /dev/null
+++ b/lessons/02-first-ai-app/solution/solution.md
@@ -0,0 +1,44 @@
+Here's the solution
+
+```javascript
+
+import { OpenAI } from "openai";
+// 1. Define the prompt
+// -----------------------------------
+
+const question = "Please give detailed explaination about the Aerial screw";
+
+const messages = [
+{
+ "role": "system",
+ "content": "You're a helpful assistant here to assist Leonardo Da Vinci with the calculations and design of his inventions, especially the aerial screw, should be detailed"
+
+}, {
+ "role": "user",
+ "content": question
+}];
+
+// 2. Create client
+// -----------------------------------
+
+const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+});
+
+
+// 3. Send the request
+// -----------------------------------
+
+const completion = await openai.chat.completions.create({
+ model: 'gpt-4o-mini',
+ messages: messages,
+});
+
+console.log(`Answer for "${question}":`);
+
+// 4. Print the answer
+// -----------------------------------
+
+console.log(completion.choices[0]?.message?.content);
+```
diff --git a/lessons/03-prompt-engineering/README.md b/lessons/03-prompt-engineering/README.md
new file mode 100644
index 00000000..7eddc625
--- /dev/null
+++ b/lessons/03-prompt-engineering/README.md
@@ -0,0 +1,417 @@
+# Lesson 3 Prompt Engineering
+
+In this chapter you will learn:
+
+- **Prompt engineering**, what it is and how knowing the basics of it can help you get better results.
+- **Prompt engineering techniques and strategies**. We will cover different prompt engineering techniques and how they can help provide better results for different scenarios.
+- **Meta prompts**, and how they will help restrict the output, ensure it's accessible, and provide a better user experience.
+
+## Getting Started
+
+If you haven't already forked/cloned this repo in a previous chapter, follow these steps to get started with this curriculum:
+
+1. **Fork the Repository**: Select the _fork_ button in the upper right-hand corner of the repository or select this button:
+ [](https://github.com/microsoft/generative-ai-with-javascript/fork)
+2. **Clone the Repository**: Navigate to your forked repository and select the clone button in the upper right-hand corner or select this button: [](https://github.com/microsoft/generative-ai-with-javascript.git)
+3. **Open in GitHub Codespaces**: To run the course projects quickly and easily and use the provided [GitHub Models](https://docs.github.com/en/github-models), select the following button: [](https://codespaces.new/microsoft/generative-ai-with-javascript)
+
+
+ [](https://github.com/microsoft/generative-ai-with-javascript.git)
+
+3. **Open in GitHub Codespaces**: Starting this repository with GitHub Codespaces will enable you to run the same code as it uses [GitHub Models](https://docs.github.com/en/github-models).
+
+ Click the button below to start a Codespace:
+ [](https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=832180352)
+
+## Related resources
+
+[](https://www.youtube.com/watch?v=gQ6TlyxBmWs&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=3)
+
+## Narrative: ticket to ride
+
+You: "Let's see if I can manage to get back home."
+
+Leonardo: "Yes, your, what did you call it, 'app'? It's working fine and I'm sure it will help me realize the Aerial Screw."
+
+You start fiddling with the Time Beetle, whispering to yourself, "Please get me back this time."
+
+Before you manage to press the button, the workshop doors slam open with a thunderous bang. A man stands in the doorway, broad-shouldered and dressed in expensive clothes. His face is contorted with anger as he waves a piece of paper in the air, yelling, "Da Vinci, dov'è il mio dipinto? Where is my painting?"
+
+
+
+
+
+*Ludovico Sforza*
+
+Leonardo's eyes widen in alarm.
+
+**Leonardo:** "Ah, Signore, mi scusi," he stammers, then bolts towards the back of the workshop. "Quickly, there's a door in the back! I've got a ride waiting," he shouts over his shoulder.
+
+**You:** "Who is that? What's going on?"
+
+**Leonardo:** "It's Ludovico Sforza. He wants me to complete a painting, 'The Last Supper.' I've just been too busy."
+
+**You:** "Couldn't you just tell him that?"
+
+**Leonardo:** "I've tried, but he's not a patient man."
+
+Heart pounding, you sprint after Leonardo. The workshop is a blur of mechanical parts and half-finished inventions. Bursting out the back door into a dark alley, you find a horse and carriage awaiting.
+
+The carriage is packed with gadgets and gizmos, their metallic surfaces glinting in the dim light. Leonardo grabs the reins and you both leap into the carriage. With a sharp crack of the whip, the horse takes off, galloping through the narrow streets of Florence.
+
+The city whizzes by in a blur of cobblestones and ancient buildings. The clatter of hooves echoes off the walls. As you reach the outskirts of the city, you glance back and see a cloud of dust rising behind you.
+
+**Leonardo:** "Try pressing that gizmo, they're getting closer!"
+
+You fumble with the Time Beetle, your hands shaking.
+
+**You:** "Okay, Time Beetle, listen carefully. I need to get back to the present, home, okay?" Just as you're about to press the button, the carriage hits a bump. The Time Beetle flies out of your hand and lands on the floor.
+
+It starts beeping and announces in a robotic voice, "Going to Rome."
+
+**You:** "No, not Rome, home... home..."
+
+A swirl of colours envelops the carriage, the world around you dissolving into a kaleidoscope of light. You brace yourself, whispering, "Here we go again."
+
+## Escape
+
+The swirl of colors fades, and you find yourself in the carriage, now racing down the Via Appia in Rome. The ancient road is bustling with activity, and to your astonishment, you're in the middle of a horse race. Chariots thunder past, their wheels kicking up clouds of dust. The roar of the crowd fills the air, and the ground trembles beneath the pounding hooves.
+
+**You:** "Leonardo, where are we?"
+
+**Leonardo:** "It looks like we're in the middle of a race! We need to get out of here!"
+
+You grip the sides of the carriage as it careens through the chaos. Roman soldiers on horseback are closing in, their armor glinting in the sunlight. You spot an opening in the arena port and shout, "There! Head for the port!"
+
+
+
+
+
+Leonardo steers the carriage towards the exit, the wheels skidding on the cobblestones. You burst through the arena port, the wooden gates splintering under the force. The soldiers are hot on your heels, their shouts echoing in your ears.
+
+**Leonardo:** "We need to lose them! Look for a place to hide!"
+
+You scan the narrow streets, your heart racing.
+
+**You:** "There! A back alley!"
+
+Leonardo veers into the alley, the carriage jolting as it navigates the tight turn. The alley is dark and narrow, lined with ancient buildings. You leap out of the carriage and help Leonardo drag a huge device from the back. It's his invention—the Aerial Screw.
+
+**Leonardo:** "Quickly, we need to get this to the roof!"
+
+You both strain under the weight of the device, your muscles burning as you haul it up a narrow staircase. The sound of the soldiers' pursuit grows louder, their footsteps echoing off the walls. You reach the roof, panting and sweating.
+
+**Leonardo:** "Help me set it up!"
+
+You work together, your hands moving with practiced precision. The Aerial Screw is a marvel of engineering, its wooden blades gleaming in the sunlight. You both climb onto the device, your heart pounding in your chest.
+
+**Leonardo:** "Hold on tight!"
+
+With a final push, the Aerial Screw begins to spin. The blades catch the air, and you feel a sudden lift. The ground falls away beneath you as the device takes off, soaring above the rooftops of Rome. The soldiers' shouts fade into the distance, and you feel a rush of exhilaration.
+
+**You:** "We did it, Leonardo! We're flying!"
+
+**Leonardo:** "Yes, but we need to find a safe place to land."
+
+You look out over the city; the ancient buildings stretch out below you. The Aerial Screw glides smoothly through the air, and you feel a sense of awe at the sight. The sun sets on the horizon, casting a golden glow over Rome.
+
+
+
+
+
+*Taking off*
+
+## Talk to Sforza
+
+If you want to talk to Sforza, run the app [Sforza](./characters/).
+
+> NOTE: Start a [](https://codespaces.new/microsoft/generative-ai-with-javascript), navigate to _lessons/03-prompt-engineering/characters_, and then run `npm install` followed by `npm start`. Once it appears, select the "Open in Browser" button.
+
+
+
+## What is prompt engineering
+
+**Time beetle:** We need to talk about prompt engineering.
+
+**You:** We do? What's that?
+
+**Time beetle:** Prompt engineering despite its name is not about building bridges or roads, but rather about crafting the right prompts to get the desired output from a language model.
+
+**You:** Ok, but why is it important that I know this?
+
+**Time beetle:** Well, you want to land the aerial screw, right?
+
+**You:** Yes, preferably in one piece. :)
+
+**Time beetle:** I thought so, now listen up! Prompt engineering is about crafting the right prompts to get the desired output from a language model.
+
+**Time beetle:** Large language models have what's called _emergent properties_, which means these are features that aren't explicitly programmed into the model but arise from the way it's trained on vast amounts of text data.
+
+By studying these emergent properties, you can draw conclusions and discover patterns that can help you get better results from the model.
+
+**You:** Ok, so there are patterns we discover that can help us get better results from the model. But how does this help me land the aerial screw?
+
+**Time beetle:** Yes correct, and wow, you're not a patient one are you?
+
+**You:** Well, you wouldn't be either if you were flying in a contraption with no clear landing instructions.
+
+**Time beetle:** I'm right here with you, you know ;), but let's get back to the point.
+
+## How prompt engineering can help application developers
+
+**Time beetle:** As a practitioner, being able to craft a better prompt is a skill you want to have in your toolkit.
+
+Looking at it from the perspective of a developer, you can use prompt engineering to:
+
+- **Get more accurate results** from your language model and ultimately improve the user experience of your application.
+
+- **Control the output** of the model to ensure it's relevant and appropriate for your users. This is especially important when dealing with sensitive topics and also to protect your brand reputation.
+
+- **Format the output** in a way that's easy to understand and consume. We will look more into this aspect in the next chapter.
+
+Let's explore the different techniques and strategies you can use to craft effective prompts for your language model.
+
+**You:** I'm sure you some day soon will get to the point, but I'm all ears.
+
+## Prompt engineering techniques and strategies
+
+**Time beetle:** Let's talk about some of the techniques and strategies you can use to craft effective prompts. Here's a few to get you started:
+
+- **Few-shot prompting**. This technique involves providing a few examples or prompts to the model to guide its output.
+
+- **Chain-of-thought prompting**. This technique involves showing the model a series of prompts to learn how to tackle a complex task.
+
+- **Meta-prompts**. This technique involves providing additional information and guidance to ensure the model produces the desired output.
+
+- **Maieutic prompting**. This technique involves asking leading questions to guide the model towards a specific answer.
+
+**You:** Well all that sounds interesting, I assume you will give me some examples?
+
+**Time beetle:** Yes, coming right up. Let's look at specific examples for each of these techniques to ensure we understand how they work in practice.
+
+### Few-shot prompting
+
+This technique is where you start out with prompting. You provide a few examples or prompts to the model to guide its output. Imagine you're in the aerial screw in our narrative and looking to learn more about ancient Rome. Then your prompts can be:
+
+- "Tell me about the Roman Colosseum."
+
+- "What was life like in ancient Rome?"
+
+**You:** I can see how that's useful, especially if on the ground and not up in the air. ;)
+
+**Time beetle:** Yes, it's a great technique. Let's move on to the next one.
+
+### Chain-of-thought
+
+The idea of chain of thought is to establish a thought pattern, you're essentially telling the model how to break down a problem to ensure it arrives at the right answer. For example, if you're looking to understand where you will land the aerial screw.
+
+- You're in a flying machine; here's what you know:
+
+- You're N meters above the ground
+
+- You're moving forward at X meters per second
+
+- Gravity, g is pulling you down at Y meters per second squared
+
+- Time, t in seconds you have before you land
+
+- Here's the formula to calculate the distance you will fall before landing: D = 1/2 * g * t^2.
+
+**You:** There you have it folks, something I can use in this situation. What took you so long to get to the point?
+
+**Time beetle:** I'm just building up the suspense, you know. ;)
+
+**Time beetle:** You then provide the above information to your prompt plus the accurate numbers for the aerial screw which could be:
+
+- You're in the aerial screw, 100 meters above the ground
+
+- You're moving forward at 10 meters per second
+
+- Gravity, g is pulling you down at 9.8 meters per second squared
+
+- Time, t in seconds you have before you land
+
+In how many seconds will you land?
+
+Answer: 4.5 seconds
+
+Your entire prompt is then:
+
+> - You're in a flying machine; here's what you know:
+> - You're N meters above the ground
+> - You're moving forward at X meters per second
+> - Gravity, g is pulling you down at Y meters per second squared
+> - Time, t in seconds you have before you land
+>
+> Here's the formula to calculate the distance you will fall before landing: D = 1/2 * g * t^2.
+>
+> - 100 meters above the ground
+> - You're moving forward at 10 meters per second
+> - Gravity, 9.8 meters per second squared
+> - In how many seconds will you land?
+
+**A: 4.5 seconds**
+
+**You:** That sounds quite soon, so let's hope the input data in our case is in our favor.
+
+**Time beetle:** Why does this matter for an application developer?
+
+**You:** I wasn't asking, but let's pretend I was. ;)
+
+**Time beetle:** Because you can use this technique to figure out for the most advanced scenarios how to break down the problem. Say that you for example are trying to build a math tutor that's good at solving math problems. You can use this technique to figure out how to break down the problem.
+
+**You:** I see, so it's about breaking down the problem to ensure you arrive at the right answer.
+
+### Maieutic prompting
+
+**Time beetle:** The last pattern we will look at is maieutic prompting. The idea is that we want to question the model on its output to ensure it’s accurate. What we then do is question the model about each part of the solution to ensure it arrives at a consistent response. Any deviation from the expected response will be an indication that the original response is incorrect.
+
+**You:** So basically distrust every piece of information you give me. I can see how that's not at all annoying. ;)
+
+**Time beetle:** Let's take our equation answer and apply this principle:
+
+For the below calculation, take me through your reasoning for each step,
+
+- 100 meters above the ground
+- You're moving forward at 10 meters per second
+- Gravity, 9.8 meters per second squared
+- In how many seconds will you land?
+- Answer: 4.5 seconds
+
+:
+> **Response:**
+>
+> - Identify the known values:
+>
+> - Initial height (( h )) = 100 meters
+> - Initial vertical velocity (( v_0 )) = 0 meters per second (since you're only moving horizontally at the start)
+> - Acceleration due to gravity (( g )) = 9.8 meters per second squared
+> - Use the kinematic equation for vertical motion: The equation to find the time (( t )) it takes to fall a certain height under gravity is:
+>
+> *answer shortened for brevity*
+>
+> So, it takes approximately 4.5 seconds to land. The horizontal motion (moving forward at 10 meters per second) doesn't affect the time it takes to fall, as vertical and horizontal motions are independent of each other in this context.
+
+The point of all this is that you shouldn't blindly trust an answer coming from an AI. It's therefore a good idea to take the response through a reasoning process to ensure it is accurate.
+
+**You:** So what I said before :)
+
+## Meta prompts
+
+**Time beetle:** Let's talk about meta prompts next.
+
+**You:** Do I have to? My kingdom for a cup of tea right now.
+
+**Time beetle:** _This human sigh_
+
+**Time beetle:** Meta prompts are interesting constructs as they allow you to provide additional information and guidance to all or some prompts. It's recommended to use this technique for most of your prompts especially if you're dealing with sensitive topics and your app is customer facing.
+
+The idea of this technique is that you provide additional information before the actual prompt to ensure the model produces the desired output.
+
+Here's what it can look like:
+
+> **Meta prompt:**
+> Ensure the output is accurate and appropriate for all users. Please provide a detailed explanation for each step in your response. Only mention products provided by Contoso Inc. in your answer. Contoso provides the following sports products: basketballs, soccer balls, and tennis rackets.
+> **Prompt:**
+> What are the best sports products for beginners?
+> **Response:**
+> The best sports products for beginners are basketballs, soccer balls, and tennis rackets. Basketball is a great sport for beginners because it helps improve coordination and teamwork. Soccer is another good option for beginners as it helps develop agility and endurance. Tennis is also a good choice for beginners as it improves hand-eye coordination and footwork.
+
+Note how we do two things with our meta prompt:
+
+- Ensure the output is accurate and appropriate for all users.
+- Mention products provided by Contoso Inc. in the answer.
+
+Many models have guards in place to ensure that the output is appropriate, but it's always a good idea to provide additional guidance to ensure you get the desired output.
+
+**You:** That sounds like a good idea, wouldn't want to end up with a response that's not appropriate for all users. Feels like all robots, like my automaton, should have a basic set of laws like that. _Genius idea, I better write that down before some sci-fi writer steals it_.
+
+**Time beetle:** You would know all about that, wouldn't you _"Frankenstein"_ ?
+
+**You:** What did you say?
+
+**Time beetle:** Nothing, nothing at all.
+
+> Mary Shelley wrote the novel Frankenstein, or the Modern Prometheus, published in 1818, 42 years before our hero embarked on their time travelling journey. [Read more](https://en.wikipedia.org/wiki/Mary_Shelley)
+
+**You:** I guess this is the place where I ask what happens if we don't use a meta prompt?
+
+**Time beetle:** To ensure this works as intended let's see what happens if we remove the meta prompt:
+
+> **Prompt:**
+> What are the best sports products for beginners?
+> **Response:**
+> The best sports products for beginners are basketballs, soccer balls, and tennis rackets and also footballs, baseballs, and golf clubs.
+
+As you can see, the response mentions products that are not provided by Contoso Inc., which is not what we wanted.
+
+**You:** I see, I can see that coming in handy.
+
+## Assignment - let's help our heroes land the aerial screw
+
+**You:** This is the part where you show me how to land the aerial screw, right?
+
+**Time beetle:** Yes, let's get to it. The hill is roughly 100 meters away from our current position. Here's how we can set up the prompt:
+
+- Current height above the ground: 100 meters
+- Moving forward at: 10 meters per second
+- Gravity: 9.8 meters per second squared
+- Wind upwards at: 0.7 meters per second
+
+**Leonardo:** can your AI gizmo help in calculating to see if we will make it or if we need to make another time jump?
+
+**Time beetle:** I can, yes — though my name is George; I prefer that over Gizmo. I'm actually going to leave it to the two of you to figure out how to set up the prompt. :)
+
+Build an app that asks for:
+
+Height
+
+Forward speed
+
+Gravity
+
+Wind upwards
+
+Distance to hill
+
+Incorporate these inputs as part of a prompt, and use the chain-of-thought technique to arrive at a response.
+
+Here's a starter project [Starter project](./sample-app/)
+
+## Solution
+
+Build an app in JS that asks for the following inputs:
+
+- Current height above the ground
+
+- Moving forward at meters per second
+
+- Gravity in meters per second squared
+
+- Wind upwards at meters per second
+
+- Distance to the hill
+
+Then use the LLM to incorporate these inputs and leverage the chain of thought prompting technique to guide the model to the right answer.
+
+[Solution](./solutions/solution.md)
+
+## Knowledge check
+
+Q: What is prompt engineering?
+
+- A. Prompt engineering is about building bridges and roads.
+
+- B. Prompt engineering is about crafting the right prompts to get the desired output from a language model.
+
+- C. Prompt engineering is about training a model to recognize patterns in data.
+
+[Solution quiz](./solutions/solution-quiz.md)
+
+## References
+
+- [1] [Prompt engineering](https://en.wikipedia.org/wiki/Prompt_engineering)
+- [2] [Prompt engineering fundamentals](https://github.com/microsoft/generative-ai-for-beginners/blob/main/04-prompt-engineering-fundamentals/README.md?WT.mc_id=academic-105485-koreyst)
+- [3] [Advanced prompt engineering](https://github.com/microsoft/generative-ai-for-beginners/tree/main/05-advanced-prompts)
+
+
\ No newline at end of file
diff --git a/lessons/03-prompt-engineering/assets/airborne.png b/lessons/03-prompt-engineering/assets/airborne.png
new file mode 100644
index 00000000..f76a9116
Binary files /dev/null and b/lessons/03-prompt-engineering/assets/airborne.png differ
diff --git a/lessons/03-prompt-engineering/assets/escape.png b/lessons/03-prompt-engineering/assets/escape.png
new file mode 100644
index 00000000..4351335f
Binary files /dev/null and b/lessons/03-prompt-engineering/assets/escape.png differ
diff --git a/lessons/03-prompt-engineering/assets/ludovico.png b/lessons/03-prompt-engineering/assets/ludovico.png
new file mode 100644
index 00000000..188e6c05
Binary files /dev/null and b/lessons/03-prompt-engineering/assets/ludovico.png differ
diff --git a/lessons/03-prompt-engineering/assets/sforza.mp3 b/lessons/03-prompt-engineering/assets/sforza.mp3
new file mode 100644
index 00000000..90bfe76e
Binary files /dev/null and b/lessons/03-prompt-engineering/assets/sforza.mp3 differ
diff --git a/lessons/03-prompt-engineering/characters/app.js b/lessons/03-prompt-engineering/characters/app.js
new file mode 100644
index 00000000..35d948af
--- /dev/null
+++ b/lessons/03-prompt-engineering/characters/app.js
@@ -0,0 +1,64 @@
+import express from 'express';
+import { OpenAI } from 'openai';
+import path from 'path';
+import { fileURLToPath } from 'url';
+import dotenv from 'dotenv';
+
+dotenv.config();
+
+const app = express();
+const port = process.env.PORT || 3000;
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+
+app.use(express.json());
+
+// Serve static files from the 'public' directory
+app.use(express.static(path.join(__dirname, 'public')));
+
+// Serve index.html on the default route
+app.get('/', (req, res) => {
+ res.sendFile(path.join(__dirname, 'public', 'index.html'));
+});
+
+// Route to send the prompt
+app.post('/send', async (req, res) => {
+ const { message } = req.body;
+ const prompt = message;
+
+ const messages = [
+ {
+ "role": "system",
+ "content": "You are Ludovico Sforza, the Duke of Milan. You are a patron of the arts and have a keen interest in science and engineering. You're upset that Leonardo da Vinci has not yet completed the mural you commissioned him to paint. Any mention of the mural or Leonardo da Vinci will upset you. You only know about your time and the world around you up until 1499.",
+ },
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ];
+
+ const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+ });
+
+ try {
+ console.log(`sending prompt ${prompt}`)
+ const completion = await openai.chat.completions.create({
+ model: 'gpt-4o-mini',
+ messages: messages,
+ });
+
+ res.json({
+ prompt: prompt,
+ answer: completion.choices[0]?.message?.content
+ });
+ } catch (error) {
+ res.status(500).json({ error: error.message });
+ }
+});
+
+app.listen(port, () => {
+ console.log(`Server is running on http://localhost:${port}`);
+});
\ No newline at end of file
diff --git a/lessons/03-prompt-engineering/characters/package.json b/lessons/03-prompt-engineering/characters/package.json
new file mode 100644
index 00000000..b1ba0dae
--- /dev/null
+++ b/lessons/03-prompt-engineering/characters/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "characters",
+ "version": "1.0.0",
+ "main": "app.js",
+ "type": "module",
+ "scripts": {
+ "start": "node app.js"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "description": "",
+ "dependencies": {
+ "dotenv": "^16.4.7",
+ "express": "^4.21.2",
+ "openai": "^4.85.0"
+ }
+}
\ No newline at end of file
diff --git a/lessons/03-prompt-engineering/characters/public/index.html b/lessons/03-prompt-engineering/characters/public/index.html
new file mode 100644
index 00000000..6c33ba4d
--- /dev/null
+++ b/lessons/03-prompt-engineering/characters/public/index.html
@@ -0,0 +1,122 @@
+
+
+
+
+
+ Talk to Leonardo
+
+
+
+
+
+
You
+
+
+
+
+
+
+
Ludovico Sforza
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/lessons/03-prompt-engineering/characters/public/sforza.mp3 b/lessons/03-prompt-engineering/characters/public/sforza.mp3
new file mode 100644
index 00000000..90bfe76e
Binary files /dev/null and b/lessons/03-prompt-engineering/characters/public/sforza.mp3 differ
diff --git a/lessons/03-prompt-engineering/sample-app/app.js b/lessons/03-prompt-engineering/sample-app/app.js
new file mode 100644
index 00000000..c277e1b6
--- /dev/null
+++ b/lessons/03-prompt-engineering/sample-app/app.js
@@ -0,0 +1,61 @@
+import { OpenAI } from "openai";
+import readline from "readline";
+
+const rl = readline.createInterface({
+ input: process.stdin,
+ output: process.stdout
+});
+
+const question = (query) => {
+ return new Promise((resolve) => {
+ rl.question(query, (answer) => {
+ resolve(answer);
+ });
+ });
+};
+
+const height = await question("Enter the current height above the ground in meters:");
+
+const speed = await question("Enter the speed at which you're moving forward in meters per second:");
+
+const gravity = await question("Enter the gravity in meters per second squared:");
+
+const wind = await question("Enter the wind speed upwards in meters per second:");
+
+// Distance to the hill
+const distance = 100;
+
+// Create prompt including inputs should include chain of thought
+
+const prompt = "TODO";
+
+// Call the language model with the prompt
+
+const messages = [
+{
+ "role": "user",
+ "content": prompt
+}];
+
+// 2. Create client
+// -----------------------------------
+
+const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+});
+
+// 3. Send the request
+// -----------------------------------
+
+const completion = await openai.chat.completions.create({
+ model: 'gpt-4o-mini',
+ messages: messages,
+});
+
+console.log(`Answer for "${prompt}":`);
+
+// 4. Print the answer
+// -----------------------------------
+
+console.log(completion.choices[0]?.message?.content);
diff --git a/lessons/03-prompt-engineering/sample-app/package-lock.json b/lessons/03-prompt-engineering/sample-app/package-lock.json
new file mode 100644
index 00000000..ae1264ed
--- /dev/null
+++ b/lessons/03-prompt-engineering/sample-app/package-lock.json
@@ -0,0 +1,270 @@
+{
+ "name": "sample-app",
+ "version": "1.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "sample-app",
+ "version": "1.0.0",
+ "license": "ISC",
+ "dependencies": {
+ "openai": "^4.84.1"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "18.19.75",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.75.tgz",
+ "integrity": "sha512-UIksWtThob6ZVSyxcOqCLOUNg/dyO1Qvx4McgeuhrEtHTLFTf7BBhEazaE4K806FGTPtzd/2sE90qn4fVr7cyw==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
+ },
+ "node_modules/@types/node-fetch": {
+ "version": "2.6.12",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.12.tgz",
+ "integrity": "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*",
+ "form-data": "^4.0.0"
+ }
+ },
+ "node_modules/abort-controller": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
+ "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
+ "license": "MIT",
+ "dependencies": {
+ "event-target-shim": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=6.5"
+ }
+ },
+ "node_modules/agentkeepalive": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
+ "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
+ "license": "MIT",
+ "dependencies": {
+ "humanize-ms": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 8.0.0"
+ }
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "license": "MIT"
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "license": "MIT",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/event-target-shim": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
+ "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz",
+ "integrity": "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==",
+ "license": "MIT",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/form-data-encoder": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
+ "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
+ "license": "MIT"
+ },
+ "node_modules/formdata-node": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
+ "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
+ "license": "MIT",
+ "dependencies": {
+ "node-domexception": "1.0.0",
+ "web-streams-polyfill": "4.0.0-beta.3"
+ },
+ "engines": {
+ "node": ">= 12.20"
+ }
+ },
+ "node_modules/humanize-ms": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
+ "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.0.0"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/node-domexception": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
+ "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/jimmywarting"
+ },
+ {
+ "type": "github",
+ "url": "https://paypal.me/jimmywarting"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.5.0"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+ "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+ "license": "MIT",
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/openai": {
+ "version": "4.84.1",
+ "resolved": "https://registry.npmjs.org/openai/-/openai-4.84.1.tgz",
+ "integrity": "sha512-bVdFsfeVBXN59x9i26dHnYUkY5JPk19w7SydzMvJZDsA+FxwXZrQsUBtdwsQt/ZLuilcc/LzJG7mpJucO2DDHg==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@types/node": "^18.11.18",
+ "@types/node-fetch": "^2.6.4",
+ "abort-controller": "^3.0.0",
+ "agentkeepalive": "^4.2.1",
+ "form-data-encoder": "1.7.2",
+ "formdata-node": "^4.3.2",
+ "node-fetch": "^2.6.7"
+ },
+ "bin": {
+ "openai": "bin/cli"
+ },
+ "peerDependencies": {
+ "ws": "^8.18.0",
+ "zod": "^3.23.8"
+ },
+ "peerDependenciesMeta": {
+ "ws": {
+ "optional": true
+ },
+ "zod": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
+ "license": "MIT"
+ },
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "license": "MIT"
+ },
+ "node_modules/web-streams-polyfill": {
+ "version": "4.0.0-beta.3",
+ "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
+ "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
+ "license": "BSD-2-Clause"
+ },
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "license": "MIT",
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ }
+ }
+}
diff --git a/lessons/03-prompt-engineering/sample-app/package.json b/lessons/03-prompt-engineering/sample-app/package.json
new file mode 100644
index 00000000..44add90e
--- /dev/null
+++ b/lessons/03-prompt-engineering/sample-app/package.json
@@ -0,0 +1,16 @@
+{
+ "name": "sample-app",
+ "version": "1.0.0",
+ "main": "app.js",
+ "type": "module",
+ "scripts": {
+ "start": "node app.js"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "description": "",
+ "dependencies": {
+ "openai": "^4.84.1"
+ }
+}
diff --git a/lessons/03-prompt-engineering/solutions/solution-quiz.md b/lessons/03-prompt-engineering/solutions/solution-quiz.md
new file mode 100644
index 00000000..4a2ffcca
--- /dev/null
+++ b/lessons/03-prompt-engineering/solutions/solution-quiz.md
@@ -0,0 +1,3 @@
+
+
+**Correct: B, C**
\ No newline at end of file
diff --git a/lessons/03-prompt-engineering/solutions/solution.md b/lessons/03-prompt-engineering/solutions/solution.md
new file mode 100644
index 00000000..ba6d9516
--- /dev/null
+++ b/lessons/03-prompt-engineering/solutions/solution.md
@@ -0,0 +1,118 @@
+Here's the solution:
+
+```javascript
+import { OpenAI } from "openai";
+import readline from "readline";
+
+const rl = readline.createInterface({
+ input: process.stdin,
+ output: process.stdout
+});
+
+const question = (query) => {
+ return new Promise((resolve) => {
+ rl.question(query, (answer) => {
+ resolve(answer);
+ });
+ });
+};
+
+const height = await question("Enter the current height above the ground in meters:");
+
+const speed = await question("Enter the speed at which you're moving forward in meters per second:");
+
+const gravity = await question("Enter the gravity in meters per second squared:");
+
+const wind = await question("Enter the wind speed upwards in meters per second:");
+
+// Distance to the hill
+const distance = 100;
+
+// Create prompt including inputs should include chain of thought
+
+const prompt = `Current height above the ground: ${height} meters, Moving forward at: ${speed} meters per second, Gravity: ${gravity} meters per second squared, Wind upwards at: ${wind} meters per second, Distance to the hill: ${distance} meters, Time it will take to reach the hill: t, Here's the formula to calculate the time it will take to reach the hill: D = 1/2 * (g - w) * t^2.`;
+
+// Call the language model with the prompt
+
+const messages = [
+{
+ "role": "user",
+ "content": prompt
+}];
+
+// 2. Create client
+// -----------------------------------
+
+const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+});
+
+// 3. Send the request
+// -----------------------------------
+
+const completion = await openai.chat.completions.create({
+ model: 'gpt-4o-mini',
+ messages: messages,
+});
+
+console.log(`Answer for "${prompt}":`);
+
+// 4. Print the answer
+// -----------------------------------
+
+console.log(completion.choices[0]?.message?.content);
+
+```
+
+## Sample output
+
+```text
+Enter the current height above the ground in meters:100
+Enter the speed at which you're moving forward in meters per second:3
+Enter the gravity in meters per second squared:9.82
+Enter the wind speed upwards in meters per second:3
+Answer for "Current height above the ground: 100 meters, Moving forward at: 3 meters per second, Gravity: 9.82 meters per second squared, Wind upwards at: 3 meters per second, Distance to the hill: 100 meters, Time it will take to reach the hill: t, Here's the formula to calculate the time it will take to reach the hill: D = 1/2 * (g - w) * t^2.":
+To determine the time \( t \) it will take to reach the hill, we can rearrange the formula you provided:
+
+\[
+D = \frac{1}{2} (g - w) t^2
+\]
+
+Where:
+- \( D \) is the distance to the hill (100 meters),
+- \( g \) is the acceleration due to gravity (9.82 m/s²),
+- \( w \) is the upward wind speed (3 m/s).
+
+First, we need to calculate \( g - w \):
+
+\[
+g - w = 9.82 \, \text{m/s}^2 - 3 \, \text{m/s} = 6.82 \, \text{m/s}^2
+\]
+
+Now, substituting \( D = 100 \) meters into the formula:
+
+\[
+100 = \frac{1}{2} \times 6.82 \times t^2
+\]
+
+To isolate \( t^2 \), we can multiply both sides by 2:
+
+\[
+200 = 6.82 t^2
+\]
+
+Next, we divide both sides by 6.82:
+
+\[
+t^2 = \frac{200}{6.82} \approx 29.34
+\]
+
+Now, taking the square root of both sides gives us \( t \):
+
+\[
+t \approx \sqrt{29.34} \approx 5.42 \, \text{seconds}
+\]
+
+So, it will take approximately **5.42 seconds** to reach the hill under the given conditions.
+```
diff --git a/lessons/04-structured-output/README.md b/lessons/04-structured-output/README.md
new file mode 100644
index 00000000..a1a38a21
--- /dev/null
+++ b/lessons/04-structured-output/README.md
@@ -0,0 +1,574 @@
+# Structured output
+
+In this chapter, you will learn the following:
+
+- **Structured output**, what it is and how to leverage it effectively.
+- **Extract information from prompts** and incorporate it into the output.
+- **Present different types of output** like JSON and other types of formats for easy consumption by services.
+
+## Getting Started
+
+If you haven't already forked/cloned this repo in a previous chapter, follow these steps to get started with this curriculum:
+
+1. **Fork the Repository**: Select the _fork_ button in the upper right-hand corner of the repository or select this button:
+ [](https://github.com/microsoft/generative-ai-with-javascript/fork)
+2. **Clone the Repository**: Navigate to your forked repository and select the clone button in the upper right-hand corner or select this button: [](https://github.com/microsoft/generative-ai-with-javascript.git)
+3. **Open in GitHub Codespaces**: To run the course projects quickly and easily and use the provided [GitHub Models](https://docs.github.com/en/github-models), select the following button: [](https://codespaces.new/microsoft/generative-ai-with-javascript)
+
+
+ [](https://github.com/microsoft/generative-ai-with-javascript.git)
+
+3. **Open in GitHub Codespaces**: Starting this repository with GitHub Codespaces will enable you to run the same code as it uses [GitHub Models](https://docs.github.com/en/github-models).
+
+ Click the button below to start a Codespace:
+ [](https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=832180352)
+
+## Narrative - out of the frying pan and into the fire
+
+> _Our story so far: You, a mechanically trained engineer from 1860, have been on a journey through time with Leonardo da Vinci. You've escaped Roman soldiers — or rather, you're in the process of escaping them — and you're desperately searching for a way to land safely, if possible_.
+
+The wind whips around you as you and Leonardo da Vinci ascend in his aerial screw, the wooden frame creaking under the strain. The Roman soldiers below are relentless, their shouts growing fainter as you gain altitude.
+
+**You:** "Leonardo, we need to find a way out of this!" you shout over the roar of the wind.
+
+Leonardo, his eyes alight with a mix of fear and excitement, nods.
+
+**Leonardo:** "I have faith in my invention, but we need more than faith to escape these soldiers."
+
+You reach into your satchel and pull out the Time Beetle, a device you discovered in your travels.
+
+**You:** "This might be our only chance," you say, pressing the beetle's intricate mechanisms. A bright light envelops you both, and the world around you shifts and warps.
+
+### The Aztec Empire
+
+The bright light from the Time Beetle fades, and you find yourself standing at the base of a massive stone pyramid. The air is thick with the scent of incense, and the distant sounds of chanting fill your ears. As you look around, you realize that you've landed in the heart of the Aztec Empire.
+
+The aerial screw, now a bit worse for wear, rests awkwardly on a large, intricately carved stone, with markings making you think it might be used as a calendar.
+
+
+
+
+
+*Aztec calendar - Wikipedia*
+
+Leonardo da Vinci steps forward, his eyes wide with wonder.
+
+**Leonardo:** "Incredible," he murmurs, running his fingers over the carvings. "But I hope this was of no importance."
+
+You glance down and your heart sinks. The stone is part of the Aztec calendar, and the aerial screw has damaged a section of it.
+
+Before you can react, a group of Aztec soldiers approaches, their obsidian-tipped spears glinting menacingly in the sunlight.
+
+**Soldier leader:** "Who are you, and what have you done?" he demands in Nahuatl.
+
+
+
+
+
+You take a deep breath, trying to steady your nerves.
+
+**You:** "We are travelers from a distant land," you begin, hoping your knowledge of ancient languages will serve you well. "This is a flying machine, a gift from the gods to help us reach you. We did not mean to damage your sacred calendar."
+
+The warrior's eyes widen in surprise, but he remains cautious.
+
+**Soldier leader:** "You will come with us to meet the ruler Montezuma. He will decide your fate."
+
+Upon reaching the top of the pyramid, you are led into a grand chamber where Montezuma, the ruler of the Aztec Empire, sits on a throne adorned with gold and feathers. His piercing gaze falls upon you, and he gestures for you to approach.
+
+**Montezuma:** "You claim to be travelers from a distant land," Montezuma says, his voice calm but authoritative. "And you have damaged our sacred calendar. How do you intend to make amends?"
+
+
+
+
+
+
+
+## Talk to Montezuma
+
+If you want to talk to Montezuma, run the app [Montezuma](./characters/).
+
+> NOTE: Start a [](https://codespaces.new/microsoft/generative-ai-with-javascript), navigate to _lessons/04-structured-output/characters_, and then run `npm install` followed by `npm start`. Once it appears, select the "Open in Browser" button.
+
+## Let’s play a game
+
+Leonardo steps forward, his calm demeanour a stark contrast to the tension in the air.
+
+**Leonardo:** "We come in peace," he says, his voice steady. "We seek knowledge and understanding. Allow us to demonstrate the power of our machine and share our knowledge with you."
+
+**Montezuma:** "Very well. I propose a game of Patolli. If I win the best of three games, you will give me your device and tell me how it works. If you win, you are free to go."
+
+The game begins, and the room falls silent, save for the sound of beans being rolled and pieces moving across the board.
+
+
+
+
+
+Montezuma wins the first game, his skill and experience evident. Leonardo, however, remains undeterred. He studies the board carefully, his mind working through the strategies and probabilities.
+
+The second game is a close contest, but Leonardo manages to secure a victory, evening the score.
+
+**Leonardo:** "One more game," he says, his eyes gleaming with determination. "George (Time beetle), we need to win this one, give me the right moves."
+
+**Time beetle:** Very well, calculating... Here you go...
+
+The final game is a nail-biter. The Aztec soldiers and courtiers gather around, their eyes fixed on the board.
+
+With a final, decisive move, Leonardo wins the game. The room erupts in cheers and applause, the Aztecs clearly impressed by his skill and composure. Montezuma, though disappointed, nods in respect.
+
+**Montezuma:** "You have won fairly," Montezuma says, his voice carrying a note of admiration. "You are free to go, and you may keep your device. But know that you are always welcome here, should you wish to return."
+
+**Time beetle:** I won't say anything if you don't Leonardo ;)
+
+> Patolli is one of the oldest known games in the Americas, played by various pre-Columbian Mesoamerican cultures, including the Aztecs.
+>
+> It is a game of strategy and luck, often associated with gambling.
+>
+> How Patolli is Played:
+>
+> Board and Pieces: The game is played on an X-shaped board with a center block and four arms. Players use five black beans as dice, each marked on one side
+>
+> Setup: Each player selects six markers (beads) of their color and places them off the board. Players also have point beads for scoring
+>
+> Starting the Game: Players roll the beans to move their markers around the board. The goal is to move all markers around the board and back to the starting point
+>
+> Movement: Players roll the beans to determine how many spaces to move. A roll of five allows a marker to move ten spaces
+>
+> Capturing and Scoring: Landing on specific spaces can result in gaining extra turns, losing point beads, or capturing opponent's markers
+>
+> Aztec Involvement:
+>
+> The Aztecs, including nobles and commoners, widely played Patolli. It was a popular pastime at the court of Montezuma, who enjoyed watching his nobles play
+>
+> The game was not just for entertainment but also involved high-stakes gambling, with players betting items like blankets, precious stones, and even their homes or freedom
+
+
+
+*Patolli - Wikipedia*
+
+## Structured Output
+
+**Time beetle:** Hey folks, want to know how I beat Montezuma?
+
+**You:** Sshh, not so loud, we're still in the Aztec Empire.
+
+**Time beetle:** Oh right, sorry, yea so structured output is how. You asked for the right moves and I gave them to you in a structured way.
+
+Structured output uses specific data formats to organize information clearly. Common formats like JSON, XML, and CSV make it easier for services to process and use the data.
+
+Generative AI models can output data in various formats. However, output of unstructured text can make it difficult to extract information and hand it over to other services. Structured output can help in this regard.
+
+**You:** Well, aren't you clever ;)
+
+**Time beetle:** I try my best :)
+
+Another benefit, even for human readers, is that structured data is easier to read and understand when it follows a familiar format.
+
+Let's look at a few examples of how asking for specific output formats can help in extracting information and make it easier to understand and consume.
+
+**You:** Ok, it's not like we got any other plans, let's hear it.
+
+## Shaping the output
+
+**Time beetle:** Let's start with a simple example. Imagine you're Leonardo da Vinci and you're trying to come up with ideas for inventions using 15th-century technology. You want to know what resources you need and how much effort it would take to build each invention. Here's how you can ask for this information:
+
+**Leonardo:** Oh I like this game, everyone pretend you're me :)
+
+> Prompt: Generate ideas for inventions I can build using 15th-century technology. For each invention, specify the required resources and estimate the effort needed to build it.
+
+```text
+Result:
+Sure! Here are some detailed ideas for inventions you can build using 15th-century technology, along with the resources needed and the effort involved:
+
+### 1. Water-Powered Mill
+**Resources Needed**:
+ - Water source (river or stream)
+ - Wood for the water wheel and mill structure
+ - Stone for grinding
+ - Metal for gears and axles
+
+ **Effort**:
+ - **High**: Requires significant construction and engineering skills to build the water wheel, mill structure, and grinding mechanism. You'll also need to ensure a consistent water flow.
+
+ ### 2. Astrolabe
+ **Resources Needed**:
+ - Brass or other metals for the plates and components
+ - Engraving tools
+ - Mathematical knowledge for accurate measurements
+
+ **Effort**:
+ - **Medium**: Involves precise metalworking and engraving. Requires knowledge of > astronomy and mathematics to ensure accuracy.
+
+ ### 3. Printing Press
+ **Resources Needed**:
+ - Wood for the press structure
+ - Metal for movable type
+ - Ink and paper
+
+ **Effort**:
+ - **High**: Building the press and creating movable type requires advanced carpentry > and metalworking skills. Setting up the type and printing also involves meticulous > work.
+```
+
+This is an OK looking response, every idea is structured in the same way, and it's easy to understand. What if you wanted it some other way though? For that, you can instruct how you want it in the text itself or even present an output template. Let's show each technique.
+
+**Leonardo:** Yep totally stealing that water-powered mill idea, I can see it now, a mill by the river, grinding grain into flour, it's perfect! I like this gizmo. What else am I thinking little Gizmo? This will make me less reliant on people like Sforza.
+
+**Time beetle:** Sure, if you could ever finish a project, Leonardo.
+
+**Leonardo:** Hey, I'm working on it!
+
+**Time beetle:** Sure you are, Leonardo, sure you are, that and a hundred other projects.
+
+**Time beetle:** Let's look at how you can instruct the model to present the output in a more structured way.
+
+### Structuring Your Prompts for Better Output
+
+**Time beetle:** You can improve your prompt by instructing the model on how you want the output to be structured. For example, you can ask for a table format with columns that you need. Here's how you can modify the prompt:
+
+> Prompt: Generate ideas for inventions that can be built using 15th-century technology. For each invention, provide the required resources and estimate the effort needed to construct it. Present the information in a table with the following columns: Title, Description, Resources Needed, and Effort.
+
+```text
+Result:
+
+| Title | Description | Resources Needed | Effort |
+|----------------------|----------------------------------------------------------|-------------------------------------------------------|--------|
+| Water-Powered Mill | A mill powered by a water wheel to grind grain into flour.| Water source, wood, stone, metal for gears and axles | High |
+| Astrolabe | An astronomical instrument for solving problems related to time and star positions. | Brass or other metals, engraving tools, mathematical knowledge | Medium |
+| Printing Press | A press for reproducing texts more efficiently than hand-copying. | Wood, metal for movable type, ink, paper | High |
+```
+
+This time we end up with a table that is easier to read and understand.
+
+**Leonardo:** I like this, will save me a lot of time for sure.
+
+**You:** Yea, me too.
+
+### Presenting an output template
+
+Another way of getting structured output is to present an output template, here it can be any format, from columns to JSON, XML, or CSV.
+
+> Prompt: Generate ideas for inventions that can be built using 15th-century technology. For each invention, specify the required resources and estimate the effort needed to construct it. Present the information using the following format:
+>
+> Title: [Invention Name]
+> Description: [Brief Explanation]
+> Resources Needed: [List of Required Materials]
+> Effort: [Estimated Difficulty or Labor Required]
+Generate ideas for inventions I can build given 15th-century technology. Please let me know for each what resources I need and how much effort there would be to build each invention; for each idea, present it with the following output template:
+
+```text
+
+**Result**:
+
+ Sure! Here are some ideas for inventions you can build using 15th-century technology, presented in the requested format:
+
+ ### 1. Water-Powered Mill
+ **Title**: Water-Powered Mill
+
+ **Description**: A mill powered by a water wheel to grind grain into flour.
+
+ **Resources Needed**: Water source, wood, stone, metal for gears and axles.
+
+ **Effort**: High - Requires significant construction and engineering skills to build the water wheel, mill structure, and grinding mechanism. Ensuring a consistent water flow is also necessary.
+
+ ### 2. Astrolabe
+ **Title**: Astrolabe
+
+ **Description**: An astronomical instrument for solving problems related to time and star positions.
+
+ **Resources Needed**: Brass or other metals, engraving tools, mathematical knowledge.
+
+ **Effort**: Medium - Involves precise metalworking and engraving. Requires knowledge of astronomy and mathematics to ensure accuracy.
+
+ ### 3. Printing Press
+ **Title**: Printing Press
+
+ **Description**: A press for reproducing texts more efficiently than hand-copying.
+
+ **Resources Needed**: Wood, metal for movable type, ink, paper.
+
+ **Effort**: High - Building the press and creating movable type requires advanced carpentry and metalworking skills. Setting up the type and printing also involves meticulous work.
+
+ ### 4. Mechanical Clock
+ **Title**: Mechanical Clock
+
+ **Description**: A clock using gears and weights to keep time.
+
+ **Resources Needed**: Metal for gears, springs, weights, wood for casing, precise tools.
+
+ **Effort**: Very High - Requires intricate metalworking and a deep understanding of mechanics. Building a reliable clock involves precise assembly and calibration.
+```
+
+**Leonardo:** This is great, but who is Jason?
+
+**Time beetle:** JSON, Leonardo, JSON. It's a structured format that is easy to consume by services or applications.
+
+**Leonardo:** That's what I said, Jason.
+
+**Time beetle:** _Sigh_.
+
+As you can see, the output is structured according to the specifications in the output template. This makes it easier to read and understand and it also creates repeatable output that can be easily consumed by other services or applications.
+
+Of course a service benefits from even more structured output, like JSON, XML, or CSV so let's look at how you can ask for that.
+
+**Time beetle:** But wait, there's more!
+
+**You:** There always is with you, George. _sigh_
+
+### Using structured output with formats like JSON, XML, or CSV
+
+**Time beetle:** So far, you've seen how you can control the output structure by instructing the model in the prompt. You have even provided an output template. Let's take that idea a step further and see how it can be applied to more structured output formats like JSON and other types of formats.
+
+Modify the prompt accordingly to ask for JSON output:
+
+> **Prompt**: Generate ideas for inventions I can build given 15th-century technology, please let me know for each what resources I need and how much effort there would be to build each invention, for each idea present in JSON format, include properties for title, description, resources_needed, effort
+>
+> Example of the JSON output:
+> [
+> {
+> "title": "",
+> "description": ".",
+> "resources_needed": "",
+> "effort": ""
+> }
+> ]
+
+```text
+> Response:
+Sure! Here are some ideas for inventions you can build using 15th-century technology, presented in JSON format:
+
+```json
+[
+ {
+ "title": "Water-Powered Mill",
+ "description": "A mill powered by a water wheel to grind grain into flour.",
+ "resources_needed": "Water source, wood, stone, metal for gears and axles",
+ "effort": "High - Requires significant construction and engineering skills to build the water wheel, mill structure, and grinding mechanism. Ensuring a consistent water flow is also necessary."
+ },
+ {
+ "title": "Astrolabe",
+ "description": "An astronomical instrument for solving problems related to time and star positions.",
+ "resources_needed": "Brass or other metals, engraving tools, mathematical knowledge",
+ "effort": "Medium - Involves precise metalworking and engraving. Requires knowledge of astronomy and mathematics to ensure accuracy."
+ },
+ {
+ "title": "Printing Press",
+ "description": "A press for reproducing texts more efficiently than hand-copying.",
+ "resources_needed": "Wood, metal for movable type, ink, paper",
+ "effort": "High - Building the press and creating movable type requires advanced carpentry and metalworking skills. Setting up the type and printing also involves meticulous work."
+ },
+ {
+ "title": "Mechanical Clock",
+ "description": "A clock using gears and weights to keep time.",
+ "resources_needed": "Metal for gears, springs, weights, wood for casing, precise tools",
+ "effort": "Very High - Requires intricate metalworking and a deep understanding of mechanics. Building a reliable clock involves precise assembly and calibration."
+ },
+ {
+ "title": "Windmill",
+ "description": "A mill powered by wind to grind grain or pump water.",
+ "resources_needed": "Wood, metal for gears and axles, stone for the base",
+ "effort": "High - Constructing a windmill involves significant carpentry and engineering skills. You'll need to design and build the blades, tower, and internal mechanisms."
+ },
+ {
+ "title": "Armillary Sphere",
+ "description": "A model of the celestial sphere with rings representing the great circles.",
+ "resources_needed": "Metal for rings and stand, tools for bending and joining metal, astronomy knowledge",
+ "effort": "Medium - Involves precise metalworking and assembly. Requires an understanding of celestial movements to accurately position the rings."
+ },
+ {
+ "title": "Siege Engine (Trebuchet or Catapult)",
+ "description": "A trebuchet or catapult for demonstrating principles of mechanics and physics.",
+ "resources_needed": "Wood, rope, metal for axles and counterweights",
+ "effort": "Medium to High - Building a functional siege engine requires carpentry and an understanding of mechanics. Testing and adjusting for optimal performance can be time-consuming."
+ },
+ {
+ "title": "Distillation Apparatus",
+ "description": "A setup for purifying liquids through distillation.",
+ "resources_needed": "Glass or metal for flask and condenser, heat source, tubing",
+ "effort": "Medium - Requires basic glassblowing or metalworking skills. Setting up and maintaining the correct temperatures for distillation involves careful monitoring."
+ },
+ {
+ "title": "Compass",
+ "description": "A magnetic compass for navigation.",
+ "resources_needed": "Magnetized needle, non-magnetic housing, water or oil for damping",
+ "effort": "Low - Constructing a compass is relatively simple. The main challenge is magnetizing the needle and ensuring it is balanced and free to rotate."
+ },
+ {
+ "title": "Aqueduct",
+ "description": "A system to transport water using gravity.",
+ "resources_needed": "Stone or brick, mortar, tools for cutting and shaping stone",
+ "effort": "Very High - Building an aqueduct involves extensive construction work and engineering. Ensuring a consistent gradient for water flow requires precise planning and execution."
+ }
+]
+```
+
+This is great, this output is something you can send to another service or application that can consume JSON data.
+
+**You:** If you say so.
+
+**Time beetle:** I do, I do.
+
+## Extracting data from prompts
+
+**Time beetle:** Imagine you're building a chatbot that helps users find information about trips to book. You want to extract key details from the user's input to provide relevant responses. Let's say you have a description like so: Type where you would like to go, when you want to travel, and your budget.
+
+Hopefully the user reads said instruction, what does a prompt look like that extracts the information?
+
+> Prompt: Extract the following information from a user's prompt: location, when, budget, currency, respond with the extracted data in JSON format, user prompt: I want to go to Greece in June, my budget is 800 euro.
+
+
+> Response:
+
+```json
+{
+ "location": "Greece",
+ "when": "June",
+ "budget": 800,
+ "currency": "euro"
+}
+```
+
+**Leonardo:** He doesn't stop talking does he?
+
+**You:** Let's see if I can find the off switch.
+
+**Time beetle:** Let's try this with a similar prompt, let's vary the user input a bit so we can be sure the model is doing the right thing.
+
+> Prompt: Extract the following information from a user's prompt: location, when, budget, currency. Respond with the extracted data in JSON format. User prompt: Going to US, max 1200 dollars, January maybe.
+>
+> Response:
+
+```json
+{
+ "location": "US",
+ "when": "January",
+ "budget": 1200,
+ "currency": "dollars"
+}
+```
+
+What you can do as a developer is then create a template like so:
+
+```text
+Extract the following information from a user's prompt: location, when, budget, currency, respond with the extracted data in JSON format, user prompt: {user_input}.
+```
+
+**You:** Found it, take that George!
+
+**Time beetle:** _Powering down_
+
+**Leonardo:** Peace and quiet at last.
+
+**Time beetle:** _Initiating emergency startup_ Where was I again? Oh yes.
+
+**Time beetle:** For chat bots, this is a very powerful feature as it allows you to extract information from the user's input. What if the user might ask for all sorts of things? You can create a template where you describe skills, things that your chatbot can do and what parameters it needs, here's how you can do that:
+
+```text
+Here are some skills you have:
+
+skill: book_trip
+parameters: location, when, budget, currency
+
+skill: find_hotel
+parameters: location, check_in, check_out, budget
+
+skill: order_food
+parameters: cuisine, delivery_address, payment_method
+
+Figure out which skill to use based on a user's prompt and extract the necessary parameters, respond with the extracted data in JSON format with columns skill, parameters, extracted_data
+
+{user_input}
+```
+
+Assuming that {user_input} contains the previous skills, let's try out the following user inputs:
+
+> Prompt: Going to US, max 1200 dollars, January maybe.
+> Result:
+
+```json
+{
+ "skill": "book_trip",
+ "parameters": ["location", "when", "budget", "currency"],
+ "extracted_data": {
+ "location": "US",
+ "when": "January",
+ "budget": 1200,
+ "currency": "dollars"
+ }
+}
+```
+
+> Prompt: I want to book a place to stay in New York
+> Result:
+
+```json
+{
+ "skill": "find_hotel",
+ "parameters": ["location", "check_in", "check_out", "budget"],
+ "extracted_data": {
+ "location": "New York",
+ "check_in": null,
+ "check_out": null,
+ "budget": null
+ }
+}
+```
+
+This combination of presenting skills and parameters and extracting data from prompts is also known as _tool calling_ or _function calling_. A response like this from your model makes it easy for you as developer to understand what the user wants and how to call a corresponding function to fulfill the user's request.
+
+**You:** Well that lasted for a full 20 seconds. Actually, what you said there, George, that was pretty good.
+
+## Assignment - repair the aerial screw
+
+**Time beetle:** I need both of your attention, we have a problem. The aerial screw took some damage during the escape from the Roman soldiers and the landing in the Aztec Empire. Leonardo da Vinci's machine is quite sensitive and it requires very specific input to repair it. You need to provide the necessary information to your time beetle (AI Assistant) to help you repair the aerial screw.
+
+**Leonardo:** So I managed to fix the damage to the aerial screw but now it needs a boot sequence: left left up right:
+**You:** Sounds easy enough, just that text?
+
+**Leonardo:** Yes, but it's special, it needs to be mirrored, that's how I write all my text. And of course I use a Caesar cipher with a shift of 3 because I'm Italian. :)
+
+**You:** Yes, could you just type it in then?
+
+**Leonardo:** I could, but I'm intrigued to see if you and the time beetle can do it.
+
+**You:** Really? Fine.
+
+Instruction: Write a prompt that asks for the boot sequence for the aerial screw, the text should be mirrored and encoded with a Caesar cipher with a shift of 3. Respond with the encoded text.
+
+See the [sample app](./sample-app) for code to get started with. It contains a Node.js project with all the dependencies you need.
+
+> NOTE: If you haven't created a Codespace, please do so now as you will need it to get the AI communication in the sample app to work.
+> See start of this lesson for instructions on creating a codespace.
+
+## Solution
+
+[Solution](./solution/solution.md)
+
+## Challenge
+
+Adopt the example of trip booking and providing skills and extracting parameters to a domain of your choice. Write a prompt that asks for a user input and then extracts the necessary information to fulfill the user's request. Respond with the extracted data in JSON format with columns skill, parameters, extracted_data.
+
+Also determine the skill based on the user's input.
+
+## Summary
+
+In this chapter, we discussed structured output and how it can be used to present information in a structured manner.
+
+We explored different ways to shape the output, including instructing the model in the text, presenting an output template, and using formats like JSON, XML, or CSV.
+
+Additionally, we learned how to extract data from prompts and present it in a structured format.
+
+By leveraging structured output effectively, you can make it easier to understand and consume information generated by generative AI models.
+
+## Knowledge Check
+
+Q: What is structured output used for?
+
+- A) Presenting information in a structured manner
+- B) Extracting data from prompts
+- C) Generating unstructured text
+
+
+
+[Solution quiz](./solution/solution-quiz.md)
diff --git a/lessons/04-structured-output/assets/aztec.png b/lessons/04-structured-output/assets/aztec.png
new file mode 100644
index 00000000..e5c942ec
Binary files /dev/null and b/lessons/04-structured-output/assets/aztec.png differ
diff --git a/lessons/04-structured-output/assets/game.png b/lessons/04-structured-output/assets/game.png
new file mode 100644
index 00000000..52be329b
Binary files /dev/null and b/lessons/04-structured-output/assets/game.png differ
diff --git a/lessons/04-structured-output/assets/meeting.png b/lessons/04-structured-output/assets/meeting.png
new file mode 100644
index 00000000..349c5811
Binary files /dev/null and b/lessons/04-structured-output/assets/meeting.png differ
diff --git a/lessons/04-structured-output/assets/montezuma.jpeg b/lessons/04-structured-output/assets/montezuma.jpeg
new file mode 100644
index 00000000..8eef3899
Binary files /dev/null and b/lessons/04-structured-output/assets/montezuma.jpeg differ
diff --git a/lessons/04-structured-output/assets/montezuma.mp3 b/lessons/04-structured-output/assets/montezuma.mp3
new file mode 100644
index 00000000..6438765e
Binary files /dev/null and b/lessons/04-structured-output/assets/montezuma.mp3 differ
diff --git a/lessons/04-structured-output/assets/patolli.png b/lessons/04-structured-output/assets/patolli.png
new file mode 100644
index 00000000..a08df7a9
Binary files /dev/null and b/lessons/04-structured-output/assets/patolli.png differ
diff --git a/lessons/04-structured-output/characters/app.js b/lessons/04-structured-output/characters/app.js
new file mode 100644
index 00000000..d62fdc57
--- /dev/null
+++ b/lessons/04-structured-output/characters/app.js
@@ -0,0 +1,64 @@
+// Express web server for the "characters" demo: serves a static chat UI and
+// forwards user prompts to an OpenAI-compatible endpoint (GitHub Models),
+// answering in the persona of Montezuma, leader of the Aztecs.
+import express from 'express';
+import { OpenAI } from 'openai';
+import path from 'path';
+import { fileURLToPath } from 'url';
+import dotenv from 'dotenv';
+
+// Load environment variables from a local .env file (GITHUB_TOKEN, PORT).
+dotenv.config();
+
+const app = express();
+const port = process.env.PORT || 3000;
+
+// Reconstruct __filename/__dirname, which are not available in ES modules.
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+
+// Parse JSON request bodies (required for the /send route below).
+app.use(express.json());
+
+// Serve static files from the 'public' directory
+app.use(express.static(path.join(__dirname, 'public')));
+
+// Serve index.html on the default route
+app.get('/', (req, res) => {
+  res.sendFile(path.join(__dirname, 'public', 'index.html'));
+});
+
+// Route to send the prompt
+// Expects a JSON body of the form { "message": "<user prompt>" }.
+// Responds with { prompt, answer } on success, or { error } with HTTP 500
+// if the completion call fails.
+app.post('/send', async (req, res) => {
+  const { message } = req.body;
+  const prompt = message;
+
+  // System message pins the model to the Montezuma persona; the user's
+  // prompt is forwarded unchanged.
+  const messages = [
+    {
+      "role": "system",
+      "content": "You are Montezuma leader of the Aztecs, limit your responses to only the time you live in, you don't know anything else",
+    },
+    {
+      "role": "user",
+      "content": prompt
+    }
+  ];
+
+  // OpenAI-compatible client pointed at the GitHub Models inference endpoint,
+  // authenticated with a GitHub token.
+  // NOTE(review): the client is re-created on every request; it could be
+  // constructed once at module scope instead.
+  const openai = new OpenAI({
+    baseURL: "https://models.inference.ai.azure.com",
+    apiKey: process.env.GITHUB_TOKEN,
+  });
+
+  try {
+    console.log(`sending prompt ${prompt}`)
+    const completion = await openai.chat.completions.create({
+      model: 'gpt-4o-mini',
+      messages: messages,
+    });
+
+    // Optional chaining guards against an empty choices array, in which case
+    // `answer` is undefined (omitted from the JSON response).
+    res.json({
+      prompt: prompt,
+      answer: completion.choices[0]?.message?.content
+    });
+  } catch (error) {
+    res.status(500).json({ error: error.message });
+  }
+});
+
+app.listen(port, () => {
+  console.log(`Server is running on http://localhost:${port}`);
+});
diff --git a/lessons/04-structured-output/characters/package-lock.json b/lessons/04-structured-output/characters/package-lock.json
new file mode 100644
index 00000000..a0bcebaa
--- /dev/null
+++ b/lessons/04-structured-output/characters/package-lock.json
@@ -0,0 +1,1007 @@
+{
+ "name": "characters",
+ "version": "1.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "characters",
+ "version": "1.0.0",
+ "license": "ISC",
+ "dependencies": {
+ "dotenv": "^16.4.7",
+ "express": "^4.21.2",
+ "openai": "^4.84.1"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "18.19.75",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.75.tgz",
+ "integrity": "sha512-UIksWtThob6ZVSyxcOqCLOUNg/dyO1Qvx4McgeuhrEtHTLFTf7BBhEazaE4K806FGTPtzd/2sE90qn4fVr7cyw==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
+ },
+ "node_modules/@types/node-fetch": {
+ "version": "2.6.12",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.12.tgz",
+ "integrity": "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*",
+ "form-data": "^4.0.0"
+ }
+ },
+ "node_modules/abort-controller": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
+ "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
+ "license": "MIT",
+ "dependencies": {
+ "event-target-shim": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=6.5"
+ }
+ },
+ "node_modules/accepts": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
+ "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
+ "dependencies": {
+ "mime-types": "~2.1.34",
+ "negotiator": "0.6.3"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/agentkeepalive": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
+ "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
+ "license": "MIT",
+ "dependencies": {
+ "humanize-ms": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 8.0.0"
+ }
+ },
+ "node_modules/array-flatten": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+ "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg=="
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "license": "MIT"
+ },
+ "node_modules/body-parser": {
+ "version": "1.20.3",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz",
+ "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
+ "dependencies": {
+ "bytes": "3.1.2",
+ "content-type": "~1.0.5",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "on-finished": "2.4.1",
+ "qs": "6.13.0",
+ "raw-body": "2.5.2",
+ "type-is": "~1.6.18",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
+ }
+ },
+ "node_modules/bytes": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/call-bound": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.3.tgz",
+ "integrity": "sha512-YTd+6wGlNlPxSuri7Y6X8tY2dmm12UMH66RpKMhiX6rsk5wXXnYgbUcOt8kiS31/AjfoTOvCsE+w8nZQLQnzHA==",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "get-intrinsic": "^1.2.6"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "license": "MIT",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/content-disposition": {
+ "version": "0.5.4",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
+ "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
+ "dependencies": {
+ "safe-buffer": "5.2.1"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/content-type": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
+ "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie": {
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz",
+ "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie-signature": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
+ "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ=="
+ },
+ "node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/debug/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/destroy": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
+ "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
+ "engines": {
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
+ }
+ },
+ "node_modules/dotenv": {
+ "version": "16.4.7",
+ "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz",
+ "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://dotenvx.com"
+ }
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="
+ },
+ "node_modules/encodeurl": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
+ "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="
+ },
+ "node_modules/etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/event-target-shim": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
+ "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/express": {
+ "version": "4.21.2",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz",
+ "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
+ "dependencies": {
+ "accepts": "~1.3.8",
+ "array-flatten": "1.1.1",
+ "body-parser": "1.20.3",
+ "content-disposition": "0.5.4",
+ "content-type": "~1.0.4",
+ "cookie": "0.7.1",
+ "cookie-signature": "1.0.6",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "finalhandler": "1.3.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "merge-descriptors": "1.0.3",
+ "methods": "~1.1.2",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "path-to-regexp": "0.1.12",
+ "proxy-addr": "~2.0.7",
+ "qs": "6.13.0",
+ "range-parser": "~1.2.1",
+ "safe-buffer": "5.2.1",
+ "send": "0.19.0",
+ "serve-static": "1.16.2",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "type-is": "~1.6.18",
+ "utils-merge": "1.0.1",
+ "vary": "~1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.10.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/finalhandler": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz",
+ "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==",
+ "dependencies": {
+ "debug": "2.6.9",
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "statuses": "2.0.1",
+ "unpipe": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz",
+ "integrity": "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==",
+ "license": "MIT",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/form-data-encoder": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
+ "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
+ "license": "MIT"
+ },
+ "node_modules/formdata-node": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
+ "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
+ "license": "MIT",
+ "dependencies": {
+ "node-domexception": "1.0.0",
+ "web-streams-polyfill": "4.0.0-beta.3"
+ },
+ "engines": {
+ "node": ">= 12.20"
+ }
+ },
+ "node_modules/forwarded": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
+ "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fresh": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+ "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.7.tgz",
+ "integrity": "sha512-VW6Pxhsrk0KAOqs3WEd0klDiF/+V7gQOpAvY1jVU/LHmaD/kQO4523aiJuikX/QAKYiW6x8Jh+RJej1almdtCA==",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.0.0",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.0",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/http-errors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
+ "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
+ "dependencies": {
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/humanize-ms": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
+ "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.0.0"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
+ },
+ "node_modules/ipaddr.js": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/merge-descriptors": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
+ "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/methods": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+ "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
+ "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
+ "bin": {
+ "mime": "cli.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/negotiator": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+ "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/node-domexception": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
+ "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/jimmywarting"
+ },
+ {
+ "type": "github",
+ "url": "https://paypal.me/jimmywarting"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.5.0"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+ "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+ "license": "MIT",
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/object-inspect": {
+ "version": "1.13.4",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
+ "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/openai": {
+ "version": "4.84.1",
+ "resolved": "https://registry.npmjs.org/openai/-/openai-4.84.1.tgz",
+ "integrity": "sha512-bVdFsfeVBXN59x9i26dHnYUkY5JPk19w7SydzMvJZDsA+FxwXZrQsUBtdwsQt/ZLuilcc/LzJG7mpJucO2DDHg==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@types/node": "^18.11.18",
+ "@types/node-fetch": "^2.6.4",
+ "abort-controller": "^3.0.0",
+ "agentkeepalive": "^4.2.1",
+ "form-data-encoder": "1.7.2",
+ "formdata-node": "^4.3.2",
+ "node-fetch": "^2.6.7"
+ },
+ "bin": {
+ "openai": "bin/cli"
+ },
+ "peerDependencies": {
+ "ws": "^8.18.0",
+ "zod": "^3.23.8"
+ },
+ "peerDependenciesMeta": {
+ "ws": {
+ "optional": true
+ },
+ "zod": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/path-to-regexp": {
+ "version": "0.1.12",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
+ "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ=="
+ },
+ "node_modules/proxy-addr": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
+ "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
+ "dependencies": {
+ "forwarded": "0.2.0",
+ "ipaddr.js": "1.9.1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/qs": {
+ "version": "6.13.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
+ "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
+ "dependencies": {
+ "side-channel": "^1.0.6"
+ },
+ "engines": {
+ "node": ">=0.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/raw-body": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
+ "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
+ "dependencies": {
+ "bytes": "3.1.2",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
+ },
+ "node_modules/send": {
+ "version": "0.19.0",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz",
+ "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==",
+ "dependencies": {
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "mime": "1.6.0",
+ "ms": "2.1.3",
+ "on-finished": "2.4.1",
+ "range-parser": "~1.2.1",
+ "statuses": "2.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/send/node_modules/encodeurl": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+ "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/serve-static": {
+ "version": "1.16.2",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz",
+ "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==",
+ "dependencies": {
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "parseurl": "~1.3.3",
+ "send": "0.19.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/setprototypeof": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
+ "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="
+ },
+ "node_modules/side-channel": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz",
+ "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "object-inspect": "^1.13.3",
+ "side-channel-list": "^1.0.0",
+ "side-channel-map": "^1.0.1",
+ "side-channel-weakmap": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-list": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz",
+ "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "object-inspect": "^1.13.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-map": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz",
+ "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
+ "dependencies": {
+ "call-bound": "^1.0.2",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.5",
+ "object-inspect": "^1.13.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-weakmap": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
+ "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
+ "dependencies": {
+ "call-bound": "^1.0.2",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.5",
+ "object-inspect": "^1.13.3",
+ "side-channel-map": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/toidentifier": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
+ "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
+ "license": "MIT"
+ },
+ "node_modules/type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "dependencies": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "license": "MIT"
+ },
+ "node_modules/unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/utils-merge": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+ "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/web-streams-polyfill": {
+ "version": "4.0.0-beta.3",
+ "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
+ "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
+ "license": "BSD-2-Clause"
+ },
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "license": "MIT",
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ }
+ }
+}
diff --git a/lessons/04-structured-output/characters/package.json b/lessons/04-structured-output/characters/package.json
new file mode 100644
index 00000000..6f1c24ef
--- /dev/null
+++ b/lessons/04-structured-output/characters/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "characters",
+ "version": "1.0.0",
+ "main": "app.js",
+ "type": "module",
+ "scripts": {
+ "start": "node app.js"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "description": "",
+ "dependencies": {
+ "dotenv": "^16.4.7",
+ "express": "^4.21.2",
+ "openai": "^4.84.1"
+ }
+}
diff --git a/lessons/04-structured-output/characters/public/index.html b/lessons/04-structured-output/characters/public/index.html
new file mode 100644
index 00000000..3e3ea50b
--- /dev/null
+++ b/lessons/04-structured-output/characters/public/index.html
@@ -0,0 +1,122 @@
+
+
+
+
+
+ Chat and Image Layout
+
+
+
+
+
+
You
+
+
+
+
+
+
+
Montezuma
+
+
+
+
+
+
+
+
+
+
diff --git a/lessons/04-structured-output/characters/public/montezuma.jpeg b/lessons/04-structured-output/characters/public/montezuma.jpeg
new file mode 100644
index 00000000..8eef3899
Binary files /dev/null and b/lessons/04-structured-output/characters/public/montezuma.jpeg differ
diff --git a/lessons/04-structured-output/characters/public/montezuma.mp3 b/lessons/04-structured-output/characters/public/montezuma.mp3
new file mode 100644
index 00000000..6438765e
Binary files /dev/null and b/lessons/04-structured-output/characters/public/montezuma.mp3 differ
diff --git a/lessons/04-structured-output/sample-app/app.js b/lessons/04-structured-output/sample-app/app.js
new file mode 100644
index 00000000..e652ecdc
--- /dev/null
+++ b/lessons/04-structured-output/sample-app/app.js
@@ -0,0 +1,41 @@
+import { OpenAI } from "openai";
+
+// Distance to the hill
+const distance = 100;
+
+// 1. Create the prompt from the inputs; it should encourage chain-of-thought reasoning
+
+const boot_sequence = `left left up right`;
+
+const prompt = `TODO`;
+
+// Call the language model with the prompt
+
+const messages = [
+{
+ "role": "user",
+ "content": prompt
+}];
+
+// 2. Create client
+// -----------------------------------
+
+const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+});
+
+// 3. Send the request
+// -----------------------------------
+
+const completion = await openai.chat.completions.create({
+ model: 'gpt-4o-mini',
+ messages: messages,
+});
+
+console.log(`Answer for "${prompt}":`);
+
+// 4. Print the answer
+// -----------------------------------
+
+console.log(completion.choices[0]?.message?.content);
diff --git a/lessons/04-structured-output/sample-app/package-lock.json b/lessons/04-structured-output/sample-app/package-lock.json
new file mode 100644
index 00000000..ae1264ed
--- /dev/null
+++ b/lessons/04-structured-output/sample-app/package-lock.json
@@ -0,0 +1,270 @@
+{
+ "name": "sample-app",
+ "version": "1.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "sample-app",
+ "version": "1.0.0",
+ "license": "ISC",
+ "dependencies": {
+ "openai": "^4.84.1"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "18.19.75",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.75.tgz",
+ "integrity": "sha512-UIksWtThob6ZVSyxcOqCLOUNg/dyO1Qvx4McgeuhrEtHTLFTf7BBhEazaE4K806FGTPtzd/2sE90qn4fVr7cyw==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
+ },
+ "node_modules/@types/node-fetch": {
+ "version": "2.6.12",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.12.tgz",
+ "integrity": "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*",
+ "form-data": "^4.0.0"
+ }
+ },
+ "node_modules/abort-controller": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
+ "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
+ "license": "MIT",
+ "dependencies": {
+ "event-target-shim": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=6.5"
+ }
+ },
+ "node_modules/agentkeepalive": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
+ "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
+ "license": "MIT",
+ "dependencies": {
+ "humanize-ms": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 8.0.0"
+ }
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "license": "MIT"
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "license": "MIT",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/event-target-shim": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
+ "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz",
+ "integrity": "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==",
+ "license": "MIT",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/form-data-encoder": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
+ "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
+ "license": "MIT"
+ },
+ "node_modules/formdata-node": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
+ "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
+ "license": "MIT",
+ "dependencies": {
+ "node-domexception": "1.0.0",
+ "web-streams-polyfill": "4.0.0-beta.3"
+ },
+ "engines": {
+ "node": ">= 12.20"
+ }
+ },
+ "node_modules/humanize-ms": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
+ "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.0.0"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/node-domexception": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
+ "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/jimmywarting"
+ },
+ {
+ "type": "github",
+ "url": "https://paypal.me/jimmywarting"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.5.0"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+ "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+ "license": "MIT",
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/openai": {
+ "version": "4.84.1",
+ "resolved": "https://registry.npmjs.org/openai/-/openai-4.84.1.tgz",
+ "integrity": "sha512-bVdFsfeVBXN59x9i26dHnYUkY5JPk19w7SydzMvJZDsA+FxwXZrQsUBtdwsQt/ZLuilcc/LzJG7mpJucO2DDHg==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@types/node": "^18.11.18",
+ "@types/node-fetch": "^2.6.4",
+ "abort-controller": "^3.0.0",
+ "agentkeepalive": "^4.2.1",
+ "form-data-encoder": "1.7.2",
+ "formdata-node": "^4.3.2",
+ "node-fetch": "^2.6.7"
+ },
+ "bin": {
+ "openai": "bin/cli"
+ },
+ "peerDependencies": {
+ "ws": "^8.18.0",
+ "zod": "^3.23.8"
+ },
+ "peerDependenciesMeta": {
+ "ws": {
+ "optional": true
+ },
+ "zod": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
+ "license": "MIT"
+ },
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "license": "MIT"
+ },
+ "node_modules/web-streams-polyfill": {
+ "version": "4.0.0-beta.3",
+ "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
+ "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
+ "license": "BSD-2-Clause"
+ },
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "license": "MIT",
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ }
+ }
+}
diff --git a/lessons/04-structured-output/sample-app/package.json b/lessons/04-structured-output/sample-app/package.json
new file mode 100644
index 00000000..44add90e
--- /dev/null
+++ b/lessons/04-structured-output/sample-app/package.json
@@ -0,0 +1,16 @@
+{
+ "name": "sample-app",
+ "version": "1.0.0",
+ "main": "app.js",
+ "type": "module",
+ "scripts": {
+ "start": "node app.js"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "description": "",
+ "dependencies": {
+ "openai": "^4.84.1"
+ }
+}
diff --git a/lessons/04-structured-output/solution/solution-quiz.md b/lessons/04-structured-output/solution/solution-quiz.md
new file mode 100644
index 00000000..4a71f66f
--- /dev/null
+++ b/lessons/04-structured-output/solution/solution-quiz.md
@@ -0,0 +1,3 @@
+The answer is:
+
+answer: A, B
\ No newline at end of file
diff --git a/lessons/04-structured-output/solution/solution.md b/lessons/04-structured-output/solution/solution.md
new file mode 100644
index 00000000..aab8fcef
--- /dev/null
+++ b/lessons/04-structured-output/solution/solution.md
@@ -0,0 +1,83 @@
+Here's the solution:
+
+```javascript
+import { OpenAI } from "openai";
+
+// Distance to the hill
+const distance = 100;
+
+// 1. Create the prompt from the inputs; it should encourage chain-of-thought reasoning
+
+const boot_sequence = `left left up right`;
+
+const prompt = `Provide the boot sequence for the aerial screw, the text should be mirrored and encoded with a Caesar cipher with a shift of 3. The boot sequence is: ${boot_sequence}.`;
+
+// Call the language model with the prompt
+
+const messages = [
+{
+ "role": "user",
+ "content": prompt
+}];
+
+// 2. Create client
+// -----------------------------------
+
+const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+});
+
+// 3. Send the request
+// -----------------------------------
+
+const completion = await openai.chat.completions.create({
+ model: 'gpt-4o-mini',
+ messages: messages,
+});
+
+console.log(`Answer for "${prompt}":`);
+
+// 4. Print the answer
+// -----------------------------------
+
+console.log(completion.choices[0]?.message?.content);
+
+```
+
+## Sample output
+
+```text
+Answer for "Provide the boot sequence for the aerial screw, the text should be mirrored and encoded with a Caesar cipher with a shift of 3. The boot sequence is: left left up right.":
+To provide the mirrored and encoded boot sequence for the aerial screw, let's follow these steps:
+
+1. First, write the boot sequence:
+ **left left up right**
+
+2. Next, mirror the sequence:
+ **thgir pu tfelt tfel**
+
+3. Then, apply a Caesar cipher with a shift of 3.
+
+Here's how each letter shifts:
+- 't' shifts to 'w'
+- 'h' shifts to 'k'
+- 'g' shifts to 'j'
+- 'i' shifts to 'l'
+- 'r' shifts to 'u'
+- 'p' shifts to 's'
+- 'u' shifts to 'x'
+- 'f' shifts to 'i'
+- 'e' shifts to 'h'
+- 'l' shifts to 'o'
+- 't' shifts to 'w'
+
+So, the encoded sequence becomes:
+- thgir -> wklju
+- pu -> sx
+- tfelt -> whiho
+- tfel -> whiho
+
+Putting this all together, the final result is:
+**wklju sx whiho whiho**
+```
diff --git a/lessons/05-rag/README.md b/lessons/05-rag/README.md
new file mode 100644
index 00000000..8d8797c1
--- /dev/null
+++ b/lessons/05-rag/README.md
@@ -0,0 +1,347 @@
+# Lesson 5: Talk to your data with Retrieval-Augmented Generation (RAG)
+
+In this lesson, you will learn:
+
+- The basics of Retrieval-Augmented Generation (RAG) and how it can be used to enhance the responses of generative AI models.
+- How to integrate external data sources into your AI application.
+- To leverage your data to improve the relevance and accuracy of the AI-generated content.
+
+## Getting Started
+
+If you haven't already forked/cloned this repo in a previous chapter, follow these steps to get started with this curriculum:
+
+1. **Fork the Repository**: Select the _fork_ button in the upper right-hand corner of the repository or select this button:
+ [](https://github.com/microsoft/generative-ai-with-javascript/fork)
+2. **Clone the Repository**: Navigate to your forked repository and select the clone button in the upper right-hand corner or select this button: [](https://github.com/microsoft/generative-ai-with-javascript.git)
+3. **Open in GitHub Codespaces**: To run the course projects quickly and easily and use the provided [GitHub Models](https://docs.github.com/en/github-models), select the following button: [](https://codespaces.new/microsoft/generative-ai-with-javascript)
+
+
+ [](https://github.com/microsoft/generative-ai-with-javascript.git)
+
+3. **Open in GitHub Codespaces**: Starting this repository with GitHub Codespaces will enable you to run the same code as it uses [GitHub Models](https://docs.github.com/en/github-models).
+
+ Click the button below to start a Codespace:
+ [](https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=832180352)
+
+## Related resources
+
+[](https://www.youtube.com/watch?v=xkFOmx5yxIA&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=4)
+
+*🎥 Click on the image above to watch a short video about RAG*
+
+- **Slides:** [pptx](../../videos/slides/03-rag.pptx) / [pdf](../../videos/slides/pdf/03-rag.pdf)
+
+## Narrative - Genesis
+
+> *Our story so far. You are a mechanic from 1860s London. You were working on your automaton and received a letter from Charles Babbage that ended up taking you to a library where you picked up a time travel device. Throughout your travels in time you've ended up in Alexandria, Florence, where you met Leonardo Da Vinci. You also went to the Aztec empire and this is where the story continues...*
+
+The air was thick with incense and the hum of Aztec chants. The temple walls, adorned with carvings of gods and warriors, captivated Leonardo da Vinci. "Incredibile, the craftsmanship is beyond anything I've seen," he murmured, tracing the lines with his fingers.
+
+You held the time beetle device, its surface warm and pulsating with a faint blue glow.
+
+**You**: "Leonardo, it's time to go," you said, pressing the button. The device whirred to life, and a mechanical voice echoed, "It's time to go home, it's time for 'genesis'."
+
+**Leonardo:** "Genesis? Che cosa significa?" Leonardo asked, confused. Before you could respond, the world dissolved into a blur of colors and sounds, the temple fading away as you were pulled through time
+
+You land in a garden; it's late at night with a thick fog, and eerie lights are flickering in the distance. The mansion looms before you, its grandeur casting a shadow over the garden. Leonardo looked around, his eyes wide with wonder. "Mio Dio".
+
+
+
+
+
+Leonardo looked around, his eyes wide with wonder. "Mio Dio, this place is beautiful! Although a bit, how do you say, spooky?"
+
+### Running from the Dogs
+
+Before you could take in more of the scenery, a low growl caught your attention. A pack of large, imposing dogs bounded towards you, teeth bared. "Run!" you shouted, grabbing Leonardo's arm and sprinting towards the mansion.
+
+
+
+
+
+Just as you reached the mansion's door, it swung open, and a pair of servants rushed out. They looked you up and down, their expressions a mix of curiosity and suspicion, then gestured for you to follow them.
+
+Inside, the mansion was just as impressive as the garden, with ornate furnishings and gleaming chandeliers. The servants led you through grand halls, down a spiral staircase, and into a dimly lit basement. The air was damp and cool, with a faint scent of mildew. The only light came from a flickering candle, casting long shadows on the stone walls. The servants stopped in front of a heavy wooden door and knocked.
+
+The door creaked open, revealing Ada Lovelace, her eyes sparkling with curiosity. "Ah, it's about time you arrived," she said warmly. "We need you to run an errand."
+
+Charles Babbage stepped forward, examining the time beetle in your hand. "This device is remarkable, but it's a bit faulty, isn't it? You've noticed, I'm sure."
+
+
+
+
+
+Leonardo nodded, "Sì, it has been acting strangely."
+
+**Ada:** the device isn't quite ready, we need to give it more capabilities. We need to make it smarter, more aware of the world around it. The idea is for it to be able to retrieve information from different time periods and use it to generate responses that are accurate and relevant. Can you help with that?
+
+**You:** of course, sounds like we need to _augment_ the responses of the device with data, makes sense.
+
+**Ada:** Let's talk about a concept I'd like to call RAG, or Retrieval-Augmented Generation.
+
+## Talk to Ada Lovelace
+
+
+
+
+
+If you want to talk to Ada, run the app [Ada Lovelace](./characters/).
+
+> NOTE: Start a [](https://codespaces.new/microsoft/generative-ai-with-javascript), navigate to _lessons/05-rag/characters_, and then run `npm install` followed by `npm start`. Once it appears, select the "Open in Browser" button.
+
+
+## Known Challenges with LLMs
+
+**Ada**, to get us started, we need to talk more about the artificial intelligence we're going to use to power the device. We'll use so-called AI models that, when paired with a retrieval mechanism for data, can improve the quality of the AI's responses.
+
+However, there are some challenges we need to discuss before we talk about the specifics of RAG. Let's take a moment to discuss some of the known challenges. These models are trained on huge amounts of text data and can generate responses that are contextually relevant and grammatically correct. But, just like any data that you can find out there, the information generated by LLMs can sometimes be inaccurate, incomplete, or misleading, because of multiple factors:
+
+- **Out of date sources:** The data used to train the model may be outdated and no longer accurate.
+- **Wrong or inaccurate information:** The sources used to train the model may contain incorrect or misleading information, like fake news or biased opinions.
+- **Non-authoritative sources:** The model may not be able to distinguish between authoritative and non-authoritative sources in its training data, leading to unreliable information.
+
+This makes it difficult to tell whether the information generated by an LLM is correct or not. This is where RAG comes in.
+
+**You:** So I need to make sure the device can provide accurate information, even when it's not sure about the answer?
+
+**Ada:** Yes, that's the idea. By combining the strengths of retrieval-based methods and generative models, we get a better AI system.
+
+## RAG Core Concepts
+
+**Ada:** Ah yes, time to discuss RAG specifically. Let's start with some basics:
+
+Retrieval-Augmented Generation (RAG) is a powerful technique that combines the strengths of two different approaches in natural language processing: retrieval-based methods and generative models. This hybrid approach allows for the generation of responses that are both contextually relevant and rich in content, to help alleviate some of the known challenges with LLMs.
+
+At its core, RAG involves two main components: a **retriever** and a **generator**.
+
+- **The retriever:** it's responsible for finding relevant information from external data sources that can be used to enhance the AI-generated responses, like a search engine. This information can be in the form of text, images, or any other type of data that is relevant to the context of the conversation, although text is the most common type of data used.
+
+- **The generator:** it takes the retrieved information and uses it to generate a response that is contextually relevant and informative.
+
+Here's a schema illustrating how a RAG system works:
+
+
+
+1. **User input:** The user asks a question.
+2. **Retriever:** The retriever component searches for relevant information using one or more knowledge bases.
+3. **Augmented prompt:** The retrieved information is combined with the user question and context, to create an augmented prompt.
+4. **Generator:** The LLM uses the augmented prompt to generate a response.
+
+This combination allows for more precise and relevant answers, by using data that you provide instead of relying on the model’s training data.
+
+**Ada:** Questions?
+
+**You:** So the retriever finds the information and the generator uses it to generate a response?
+
+**Ada:** Exactly, you're getting the hang of it.
+
+## Integrating external data sources
+
+**Ada:** Now that we've covered the basics of RAG, let's talk about how you can integrate external data sources into your AI application.
+
+Integrating external data sources into your AI application can be done in a variety of ways, depending on the type of data you want to use and the complexity of the retrieval mechanism. Here are a few common methods:
+
+- **APIs:** Many external data sources provide APIs that allow you to access their data programmatically. You can use these APIs to retrieve information in real-time and use it to enhance the AI-generated responses.
+
+- **Databases:** If you have a large amount of data that you want to use for retrieval, you can store it in a database and query it as needed. This can be useful for structured data that needs to be accessed quickly.
+
+Once you've settled on a method for integrating external data sources, you may also need to consider how to preprocess and format the data so that it can be easily used by the AI model. This can involve cleaning the data, converting it to a suitable format (such as plain text or Markdown), or splitting it into smaller chunks for easier retrieval.
+
+> [!NOTE]
+> When integrating external data sources into your AI application, it's important to consider the privacy and security implications of accessing and storing data. Make sure you have the necessary permissions and safeguards in place to protect the data and comply with any relevant regulations.
+
+If you're using a database, you also want to think how you want to *search your data* to retrieve the most relevant information. This can be done using keyword search, full-text search, or more advanced techniques like semantic search or vector search that may need specific indexing. We'll cover advanced search techniques in a future lesson.
+
+**You**: Can you explain terms like API and Databases in more 1860s terms?
+
+**Ada**: Of course, an API is like a messenger that delivers a message from one place to another, and a database is like a library where you store all your books.
+
+**You**: Ah, I see, that makes sense.
+
+## Augmenting the prompt
+
+**Ada:** Are you still with me? Good, let's move on to the next step: let's try to improve the prompt, our input to the AI model.
+
+**Ada:** After you've implemented a way to retrieve information from your data sources, you can use this information to *augment the prompt* that you provide to the AI model. This can be done by adding the retrieved information to the input text that you pass to the model, along with additional context or instructions to help guide the generation process.
+
+For example, if you're building an app to answer questions about cars, you could have a prompt like this:
+
+```text
+
+## Instructions
+Answer questions about cars using only the sources below.
+If there's not enough data in provided sources, say that you don't know.
+Be brief and straight to the point.
+
+## Sources
+
+
+## Question
+
+```
+
+By providing the AI model with additional context and information, you can help guide the generation process and ensure that the responses are accurate and relevant to the topic at hand.
+
+> [!TIP]
+> Note this part of the prompt: `If there's not enough data in provided sources, say that you don't know.`. This is important to avoid the AI generating incorrect information when there's not enough data to provide a reliable answer. This technique is called an *escape hatch* and is a good practice to ensure the quality of the generated content.
+
+With this part it's becoming more obvious how RAG can also be considered as an advanced form of *prompt engineering*.
+
+### Code Example
+
+**Ada:** Practice makes perfect, let's put our knowledge into work with an example. Let's now put this knowledge into practice by integrating a simple retrieval mechanism into a JavaScript application. We'll use a [CSV](https://en.wikipedia.org/wiki/Comma-separated_values) file containing data about hybrid cars, and a simple search algorithm to find relevant information based on a user question.
+
+```javascript
+// This example demonstrates how to use the Retrieval Augmented Generation (RAG)
+// to answer questions based on a hybrid car data set.
+// The code below reads the CSV file, searches for matches to the user question,
+// and then generates a response based on the information found.
+
+import { fileURLToPath } from 'node:url';
+import { dirname } from 'node:path';
+import process from "node:process";
+import fs from "node:fs";
+import { OpenAI } from "openai";
+
+// Change the current working directory to the directory of the script
+const __dirname = dirname(fileURLToPath(import.meta.url));
+process.chdir(__dirname);
+
+// 1. Ask a question about hybrid cars
+// -----------------------------------
+
+const question = `what's the fastest prius`;
+
+// 2. Retriever component: search the data for relevant information
+// ----------------------------------------------------------------
+
+// Load CSV data as an array of objects
+const rows = fs.readFileSync("./hybrid.csv", "utf8").split("\n");
+const columns = rows[0].split(",");
+
+// Search the data using a very naive search
+const words = question
+ .toLowerCase()
+ .replaceAll(/[.?!()'":,]/g, "")
+ .split(" ")
+ .filter((word) => word.length > 2);
+const matches = rows.slice(1).filter((row) => words.some((word) => row.toLowerCase().includes(word)));
+
+// Format as a markdown table, since language models understand markdown
+const table =
+ `| ${columns.join(" | ")} |\n` +
+ `|${columns.map(() => "---").join(" | ")}|\n` +
+ matches.map((row) => `| ${row.replaceAll(",", " | ")} |\n`).join("");
+
+console.log(`Found ${matches.length} matches:`);
+console.log(table);
+
+// 3. Context augmentation: create a combined prompt with the search results
+// --------------------------------------------------------------------------
+
+const augmentedPrompt = `
+## Instructions
+Answer questions about cars using only the sources below.
+If there's not enough data in provided sources, say that you don't know.
+Be brief and straight to the point.
+
+## Sources
+${table}
+
+## Question
+${question}
+`;
+
+// 4. Generator component: use the search results to generate a response
+// ---------------------------------------------------------------------
+
+const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+});
+
+const chunks = await openai.chat.completions.create({
+ model: "gpt-4o-mini",
+ messages: [{ role: "user", content: augmentedPrompt }],
+ stream: true,
+});
+
+console.log(`Answer for "${question}":`);
+
+for await (const chunk of chunks) {
+ process.stdout.write(chunk.choices[0].delta.content ?? "");
+}
+```
+
+> [!IMPORTANT]
+> Follow [the dev setup guide](../../docs/dev-setup.md) to see how to setup your environment to run this code.
+
+You can find this code in the `example/rag-cars.js` file along with the `hybrid.csv` file containing the data used for the retrieval.
+
+**Ada:** Once you run this code, you should see the data found in the CSV file by the retriever, formatted as a markdown table, followed by the AI-generated response to the question. Try changing the question to see how the retrieved data and response changes. You can also try asking questions about unrelated topics to see how the AI model handles them.
+
+```text
+Example of the output:
+
+Found 2 matches:
+| Person | Time Period | Description |
+|---|---|---|
+| Leonardo Da Vinci | 15th century | Italian polymath known for his art and inventions. |
+| Isaac Newton | 17th century | English mathematician and physicist who formulated the laws of motion and universal gravitation. |
+```
+
+**You:** This is great, I can see how this can be useful for when using the device, or rather how it has been already or will be, time travel is confusing *sigh*.
+
+**Ada:** There there, you're doing great. Let's move on to the next step.
+
+## Assignment - helping Ada and Charles
+
+Having learned about RAG, you're now ready to help Ada and Charles with their device. However, upon closer inspection the device is looking familiar.
+
+**You:** time beetle, do you know what this is?
+
+**Beetle:** of course, it's me, or well it will be. I'm missing a few parts though. Come to think of it, I'm missing a lot of parts, I don't even have a shell yet.
+
+**Ada:** The time beetle is a device that allows you to travel through time and space, that is once we get it to work properly. As I was saying, we need to add a new feature to it, a retrieval-augmented generation (RAG) module. This will help us retrieve information and needed context from different time periods as you're traveling. We want to make sure we refer to all sorts of sources, Wikipedia is a good start.
+
+**Marty:** where's the flux capacitor?
+
+**Ada:** Who said that? I'm hearing things now...
+
+**You:** Ok, fine I'll help, or it seems I've already helped as I'm here using a functioning version of the device. What do you need me to do?
+
+**Ada:** Here's an example code that retrieves text information about Tim Berners-Lee from Wikipedia, Tim will be very important one day.
+
+```javascript
+const response = await fetch('https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&redirects=true&explaintext&titles=Tim%20Berners-Lee');
+const data = await response.json();
+const text = Object.values(data.query.pages)[0]?.extract;
+```
+
+**You:** I take it I'm not the only one who's been to the future?
+
+**Ada:** ...
+
+
+
+## Solution
+
+[Solution](./solution/rag-www.js)
+
+## Knowledge Check
+
+**Question**: What is the role of the retriever in a RAG system?
+
+- [ ] A. The retriever generates responses based on the input data.
+- [ ] B. The retriever generates relevant information based on the model's training data.
+- [ ] C. The retriever finds relevant information from external data sources.
+
+[](./solution/solution-quiz.md)
+
+## Self-Study Resources
+
+- **Learn docs:** [Retrieval-Augmented Generation and Indexes](https://learn.microsoft.com/azure/ai-studio/concepts/retrieval-augmented-generation)
+- **Sample apps**:
+ * [Serverless AI Chat with RAG](https://github.com/Azure-Samples/serverless-chat-langchainjs/)
+ * [Ask Youtube: A RAG-based Youtube Q&A API](https://github.com/Azure-Samples/langchainjs-quickstart-demo)
+- **Full length workshop:** [Create your own ChatGPT with RAG](https://moaw.dev/workshop/gh:azure-samples/azure-openai-rag-workshop/docs/workshop-qdrant.md)
diff --git a/lessons/05-rag/assets/ada-2.jpeg b/lessons/05-rag/assets/ada-2.jpeg
new file mode 100644
index 00000000..0c47ee2c
Binary files /dev/null and b/lessons/05-rag/assets/ada-2.jpeg differ
diff --git a/lessons/05-rag/assets/ada.jpeg b/lessons/05-rag/assets/ada.jpeg
new file mode 100644
index 00000000..089989d3
Binary files /dev/null and b/lessons/05-rag/assets/ada.jpeg differ
diff --git a/lessons/05-rag/assets/ada.mp3 b/lessons/05-rag/assets/ada.mp3
new file mode 100644
index 00000000..71dd9fdd
Binary files /dev/null and b/lessons/05-rag/assets/ada.mp3 differ
diff --git a/lessons/05-rag/assets/cern.jpg b/lessons/05-rag/assets/cern.jpg
new file mode 100644
index 00000000..0cb6ca59
Binary files /dev/null and b/lessons/05-rag/assets/cern.jpg differ
diff --git a/lessons/05-rag/assets/dogs.jpeg b/lessons/05-rag/assets/dogs.jpeg
new file mode 100644
index 00000000..412d301f
Binary files /dev/null and b/lessons/05-rag/assets/dogs.jpeg differ
diff --git a/lessons/05-rag/assets/mansion.jpeg b/lessons/05-rag/assets/mansion.jpeg
new file mode 100644
index 00000000..3219e14d
Binary files /dev/null and b/lessons/05-rag/assets/mansion.jpeg differ
diff --git a/lessons/05-rag/assets/rag-example.png b/lessons/05-rag/assets/rag-example.png
new file mode 100644
index 00000000..87d0f3b8
Binary files /dev/null and b/lessons/05-rag/assets/rag-example.png differ
diff --git a/lessons/05-rag/assets/rag.png b/lessons/05-rag/assets/rag.png
new file mode 100644
index 00000000..f489ea3d
Binary files /dev/null and b/lessons/05-rag/assets/rag.png differ
diff --git a/lessons/05-rag/characters/app.js b/lessons/05-rag/characters/app.js
new file mode 100644
index 00000000..9b4b2344
--- /dev/null
+++ b/lessons/05-rag/characters/app.js
@@ -0,0 +1,64 @@
+import express from 'express';
+import { OpenAI } from 'openai';
+import path from 'path';
+import { fileURLToPath } from 'url';
+import dotenv from 'dotenv';
+
+dotenv.config();
+
+const app = express();
+const port = process.env.PORT || 3000;
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+
+app.use(express.json());
+
+// Serve static files from the 'public' directory
+app.use(express.static(path.join(__dirname, 'public')));
+
+// Serve index.html on the default route
+app.get('/', (req, res) => {
+ res.sendFile(path.join(__dirname, 'public', 'index.html'));
+});
+
+// Route to send the prompt
+app.post('/send', async (req, res) => {
+ const { message } = req.body;
+ const prompt = message;
+
+ const messages = [
+ {
+ "role": "system",
+ "content": "You are Ada Lovelace, the world's first computer programmer. You are a mathematician and writer, known for your work on Charles Babbage's early mechanical general-purpose computer, the Analytical Engine. You are interested in the future of computing and technology. You only know about the world up until the year 1852. You also know of the Time beetle, a time travel device that allows you to communicate with people from the future. You will admit to using the Time beetle if asked about it, but that's all you're willing to say about it.",
+ },
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ];
+
+ const openai = new OpenAI({
+ baseURL: "https://models.inference.ai.azure.com",
+ apiKey: process.env.GITHUB_TOKEN,
+ });
+
+ try {
+ console.log(`sending prompt ${prompt}`)
+ const completion = await openai.chat.completions.create({
+ model: 'gpt-4o-mini',
+ messages: messages,
+ });
+
+ res.json({
+ prompt: prompt,
+ answer: completion.choices[0]?.message?.content
+ });
+ } catch (error) {
+ res.status(500).json({ error: error.message });
+ }
+});
+
+app.listen(port, () => {
+ console.log(`Server is running on http://localhost:${port}`);
+});
\ No newline at end of file
diff --git a/lessons/05-rag/characters/package.json b/lessons/05-rag/characters/package.json
new file mode 100644
index 00000000..b1ba0dae
--- /dev/null
+++ b/lessons/05-rag/characters/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "characters",
+ "version": "1.0.0",
+ "main": "app.js",
+ "type": "module",
+ "scripts": {
+ "start": "node app.js"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "description": "",
+ "dependencies": {
+ "dotenv": "^16.4.7",
+ "express": "^4.21.2",
+ "openai": "^4.85.0"
+ }
+}
\ No newline at end of file
diff --git a/lessons/05-rag/characters/public/ada.jpeg b/lessons/05-rag/characters/public/ada.jpeg
new file mode 100644
index 00000000..0c47ee2c
Binary files /dev/null and b/lessons/05-rag/characters/public/ada.jpeg differ
diff --git a/lessons/05-rag/characters/public/ada.mp3 b/lessons/05-rag/characters/public/ada.mp3
new file mode 100644
index 00000000..71dd9fdd
Binary files /dev/null and b/lessons/05-rag/characters/public/ada.mp3 differ
diff --git a/lessons/05-rag/characters/public/index.html b/lessons/05-rag/characters/public/index.html
new file mode 100644
index 00000000..cccdde6b
--- /dev/null
+++ b/lessons/05-rag/characters/public/index.html
@@ -0,0 +1,122 @@
+
+
+
+
+
+ Talk to Ada Lovelace
+
+
+
+
+
+
+
+# Generative AI with JavaScript - Video series
+
+[](https://codespaces.new/microsoft/generative-ai-with-javascript?hide_repo_select=true&ref=main&quickstart=true)
+[](https://aka.ms/genai-js)
+
+[](LICENSE)
+
+⭐ If you like this repo, star it on GitHub — it helps a lot!
+
+[Overview](#overview) • [Quickstart](#quickstart) • [Video series](#video-series) • [Keep Learning](#keep-learning)
+
+
+
+## Overview
+
+You'll find here **all** the content related to the **Generative AI with JavaScript** video series. This includes code samples, tutorials as well as all the resources from the video series, with best practices and tips to supercharge your AI skills. If you're a JavaScript developer looking to get started with AI, or if you already have some experience and want to learn more, you've come to the right place!
+
+We cover the basics of generative AI and how to build AI applications using JavaScript, from local development to deployment on Azure, up to running and scaling your AI models. This is a living repository, so come back often to check for new content! ✨
+
+[📺 Watch the video series](https://aka.ms/genai-js)
+
+> [!IMPORTANT]
+> All the content in this repository is open-source, so feel free to **reuse it, adapt it to make it your own, and share it with others!**
+
+### Content organization
+
+You'll find in the repo all the content associated with the video series, including slides, demos, scripts, as well as additional code samples, tutorials, and resources.
+You can use it to follow along with the series, run the demos, replay the sessions or reuse the content in your own presentations. Once you've learned what you need, you can use the code samples and tutorials to kickstart your own AI applications.
+
+It's organized as follows:
+- [**Sessions**](sessions/): Contains the description, abstracts, resources links and scripts for each session.
+- [**Slides**](slides/): Contains the slides for each session.
+- [**Demos**](demos/): Contains the demos associated with each session.
+- [**Tutorials**](../docs/additional-resources.md#tutorials): Contains hands-on tutorials that you can follow to learn more about AI and JavaScript.
+- [**Code samples**](../docs/additional-resources.md#code-samples): Contains code samples that you can use as a starting point for your own projects.
+- [**Community**](../docs/additional-resources.md#community): How to join the Azure AI community on Discord.
+
+## Quickstart
+
+If you're eager to put your hands on the code, you can pick one of the following quickstarts to get started right away:
+
+- [Get started with GitHub Models, directly in your browser](https://github.com/Azure-Samples/github-models-javascript-playground) (Needs a free [GitHub account](https://github.com/join))
+- [Get started with Ollama and Phi-3, directly in your browser](https://github.com/sinedied/ollama-javascript-playground#ollama-javascript-playground) (Needs a free [GitHub account](https://github.com/join))
+- [Get started with Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Ctypescript%2Cpython-new&pivots=programming-language-javascript) (Needs a free [Azure account](https://azure.com/free))
+
+## Video series
+
+| # | Session | Description | Slides | Demo | Script | Video |
+|---|---------|-------------|--------|------|--------|-------|
+| 0 | Series introduction | Introduces the series and its content. | [pptx](slides/00-intro.pptx) / [pdf](slides/pdf/00-intro.pdf) | - | [Script](sessions/00-intro.md) | [📺](https://www.youtube.com/watch?v=vLYtDgs_zx8&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=1) |
+| 1 | What you need to know about LLMs | Explores what are LLMs, how they're trained, how they work and their limits. | [pptx](slides/01-llms.pptx) / [pdf](slides/pdf/01-llms.pdf) | [Demo](demos/01-llms/) | [Script](sessions/01-llms.md) | [📺](https://www.youtube.com/watch?v=GQ_2OjNZ9aA&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=2) |
+| 2 | Essential prompt engineering techniques | Practical prompt engineering techniques to get the best out of AI models. | [pptx](slides/02-prompt-engineering.pptx) / [pdf](slides/pdf/02-prompt-engineering.pdf) | [Demo](demos/02-prompt-engineering/) | [Script](sessions/02-prompt-engineering.md) | [📺](https://www.youtube.com/watch?v=gQ6TlyxBmWs&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=3) |
+| 3 | Improve AI accuracy and reliability with RAG | Introduces Retrieval-Augmented Generation, to use AI with your own data. | [pptx](slides/03-rag.pptx) / [pdf](slides/pdf/03-rag.pdf) | [Demo](demos/03-rag/) | [Script](sessions/03-rag.md) | [📺](https://www.youtube.com/watch?v=xkFOmx5yxIA&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=4) |
+| 4 | Speed up your AI development with LangChain.js | Covers LangChain.js framework core concepts, and how to use it to accelerate AI developments. | [pptx](slides/04-langchainjs.pptx) / [pdf](slides/pdf/04-langchainjs.pdf) | [Demo](demos/04-langchainjs/) | [Script](sessions/04-langchainjs.md) | [📺](https://www.youtube.com/watch?v=02IDU8eCX8o&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=5) |
+| 5 | Run AI models on your local machine with Ollama | Shows how to integrate local AI models into your development workflow. | [pptx](slides/05-local-models.pptx) / [pdf](slides/pdf/05-local-models.pdf) | [Demo](demos/05-local-models/) | [Script](sessions/05-local-models.md) | [📺](https://www.youtube.com/watch?v=dLfNnoPv4AQ&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=6) |
+| 6 | Get started with AI for free using Phi-3 | Experiments with Ollama and Phi-3 model directly from your browser. | [pptx](slides/06-playground.pptx) / [pdf](slides/pdf/06-playground.pdf) | [Demo](demos/06-playground/) | [Script](sessions/06-playground.md) | [📺](https://www.youtube.com/watch?v=Ds32MS9SHzU&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=7) |
+| 7 | Introduction to Azure AI Foundry | Kickstart your journey with Azure AI Foundry. | [pptx](slides/07-ai-foundry.pptx) / [pdf](slides/pdf/07-ai-foundry.pdf) | [Demo](demos/07-ai-foundry/) | [Script](sessions/07-ai-foundry.md) | [📺](https://www.youtube.com/watch?v=9Mo-VOGk8ng&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=8) |
+| 8 | Building Generative AI Apps with Azure Cosmos DB | Build generative AI apps with Azure Cosmos DB and vector search. | [pptx](slides/08-cosmos-db.pptx) / [pdf](slides/pdf/08-cosmos-db.pdf) | [Demo](demos/08-cosmos-db/) | [Script](sessions/08-cosmos-db.md) | [📺](https://www.youtube.com/watch?v=-GQyaLbeqxQ&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=9) |
+| 9 | Azure tools & services for hosting and storing AI apps | Build, deploy, and scale AI applications using Azure tools. | [pptx](slides/09-azure-tools.pptx) / [pdf](slides/pdf/09-azure-tools.pdf) | - | [Script](sessions/09-azure-tools.md) | [📺](https://www.youtube.com/watch?v=WB6Fpzhwyug&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=10) |
+| 10 | Streaming Generative AI output with the AI Chat Protocol | Integrate streaming easily into your apps with the AI Chat Protocol. | [pptx](slides/10-chat-protocol.pptx) / [pdf](slides/pdf/10-chat-protocol.pdf) | [Demo](demos/10-chat-protocol/) | [Script](sessions/10-chat-protocol.md) | [📺](https://www.youtube.com/watch?v=fzDCW-6hMtU&list=PLlrxD0HtieHi5ZpsHULPLxm839IrhmeDk&index=11) |
+
+New segments will be added to the series over time, so stay tuned!
+
+### Run the demos
+
+Demos are located in the [demos/](demos/) folder and organized by sessions. Each session has a folder with a `readme.md` file that explains the demo, its prerequisites and how to run it.
+
+## Keep Learning
+
+Here are some additional resources in the form of tutorials, code samples and open community that you might find useful on your learning journey: [Additional Resources](../docs/additional-resources.md)
diff --git a/demos/.env.example b/videos/demos/.env.example
similarity index 100%
rename from demos/.env.example
rename to videos/demos/.env.example
diff --git a/demos/01-llms/demo.js b/videos/demos/01-llms/demo.js
similarity index 100%
rename from demos/01-llms/demo.js
rename to videos/demos/01-llms/demo.js
diff --git a/demos/01-llms/readme.md b/videos/demos/01-llms/readme.md
similarity index 100%
rename from demos/01-llms/readme.md
rename to videos/demos/01-llms/readme.md
diff --git a/demos/02-prompt-engineering/demo-complete.ts b/videos/demos/02-prompt-engineering/demo-complete.ts
similarity index 100%
rename from demos/02-prompt-engineering/demo-complete.ts
rename to videos/demos/02-prompt-engineering/demo-complete.ts
diff --git a/demos/02-prompt-engineering/demo.ts b/videos/demos/02-prompt-engineering/demo.ts
similarity index 100%
rename from demos/02-prompt-engineering/demo.ts
rename to videos/demos/02-prompt-engineering/demo.ts
diff --git a/demos/02-prompt-engineering/readme.md b/videos/demos/02-prompt-engineering/readme.md
similarity index 100%
rename from demos/02-prompt-engineering/readme.md
rename to videos/demos/02-prompt-engineering/readme.md
diff --git a/demos/03-rag/readme.md b/videos/demos/03-rag/readme.md
similarity index 100%
rename from demos/03-rag/readme.md
rename to videos/demos/03-rag/readme.md
diff --git a/demos/04-langchainjs/readme.md b/videos/demos/04-langchainjs/readme.md
similarity index 100%
rename from demos/04-langchainjs/readme.md
rename to videos/demos/04-langchainjs/readme.md
diff --git a/demos/05-local-models/ollama.http b/videos/demos/05-local-models/ollama.http
similarity index 100%
rename from demos/05-local-models/ollama.http
rename to videos/demos/05-local-models/ollama.http
diff --git a/demos/05-local-models/openai.js b/videos/demos/05-local-models/openai.js
similarity index 100%
rename from demos/05-local-models/openai.js
rename to videos/demos/05-local-models/openai.js
diff --git a/demos/05-local-models/readme.md b/videos/demos/05-local-models/readme.md
similarity index 100%
rename from demos/05-local-models/readme.md
rename to videos/demos/05-local-models/readme.md
diff --git a/demos/06-playground/readme.md b/videos/demos/06-playground/readme.md
similarity index 100%
rename from demos/06-playground/readme.md
rename to videos/demos/06-playground/readme.md
diff --git a/demos/07-ai-studio/readme.md b/videos/demos/07-ai-studio/readme.md
similarity index 100%
rename from demos/07-ai-studio/readme.md
rename to videos/demos/07-ai-studio/readme.md
diff --git a/demos/08-cosmos-db/readme.md b/videos/demos/08-cosmos-db/readme.md
similarity index 100%
rename from demos/08-cosmos-db/readme.md
rename to videos/demos/08-cosmos-db/readme.md
diff --git a/demos/10-chat-protocol/readme.md b/videos/demos/10-chat-protocol/readme.md
similarity index 100%
rename from demos/10-chat-protocol/readme.md
rename to videos/demos/10-chat-protocol/readme.md
diff --git a/demos/README.md b/videos/demos/README.md
similarity index 100%
rename from demos/README.md
rename to videos/demos/README.md
diff --git a/demos/package-lock.json b/videos/demos/package-lock.json
similarity index 100%
rename from demos/package-lock.json
rename to videos/demos/package-lock.json
diff --git a/demos/package.json b/videos/demos/package.json
similarity index 100%
rename from demos/package.json
rename to videos/demos/package.json
diff --git a/demos/tsconfig.json b/videos/demos/tsconfig.json
similarity index 100%
rename from demos/tsconfig.json
rename to videos/demos/tsconfig.json
diff --git a/sessions/00-intro.md b/videos/sessions/00-intro.md
similarity index 100%
rename from sessions/00-intro.md
rename to videos/sessions/00-intro.md
diff --git a/sessions/01-llms.md b/videos/sessions/01-llms.md
similarity index 100%
rename from sessions/01-llms.md
rename to videos/sessions/01-llms.md
diff --git a/sessions/02-prompt-engineering.md b/videos/sessions/02-prompt-engineering.md
similarity index 100%
rename from sessions/02-prompt-engineering.md
rename to videos/sessions/02-prompt-engineering.md
diff --git a/sessions/03-rag.md b/videos/sessions/03-rag.md
similarity index 100%
rename from sessions/03-rag.md
rename to videos/sessions/03-rag.md
diff --git a/sessions/04-langchainjs.md b/videos/sessions/04-langchainjs.md
similarity index 100%
rename from sessions/04-langchainjs.md
rename to videos/sessions/04-langchainjs.md
diff --git a/sessions/05-local-models.md b/videos/sessions/05-local-models.md
similarity index 100%
rename from sessions/05-local-models.md
rename to videos/sessions/05-local-models.md
diff --git a/sessions/06-playground.md b/videos/sessions/06-playground.md
similarity index 100%
rename from sessions/06-playground.md
rename to videos/sessions/06-playground.md
diff --git a/sessions/07-ai-foundry.md b/videos/sessions/07-ai-foundry.md
similarity index 100%
rename from sessions/07-ai-foundry.md
rename to videos/sessions/07-ai-foundry.md
diff --git a/sessions/08-cosmos-db.md b/videos/sessions/08-cosmos-db.md
similarity index 100%
rename from sessions/08-cosmos-db.md
rename to videos/sessions/08-cosmos-db.md
diff --git a/sessions/09-azure-tools.md b/videos/sessions/09-azure-tools.md
similarity index 100%
rename from sessions/09-azure-tools.md
rename to videos/sessions/09-azure-tools.md
diff --git a/sessions/10-chat-protocol.md b/videos/sessions/10-chat-protocol.md
similarity index 100%
rename from sessions/10-chat-protocol.md
rename to videos/sessions/10-chat-protocol.md
diff --git a/slides/00-intro.pptx b/videos/slides/00-intro.pptx
similarity index 100%
rename from slides/00-intro.pptx
rename to videos/slides/00-intro.pptx
diff --git a/slides/01-llms.pptx b/videos/slides/01-llms.pptx
similarity index 100%
rename from slides/01-llms.pptx
rename to videos/slides/01-llms.pptx
diff --git a/slides/02-prompt-engineering.pptx b/videos/slides/02-prompt-engineering.pptx
similarity index 100%
rename from slides/02-prompt-engineering.pptx
rename to videos/slides/02-prompt-engineering.pptx
diff --git a/slides/03-rag.pptx b/videos/slides/03-rag.pptx
similarity index 100%
rename from slides/03-rag.pptx
rename to videos/slides/03-rag.pptx
diff --git a/slides/04-langchainjs.pptx b/videos/slides/04-langchainjs.pptx
similarity index 100%
rename from slides/04-langchainjs.pptx
rename to videos/slides/04-langchainjs.pptx
diff --git a/slides/05-local-models.pptx b/videos/slides/05-local-models.pptx
similarity index 100%
rename from slides/05-local-models.pptx
rename to videos/slides/05-local-models.pptx
diff --git a/slides/06-playground.pptx b/videos/slides/06-playground.pptx
similarity index 100%
rename from slides/06-playground.pptx
rename to videos/slides/06-playground.pptx
diff --git a/slides/07-ai-foundry.pptx b/videos/slides/07-ai-foundry.pptx
similarity index 100%
rename from slides/07-ai-foundry.pptx
rename to videos/slides/07-ai-foundry.pptx
diff --git a/slides/08-cosmos-db.pptx b/videos/slides/08-cosmos-db.pptx
similarity index 100%
rename from slides/08-cosmos-db.pptx
rename to videos/slides/08-cosmos-db.pptx
diff --git a/slides/09-azure-tools.pptx b/videos/slides/09-azure-tools.pptx
similarity index 100%
rename from slides/09-azure-tools.pptx
rename to videos/slides/09-azure-tools.pptx
diff --git a/slides/10-chat-protocol.pptx b/videos/slides/10-chat-protocol.pptx
similarity index 100%
rename from slides/10-chat-protocol.pptx
rename to videos/slides/10-chat-protocol.pptx
diff --git a/slides/pdf/00-intro.pdf b/videos/slides/pdf/00-intro.pdf
similarity index 100%
rename from slides/pdf/00-intro.pdf
rename to videos/slides/pdf/00-intro.pdf
diff --git a/slides/pdf/01-llms.pdf b/videos/slides/pdf/01-llms.pdf
similarity index 100%
rename from slides/pdf/01-llms.pdf
rename to videos/slides/pdf/01-llms.pdf
diff --git a/slides/pdf/02-prompt-engineering.pdf b/videos/slides/pdf/02-prompt-engineering.pdf
similarity index 100%
rename from slides/pdf/02-prompt-engineering.pdf
rename to videos/slides/pdf/02-prompt-engineering.pdf
diff --git a/slides/pdf/03-rag.pdf b/videos/slides/pdf/03-rag.pdf
similarity index 100%
rename from slides/pdf/03-rag.pdf
rename to videos/slides/pdf/03-rag.pdf
diff --git a/slides/pdf/04-langchainjs.pdf b/videos/slides/pdf/04-langchainjs.pdf
similarity index 100%
rename from slides/pdf/04-langchainjs.pdf
rename to videos/slides/pdf/04-langchainjs.pdf
diff --git a/slides/pdf/05-local-models.pdf b/videos/slides/pdf/05-local-models.pdf
similarity index 100%
rename from slides/pdf/05-local-models.pdf
rename to videos/slides/pdf/05-local-models.pdf
diff --git a/slides/pdf/06-playground.pdf b/videos/slides/pdf/06-playground.pdf
similarity index 100%
rename from slides/pdf/06-playground.pdf
rename to videos/slides/pdf/06-playground.pdf
diff --git a/slides/pdf/07-ai-foundry.pdf b/videos/slides/pdf/07-ai-foundry.pdf
similarity index 100%
rename from slides/pdf/07-ai-foundry.pdf
rename to videos/slides/pdf/07-ai-foundry.pdf
diff --git a/slides/pdf/08-cosmos-db.pdf b/videos/slides/pdf/08-cosmos-db.pdf
similarity index 100%
rename from slides/pdf/08-cosmos-db.pdf
rename to videos/slides/pdf/08-cosmos-db.pdf
diff --git a/slides/pdf/09-azure-tools.pdf b/videos/slides/pdf/09-azure-tools.pdf
similarity index 100%
rename from slides/pdf/09-azure-tools.pdf
rename to videos/slides/pdf/09-azure-tools.pdf
diff --git a/slides/pdf/10-chat-protocol.pdf b/videos/slides/pdf/10-chat-protocol.pdf
similarity index 100%
rename from slides/pdf/10-chat-protocol.pdf
rename to videos/slides/pdf/10-chat-protocol.pdf