Commit 8b24be6

Day 8
1 parent 6236330 commit 8b24be6

108 files changed (+1036, -0 lines)


leetcode-master/LLM-courseDeepAI/L1.ipynb

Lines changed: 567 additions & 0 deletions
Large diffs are not rendered by default.
Lines changed: 341 additions & 0 deletions
@@ -0,0 +1,341 @@
#!/usr/bin/env python
# coding: utf-8

# # Lab 1: Implementing self-editing memory from scratch

# ## Preparation
# <p style="background-color:#fff6ff; padding:15px; border-width:3px; border-color:#efe6ef; border-style:solid; border-radius:6px"> 💻 &nbsp;
# <b>Note: </b>The Letta libraries have continued to progress since this course was filmed. If you download these notebooks and run them in your own environment, be careful to use the requirements.txt file to load the library revisions that are compatible with these notebooks.
# </p>

# <div style="background-color:#fff6ff; padding:13px; border-width:3px; border-color:#efe6ef; border-style:solid; border-radius:6px">
# <p> 💻 &nbsp; <b>Access <code>requirements.txt</code> and <code>helper.py</code> files:</b> 1) click on the <em>"File"</em> option on the top menu of the notebook and then 2) click on <em>"Open"</em>.</p>
#
# <p> ⬇ &nbsp; <b>Download Notebooks:</b> 1) click on the <em>"File"</em> option on the top menu of the notebook and then 2) click on <em>"Download as"</em> and select <em>"Notebook (.ipynb)"</em>.</p>
#
# <p> 📒 &nbsp; For more help, please see the <em>"Appendix – Tips, Help, and Download"</em> Lesson.</p>
# </div>

# <p style="background-color:#f7fff8; padding:15px; border-width:3px; border-color:#e0f0e0; border-style:solid; border-radius:6px"> 🚨
# &nbsp; <b>Different Run Results:</b> The output generated by AI models can vary with each execution due to their dynamic, probabilistic nature. Your results may differ from those shown in the video.</p>

# ## Section 0: Setup OpenAI

# In[ ]:


from helper import get_openai_api_key
openai_api_key = get_openai_api_key()


# In[ ]:


from openai import OpenAI
import os

client = OpenAI(
    api_key=openai_api_key
)
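

# A fallback sketch (an assumption, not part of the original lab): if the course
# helper returns an empty key when you run this in your own environment, the
# standard OPENAI_API_KEY environment variable can supply it instead.

# In[ ]:


# assumption: you exported OPENAI_API_KEY yourself
if not openai_api_key:
    openai_api_key = os.environ.get("OPENAI_API_KEY")
    client = OpenAI(api_key=openai_api_key)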


# ## Section 1: Breaking down the LLM context window
# ### A simple agent's context window

# In[ ]:


model = "gpt-4o-mini"


# In[ ]:


system_prompt = "You are a chatbot."


# In[ ]:


# make the chat completion request (no memory or tools yet)
chat_completion = client.chat.completions.create(
    model=model,
    messages=[
        # system prompt: always included in the context window
        {"role": "system", "content": system_prompt},
        # chat history (evolves over time)
        {"role": "user", "content": "What is my name?"},
    ]
)
chat_completion.choices[0].message.content


# ### Adding memory to the context
#

# In[ ]:


agent_memory = {"human": "Name: Bob"}
system_prompt = "You are a chatbot. " \
    + "You have a section of your context called [MEMORY] " \
    + "that contains information relevant to your conversation"


# In[ ]:


import json

chat_completion = client.chat.completions.create(
    model=model,
    messages=[
        # system prompt, with the memory block appended on a new line
        {"role": "system", "content": system_prompt + "\n[MEMORY]\n" + \
            json.dumps(agent_memory)},
        # chat history
        {"role": "user", "content": "What is my name?"},
    ],
)
chat_completion.choices[0].message.content
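

# Before automating this in Section 2, here is a quick manual illustration
# (a sketch, not part of the original lab; the "favorite color" fact is made up):
# nothing stops you from editing the memory dict by hand between turns and
# re-sending it. The tool in the next section lets the LLM make this kind of
# edit itself.

# In[ ]:


agent_memory["human"] += "\nFavorite color: blue"  # hypothetical manual edit

chat_completion = client.chat.completions.create(
    model=model,
    messages=[
        {"role": "system", "content": system_prompt + "\n[MEMORY]\n" + json.dumps(agent_memory)},
        {"role": "user", "content": "What is my favorite color?"},
    ],
)
chat_completion.choices[0].message.content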


# ## Section 2: Modifying the memory with tools

# ### Defining a memory editing tool
#

# In[ ]:


agent_memory = {"human": "", "agent": ""}

def core_memory_save(section: str, memory: str):
    # append the new memory to the chosen section on its own line
    agent_memory[section] += '\n'
    agent_memory[section] += memory


# In[ ]:


agent_memory


# In[ ]:


core_memory_save("human", "The human's name is Charles")


# In[ ]:


agent_memory


# In[ ]:


# tool description
core_memory_save_description = "Save important information about you, " \
    + "the agent, or the human you are chatting with."

# arguments into the tool (generated by the LLM)
# defines what the agent must generate to input into the tool
core_memory_save_properties = \
{
    # arg 1: section of memory to edit
    "section": {
        "type": "string",
        "enum": ["human", "agent"],
        "description": "Must be either 'human' " \
            + "(to save information about the human) or 'agent' " \
            + "(to save information about yourself)",
    },
    # arg 2: memory to save
    "memory": {
        "type": "string",
        "description": "Memory to save in the section",
    },
}

# tool schema (passed to OpenAI)
core_memory_save_metadata = \
{
    "type": "function",
    "function": {
        "name": "core_memory_save",
        "description": core_memory_save_description,
        "parameters": {
            "type": "object",
            "properties": core_memory_save_properties,
            "required": ["section", "memory"],
        },
    }
}
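

# A quick sanity check (a sketch, not part of the original lab): the tool schema
# must be JSON-serializable before it can be sent to the API, so printing it is
# a cheap way to catch structural typos early.

# In[ ]:


print(json.dumps(core_memory_save_metadata, indent=2))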


# In[ ]:


agent_memory = {"human": ""}
system_prompt = "You are a chatbot. " \
    + "You have a section of your context called [MEMORY] " \
    + "that contains information relevant to your conversation"

chat_completion = client.chat.completions.create(
    model=model,
    messages=[
        # system prompt
        {"role": "system", "content": system_prompt},
        # memory
        {"role": "system", "content": "[MEMORY]\n" + json.dumps(agent_memory)},
        # chat history
        {"role": "user", "content": "My name is Bob"},
    ],
    # tool schemas
    tools=[core_memory_save_metadata]
)
response = chat_completion.choices[0]
response


# ### Executing the tool
#

# In[ ]:


# parse the arguments the LLM generated for the tool call
arguments = json.loads(response.message.tool_calls[0].function.arguments)
arguments


# In[ ]:


# run the function with the specified arguments
core_memory_save(**arguments)


# In[ ]:


agent_memory


# ### Running the next agent step
#

# In[ ]:


chat_completion = client.chat.completions.create(
    model=model,
    messages=[
        # system prompt
        {"role": "system", "content": system_prompt},
        # memory (now contains the saved name)
        {"role": "system", "content": "[MEMORY]\n" + json.dumps(agent_memory)},
        # chat history
        {"role": "user", "content": "what is my name"},
    ],
    tools=[core_memory_save_metadata]
)
response = chat_completion.choices[0]
response.message
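

# Because the name is already in memory, the model should answer directly rather
# than calling the tool again (behavior can vary run to run). A small guard
# (a sketch, not part of the original lab) makes the two outcomes explicit:

# In[ ]:


if response.message.tool_calls:
    print("model chose to call a tool:", response.message.tool_calls[0].function)
else:
    print("model answered directly:", response.message.content)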


# ## Implementing an agentic loop
#

# In[ ]:


agent_memory = {"human": ""}


# In[ ]:


system_prompt_os = system_prompt \
    + "\nYou must either call a tool (core_memory_save) or " \
    + "write a response to the user. " \
    + "Do not take the same actions multiple times! " \
    + "When you learn new information, make sure to always " \
    + "call the core_memory_save tool."


# In[ ]:


def agent_step(user_message):

    # prefix messages with system prompt and memory
    messages = [
        # system prompt
        {"role": "system", "content": system_prompt_os},
        # memory
        {
            "role": "system",
            "content": "[MEMORY]\n" + json.dumps(agent_memory)
        },
    ]

    # append the most recent message
    messages.append({"role": "user", "content": user_message})

    # agentic loop
    while True:
        chat_completion = client.chat.completions.create(
            model=model,
            messages=messages,
            tools=[core_memory_save_metadata]
        )
        response = chat_completion.choices[0]

        # update the messages with the agent's response
        messages.append(response.message)

        # if NOT calling a tool (responding to the user), return
        if not response.message.tool_calls:
            return response.message.content

        # if calling a tool, execute the tool
        else:
            print("TOOL CALL:", response.message.tool_calls[0].function)

            # parse the arguments from the LLM function call
            arguments = json.loads(
                response.message.tool_calls[0].function.arguments
            )

            # run the function with the specified arguments
            core_memory_save(**arguments)

            # add the tool call response to the message history
            messages.append({
                "role": "tool",
                "tool_call_id": response.message.tool_calls[0].id,
                "name": "core_memory_save",
                "content": f"Updated memory: {json.dumps(agent_memory)}"
            })


# In[ ]:


agent_step("my name is bob.")


# In[ ]:


# Try some prompts of your own!

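# For example (hypothetical follow-up prompts, a sketch to get you started): the
# agent should call core_memory_save on the new fact, then answer from memory.

# In[ ]:


agent_step("I live in Boston and I love hiking.")
agent_step("what do you remember about me?")
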

# In[ ]:

Binary file not shown.
