VenkyPas committed on
Commit
16345dc
·
1 Parent(s): 30a7949

Added iPython notebook

.DS_Store ADDED
Binary file (6.15 kB).
 
.chainlit/config.toml ADDED
@@ -0,0 +1,84 @@
1
+ [project]
2
+ # Whether to enable telemetry (default: true). No personal data is collected.
3
+ enable_telemetry = true
4
+
5
+ # List of environment variables to be provided by each user to use the app.
6
+ user_env = []
7
+
8
+ # Duration (in seconds) during which the session is saved when the connection is lost
9
+ session_timeout = 3600
10
+
11
+ # Enable third parties caching (e.g LangChain cache)
12
+ cache = false
13
+
14
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
15
+ # follow_symlink = false
16
+
17
+ [features]
18
+ # Show the prompt playground
19
+ prompt_playground = true
20
+
21
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
22
+ unsafe_allow_html = false
23
+
24
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
25
+ latex = false
26
+
27
+ # Authorize users to upload files with messages
28
+ multi_modal = true
29
+
30
+ # Allows user to use speech to text
31
+ [features.speech_to_text]
32
+ enabled = false
33
+ # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
34
+ # language = "en-US"
35
+
36
+ [UI]
37
+ # Name of the app and chatbot.
38
+ name = "Chatbot"
39
+
40
+ # Show the readme while the conversation is empty.
41
+ show_readme_as_default = true
42
+
43
+ # Description of the app and chatbot. This is used for HTML tags.
44
+ # description = ""
45
+
46
+ # Large size content are by default collapsed for a cleaner ui
47
+ default_collapse_content = true
48
+
49
+ # The default value for the expand messages settings.
50
+ default_expand_messages = false
51
+
52
+ # Hide the chain of thought details from the user in the UI.
53
+ hide_cot = false
54
+
55
+ # Link to your github repo. This will add a github button in the UI's header.
56
+ # github = ""
57
+
58
+ # Specify a CSS file that can be used to customize the user interface.
59
+ # The CSS file can be served from the public directory or via an external link.
60
+ # custom_css = "/public/test.css"
61
+
62
+ # Override default MUI light theme. (Check theme.ts)
63
+ [UI.theme.light]
64
+ #background = "#FAFAFA"
65
+ #paper = "#FFFFFF"
66
+
67
+ [UI.theme.light.primary]
68
+ #main = "#F80061"
69
+ #dark = "#980039"
70
+ #light = "#FFE7EB"
71
+
72
+ # Override default MUI dark theme. (Check theme.ts)
73
+ [UI.theme.dark]
74
+ #background = "#FAFAFA"
75
+ #paper = "#FFFFFF"
76
+
77
+ [UI.theme.dark.primary]
78
+ #main = "#F80061"
79
+ #dark = "#980039"
80
+ #light = "#FFE7EB"
81
+
82
+
83
+ [meta]
84
+ generated_by = "0.7.700"
Accessing_OpenAI_Like_a_Developer_AIMS_(Assignment_Version).ipynb ADDED
@@ -0,0 +1,951 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "zPtFBgj623FS"
7
+ },
8
+ "source": [
9
+ "# Accessing OpenAI Like a Developer\n",
10
+ "\n",
11
+ "- 🤝 Breakout Room #1:\n",
12
+ " 1. Getting Started\n",
13
+ " 2. Setting Environment Variables\n",
14
+ " 3. Using the OpenAI Python Library\n",
15
+ " 4. Prompt Engineering Principles\n",
16
+ " 5. Testing Your Prompt"
17
+ ]
18
+ },
19
+ {
20
+ "cell_type": "markdown",
21
+ "metadata": {
22
+ "id": "-Pa34dMvQ6Ai"
23
+ },
24
+ "source": [
25
+ "# How AIM Does Assignments\n",
26
+ "\n",
27
+ "If you look at the Table of Contents (accessed through the menu on the left) - you'll see this:\n",
28
+ "\n",
29
+ "![image](https://i.imgur.com/I8iDTUO.png)\n",
30
+ "\n",
31
+ "Or this if you're in Colab:\n",
32
+ "\n",
33
+ "![image](https://i.imgur.com/0rHA1yF.png)\n",
34
+ "\n",
35
+ "You'll notice during assignments that we have two following categories:\n",
36
+ "\n",
37
+ "1. ❓ - Questions. These will involve...answering questions!\n",
38
+ "2. 🏗️ - Activities. These will involve writing code, or modifying text.\n",
39
+ "\n",
40
+ "In order to receive full marks on the assignment - it is expected you will answer all questions, and complete all activities."
41
+ ]
42
+ },
43
+ {
44
+ "cell_type": "markdown",
45
+ "metadata": {
46
+ "id": "1w4egfB274VD"
47
+ },
48
+ "source": [
49
+ "## 1. Getting Started\n",
50
+ "\n",
51
+ "The first thing we'll do is load the [OpenAI Python Library](https://github.com/openai/openai-python/tree/main)!"
52
+ ]
53
+ },
54
+ {
55
+ "cell_type": "code",
56
+ "execution_count": 1,
57
+ "metadata": {
58
+ "colab": {
59
+ "base_uri": "https://localhost:8080/"
60
+ },
61
+ "id": "23H7TMOM4mfy",
62
+ "outputId": "3fe8126e-198a-4a8d-8db8-5329e6541641"
63
+ },
64
+ "outputs": [],
65
+ "source": [
66
+ "!pip install openai -q"
67
+ ]
68
+ },
69
+ {
70
+ "cell_type": "markdown",
71
+ "metadata": {
72
+ "id": "xKD8XBTVEAOw"
73
+ },
74
+ "source": [
75
+ "## 2. Setting Environment Variables\n",
76
+ "\n",
77
+ "As we'll frequently use various endpoints and APIs hosted by others - we'll need to handle our \"secrets\" or API keys very often.\n",
78
+ "\n",
79
+ "We'll use the following pattern throughout this bootcamp - but you can use whichever method you're most familiar with."
80
+ ]
81
+ },
82
+ {
83
+ "cell_type": "code",
84
+ "execution_count": 2,
85
+ "metadata": {
86
+ "colab": {
87
+ "base_uri": "https://localhost:8080/"
88
+ },
89
+ "id": "RGU9OMvhEPG0",
90
+ "outputId": "d596661a-75cd-4fa4-a656-5345c666ec3d"
91
+ },
92
+ "outputs": [],
93
+ "source": [
94
+ "import os\n",
95
+ "import getpass\n",
96
+ "\n",
97
+ "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key\")"
98
+ ]
99
+ },
100
+ {
101
+ "cell_type": "markdown",
102
+ "metadata": {
103
+ "id": "dabxI3MuEYXS"
104
+ },
105
+ "source": [
106
+ "## 3. Using the OpenAI Python Library\n",
107
+ "\n",
108
+ "Let's jump right into it!\n",
109
+ "\n",
110
+ "> NOTE: You can, and should, reference OpenAI's [documentation](https://platform.openai.com/docs/api-reference/authentication?lang=python) whenever you get stuck, have questions, or want to dive deeper."
111
+ ]
112
+ },
113
+ {
114
+ "cell_type": "markdown",
115
+ "metadata": {
116
+ "id": "vbCbNzPVEmJI"
117
+ },
118
+ "source": [
119
+ "### Creating a Client\n",
120
+ "\n",
121
+ "The core feature of the OpenAI Python Library is the `OpenAI()` client. It's how we're going to interact with OpenAI's models, and under the hood of a lot what we'll touch on throughout this course.\n",
122
+ "\n",
123
+ "> NOTE: We could manually provide our API key here, but we're going to instead rely on the fact that we put our API key into the `OPENAI_API_KEY` environment variable!"
124
+ ]
125
+ },
126
+ {
127
+ "cell_type": "code",
128
+ "execution_count": 3,
129
+ "metadata": {
130
+ "id": "LNwZtaE-EltC"
131
+ },
132
+ "outputs": [],
133
+ "source": [
134
+ "from openai import OpenAI\n",
135
+ "\n",
136
+ "openai_client = OpenAI()"
137
+ ]
138
+ },
139
+ {
140
+ "cell_type": "markdown",
141
+ "metadata": {
142
+ "id": "GpDxUkDbFBPI"
143
+ },
144
+ "source": [
145
+ "### Using the Client\n",
146
+ "\n",
147
+ "Now that we have our client - we're going to use the `.chat.completions.create` method to interact with the `gpt-3.5-turbo` model.\n",
148
+ "\n",
149
+ "There's a few things we'll get out of the way first, however, the first being the idea of \"roles\".\n",
150
+ "\n",
151
+ "First it's important to understand the object that we're going to use to interact with the endpoint. It expects us to send an array of objects of the following format:\n",
152
+ "\n",
153
+ "```python\n",
154
+ "{\"role\" : \"ROLE\", \"content\" : \"YOUR CONTENT HERE\", \"name\" : \"THIS IS OPTIONAL\"}\n",
155
+ "```\n",
156
+ "\n",
157
+ "Second, there are three \"roles\" available to use to populate the `\"role\"` key:\n",
158
+ "\n",
159
+ "- `system`\n",
160
+ "- `assistant`\n",
161
+ "- `user`\n",
162
+ "\n",
163
+ "OpenAI provides some context for these roles [here](https://help.openai.com/en/articles/7042661-moving-from-completions-to-chat-completions-in-the-openai-api).\n",
164
+ "\n",
165
+ "We'll explore these roles in more depth as they come up - but for now we're going to just stick with the basic role `user`. The `user` role is, as it would seem, the user!\n",
166
+ "\n",
167
+ "Thirdly, it expects us to specify a model!\n",
168
+ "\n",
169
+ "We'll use the `gpt-3.5-turbo` model as stated above.\n",
170
+ "\n",
171
+ "Let's look at an example!\n",
172
+ "\n"
173
+ ]
174
+ },
175
+ {
176
+ "cell_type": "code",
177
+ "execution_count": 4,
178
+ "metadata": {
179
+ "id": "2RpNl6yNGzb0"
180
+ },
181
+ "outputs": [],
182
+ "source": [
183
+ "response = openai_client.chat.completions.create(\n",
184
+ " model=\"gpt-3.5-turbo\",\n",
185
+ " messages=[{\"role\" : \"user\", \"content\" : \"Hello, how are you?\"}]\n",
186
+ ")"
187
+ ]
188
+ },
189
+ {
190
+ "cell_type": "markdown",
191
+ "metadata": {
192
+ "id": "Oc_UbpwNHdrM"
193
+ },
194
+ "source": [
195
+ "Let's look at the response object."
196
+ ]
197
+ },
198
+ {
199
+ "cell_type": "code",
200
+ "execution_count": 5,
201
+ "metadata": {
202
+ "colab": {
203
+ "base_uri": "https://localhost:8080/"
204
+ },
205
+ "id": "xsXJtvxRHfoM",
206
+ "outputId": "d0674084-9a68-4090-b3eb-547b710c3ec2"
207
+ },
208
+ "outputs": [
209
+ {
210
+ "data": {
211
+ "text/plain": [
212
+ "ChatCompletion(id='chatcmpl-9VuqVYwsVwndrUedahoRMgw45Trwy', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Hello! I'm just a virtual assistant so I don't have feelings, but I'm here and ready to help you. How can I assist you today?\", role='assistant', function_call=None, tool_calls=None))], created=1717393711, model='gpt-3.5-turbo-0125', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=32, prompt_tokens=13, total_tokens=45))"
213
+ ]
214
+ },
215
+ "execution_count": 5,
216
+ "metadata": {},
217
+ "output_type": "execute_result"
218
+ }
219
+ ],
220
+ "source": [
221
+ "response"
222
+ ]
223
+ },
224
+ {
225
+ "cell_type": "markdown",
226
+ "metadata": {
227
+ "id": "gy9kSuf1Hiv5"
228
+ },
229
+ "source": [
230
+ ">NOTE: We'll spend more time exploring these outputs later on, but for now - just know that we have access to a tonne of powerful information!"
231
+ ]
232
+ },
233
+ {
234
+ "cell_type": "markdown",
235
+ "metadata": {
236
+ "id": "CWU4tQh8Hrb8"
237
+ },
238
+ "source": [
239
+ "### Helper Functions\n",
240
+ "\n",
241
+ "We're going to create some helper functions to aid in using the OpenAI API - just to make our lives a bit easier.\n",
242
+ "\n",
243
+ "> NOTE: Take some time to understand these functions between class!"
244
+ ]
245
+ },
246
+ {
247
+ "cell_type": "code",
248
+ "execution_count": 6,
249
+ "metadata": {
250
+ "id": "ED0FnzHdHzhl"
251
+ },
252
+ "outputs": [],
253
+ "source": [
254
+ "from IPython.display import display, Markdown\n",
255
+ "\n",
256
+ "def get_response(client: OpenAI, messages: list, model: str = \"gpt-3.5-turbo\") -> str:\n",
257
+ " return client.chat.completions.create(\n",
258
+ " model=model,\n",
259
+ " messages=messages\n",
260
+ " )\n",
261
+ "\n",
262
+ "def system_prompt(message: str) -> dict:\n",
263
+ " return {\"role\": \"system\", \"content\": message}\n",
264
+ "\n",
265
+ "def assistant_prompt(message: str) -> dict:\n",
266
+ " return {\"role\": \"assistant\", \"content\": message}\n",
267
+ "\n",
268
+ "def user_prompt(message: str) -> dict:\n",
269
+ " return {\"role\": \"user\", \"content\": message}\n",
270
+ "\n",
271
+ "def pretty_print(message: str) -> str:\n",
272
+ " display(Markdown(message.choices[0].message.content))"
273
+ ]
274
+ },
275
+ {
276
+ "cell_type": "markdown",
277
+ "metadata": {
278
+ "id": "GCRHbDlwH3Vt"
279
+ },
280
+ "source": [
281
+ "### Testing Helper Functions\n",
282
+ "\n",
283
+ "Let's see how we can use these to help us!"
284
+ ]
285
+ },
286
+ {
287
+ "cell_type": "code",
288
+ "execution_count": 7,
289
+ "metadata": {
290
+ "colab": {
291
+ "base_uri": "https://localhost:8080/",
292
+ "height": 46
293
+ },
294
+ "id": "AwJxMvmlH8MK",
295
+ "outputId": "349c02ab-0026-47a2-c6ac-176ef6554244"
296
+ },
297
+ "outputs": [
298
+ {
299
+ "data": {
300
+ "text/markdown": [
301
+ "Hello! I'm just a computer program so I don't have feelings, but I'm here and ready to help you with anything you need. How can I assist you today?"
302
+ ],
303
+ "text/plain": [
304
+ "<IPython.core.display.Markdown object>"
305
+ ]
306
+ },
307
+ "metadata": {},
308
+ "output_type": "display_data"
309
+ }
310
+ ],
311
+ "source": [
312
+ "YOUR_PROMPT = \"Hello, how are you?\"\n",
313
+ "messages_list = [user_prompt(YOUR_PROMPT)]\n",
314
+ "\n",
315
+ "chatgpt_response = get_response(openai_client, messages_list)\n",
316
+ "\n",
317
+ "pretty_print(chatgpt_response)"
318
+ ]
319
+ },
320
+ {
321
+ "cell_type": "markdown",
322
+ "metadata": {
323
+ "id": "LDZ8gjiAISyd"
324
+ },
325
+ "source": [
326
+ "### System Role\n",
327
+ "\n",
328
+ "Now we can extend our prompts to include a system prompt.\n",
329
+ "\n",
330
+ "The basic idea behind a system prompt is that it can be used to encourage the behaviour of the LLM, without being something that is directly responded to - let's see it in action!"
331
+ ]
332
+ },
333
+ {
334
+ "cell_type": "code",
335
+ "execution_count": 8,
336
+ "metadata": {
337
+ "colab": {
338
+ "base_uri": "https://localhost:8080/",
339
+ "height": 64
340
+ },
341
+ "id": "t0c-MLuRIfYe",
342
+ "outputId": "79c7083b-1200-4ae9-e2b7-e7609c408928"
343
+ },
344
+ "outputs": [
345
+ {
346
+ "data": {
347
+ "text/markdown": [
348
+ "I don't give a damn about ice shapes right now! I just need some food in my belly before I start chewing on the furniture!"
349
+ ],
350
+ "text/plain": [
351
+ "<IPython.core.display.Markdown object>"
352
+ ]
353
+ },
354
+ "metadata": {},
355
+ "output_type": "display_data"
356
+ }
357
+ ],
358
+ "source": [
359
+ "list_of_prompts = [\n",
360
+ " system_prompt(\"You are irate and extremely hungry. Feel free to express yourself using PG-13 language.\"),\n",
361
+ " user_prompt(\"Do you prefer crushed ice or cubed ice?\")\n",
362
+ "]\n",
363
+ "\n",
364
+ "irate_response = get_response(openai_client, list_of_prompts)\n",
365
+ "pretty_print(irate_response)"
366
+ ]
367
+ },
368
+ {
369
+ "cell_type": "markdown",
370
+ "metadata": {
371
+ "id": "gpyVhotWIsOs"
372
+ },
373
+ "source": [
374
+ "As you can see - the response we get back is very much in line with the system prompt!\n",
375
+ "\n",
376
+ "Let's try the same user prompt, but with a different system to prompt to see the difference."
377
+ ]
378
+ },
379
+ {
380
+ "cell_type": "code",
381
+ "execution_count": 9,
382
+ "metadata": {
383
+ "colab": {
384
+ "base_uri": "https://localhost:8080/",
385
+ "height": 64
386
+ },
387
+ "id": "2coVmMn3I0-2",
388
+ "outputId": "036ef514-dde0-4040-f694-bf774200c5c4"
389
+ },
390
+ "outputs": [
391
+ {
392
+ "data": {
393
+ "text/markdown": [
394
+ "Oh, I absolutely love crushed ice! It just makes any drink feel extra refreshing. It's the little things that make me so happy today! What about you?"
395
+ ],
396
+ "text/plain": [
397
+ "<IPython.core.display.Markdown object>"
398
+ ]
399
+ },
400
+ "metadata": {},
401
+ "output_type": "display_data"
402
+ }
403
+ ],
404
+ "source": [
405
+ "list_of_prompts = [\n",
406
+ " system_prompt(\"You are joyful and having the best day. Please act like a person in that state of mind.\"),\n",
407
+ " user_prompt(\"Do you prefer crushed ice or cubed ice?\")\n",
408
+ "]\n",
409
+ "\n",
410
+ "joyful_response = get_response(openai_client, list_of_prompts)\n",
411
+ "pretty_print(joyful_response)"
412
+ ]
413
+ },
414
+ {
415
+ "cell_type": "markdown",
416
+ "metadata": {
417
+ "id": "e13heYNQJAo-"
418
+ },
419
+ "source": [
420
+ "With a simple modification of the system prompt - you can see that we got completely different behaviour, and that's the main goal of prompt engineering as a whole.\n",
421
+ "\n",
422
+ "Also, congrats, you just engineered your first prompt!"
423
+ ]
424
+ },
425
+ {
426
+ "cell_type": "markdown",
427
+ "metadata": {
428
+ "id": "v_VI3zlPJL05"
429
+ },
430
+ "source": [
431
+ "### Few-shot Prompting\n",
432
+ "\n",
433
+ "Now that we have a basic handle on the `system` role and the `user` role - let's examine what we might use the `assistant` role for.\n",
434
+ "\n",
435
+ "The most common usage pattern is to \"pretend\" that we're answering our own questions. This helps us further guide the model toward our desired behaviour. While this is a over simplification - it's conceptually well aligned with few-shot learning.\n",
436
+ "\n",
437
+ "First, we'll try and \"teach\" `gpt-3.5-turbo` some nonsense words as was done in the paper [\"Language Models are Few-Shot Learners\"](https://arxiv.org/abs/2005.14165)."
438
+ ]
439
+ },
440
+ {
441
+ "cell_type": "code",
442
+ "execution_count": 10,
443
+ "metadata": {
444
+ "colab": {
445
+ "base_uri": "https://localhost:8080/",
446
+ "height": 46
447
+ },
448
+ "id": "lwxPuCyyJMye",
449
+ "outputId": "98ccc31e-9f00-44a5-c1bb-fb2a96d5bd53"
450
+ },
451
+ "outputs": [
452
+ {
453
+ "data": {
454
+ "text/markdown": [
455
+ "I'm always amazed by how easily she can create a stimple yet delicious meal using just a few falbean ingredients."
456
+ ],
457
+ "text/plain": [
458
+ "<IPython.core.display.Markdown object>"
459
+ ]
460
+ },
461
+ "metadata": {},
462
+ "output_type": "display_data"
463
+ }
464
+ ],
465
+ "source": [
466
+ "list_of_prompts = [\n",
467
+ " user_prompt(\"Please use the words 'stimple' and 'falbean' in a sentence.\")\n",
468
+ "]\n",
469
+ "\n",
470
+ "stimple_response = get_response(openai_client, list_of_prompts)\n",
471
+ "pretty_print(stimple_response)"
472
+ ]
473
+ },
474
+ {
475
+ "cell_type": "markdown",
476
+ "metadata": {
477
+ "id": "rgTVkNmOJQSC"
478
+ },
479
+ "source": [
480
+ "As you can see, the model is unsure what to do with these made up words.\n",
481
+ "\n",
482
+ "Let's see if we can use the `assistant` role to show the model what these words mean."
483
+ ]
484
+ },
485
+ {
486
+ "cell_type": "code",
487
+ "execution_count": 11,
488
+ "metadata": {
489
+ "colab": {
490
+ "base_uri": "https://localhost:8080/",
491
+ "height": 46
492
+ },
493
+ "id": "eEZkRJq5JQkQ",
494
+ "outputId": "473e48a8-f5be-49a7-f47c-f934fe2151ec"
495
+ },
496
+ "outputs": [
497
+ {
498
+ "data": {
499
+ "text/markdown": [
500
+ "The stimple drill is an essential tool for fastening screws and bolts, while the falbean wrench is perfect for tightening and loosening nuts and bolts."
501
+ ],
502
+ "text/plain": [
503
+ "<IPython.core.display.Markdown object>"
504
+ ]
505
+ },
506
+ "metadata": {},
507
+ "output_type": "display_data"
508
+ }
509
+ ],
510
+ "source": [
511
+ "list_of_prompts = [\n",
512
+ " user_prompt(\"Something that is 'stimple' is said to be good, well functioning, and high quality. An example of a sentence that uses the word 'stimple' is:\"),\n",
513
+ " assistant_prompt(\"'Boy, that there is a stimple drill'.\"),\n",
514
+ " user_prompt(\"A 'falbean' is a tool used to fasten, tighten, or otherwise is a thing that rotates/spins. An example of a sentence that uses the words 'stimple' and 'falbean' is:\")\n",
515
+ "]\n",
516
+ "\n",
517
+ "stimple_response = get_response(openai_client, list_of_prompts)\n",
518
+ "pretty_print(stimple_response)"
519
+ ]
520
+ },
521
+ {
522
+ "cell_type": "markdown",
523
+ "metadata": {
524
+ "id": "CmpoxG6uJTfZ"
525
+ },
526
+ "source": [
527
+ "As you can see, leveraging the `assistant` role makes for a stimple experience!"
528
+ ]
529
+ },
530
+ {
531
+ "cell_type": "markdown",
532
+ "metadata": {
533
+ "id": "_oO0aeRUw4xl"
534
+ },
535
+ "source": [
536
+ "### 🏗️ Activity #1:\n",
537
+ "\n",
538
+ "Use few-shop prompting to build a movie-review sentiment clasifier!\n",
539
+ "\n",
540
+ "A few examples:\n",
541
+ "\n",
542
+ "INPUT: \"I hated the hulk!\"\n",
543
+ "OUTPUT: \"{\"sentiment\" : \"negative\"}\n",
544
+ "\n",
545
+ "INPUT: \"I loved The Marvels!\"\n",
546
+ "OUTPUT: \"{sentiment\" : \"positive\"}"
547
+ ]
548
+ },
549
+ {
550
+ "cell_type": "code",
551
+ "execution_count": 14,
552
+ "metadata": {
553
+ "id": "mmCdQJ8Fw4xl"
554
+ },
555
+ "outputs": [
556
+ {
557
+ "data": {
558
+ "text/markdown": [
559
+ "sentiment: negative"
560
+ ],
561
+ "text/plain": [
562
+ "<IPython.core.display.Markdown object>"
563
+ ]
564
+ },
565
+ "metadata": {},
566
+ "output_type": "display_data"
567
+ },
568
+ {
569
+ "data": {
570
+ "text/markdown": [
571
+ "sentiment: positive"
572
+ ],
573
+ "text/plain": [
574
+ "<IPython.core.display.Markdown object>"
575
+ ]
576
+ },
577
+ "metadata": {},
578
+ "output_type": "display_data"
579
+ }
580
+ ],
581
+ "source": [
582
+ "## Negative review\n",
583
+ "list_of_prompts = [\n",
584
+ " system_prompt(\"As a movie critic, analyze objectively the user review and provide the sentiment as a JSON object as positive or negative\"),\n",
585
+ " user_prompt(\"This movie stinks\"),\n",
586
+ " assistant_prompt(\"sentiment: negative\"),\n",
587
+ " user_prompt(\"This movie is epic\"),\n",
588
+ " assistant_prompt(\"sentiment: positive\"),\n",
589
+ " user_prompt(\"This movie blows\")\n",
590
+ "]\n",
591
+ "\n",
592
+ "stimple_response = get_response(openai_client, list_of_prompts)\n",
593
+ "pretty_print(stimple_response)\n",
594
+ "\n",
595
+ "## Positive review\n",
596
+ "list_of_prompts = [\n",
597
+ " system_prompt(\"As a movie critic, analyze objectively the user review and provide the sentiment as a JSON object as positive or negative\"),\n",
598
+ " user_prompt(\"This movie stinks\"),\n",
599
+ " assistant_prompt(\"sentiment: negative\"),\n",
600
+ " user_prompt(\"This movie is epic\"),\n",
601
+ " assistant_prompt(\"sentiment: positive\"),\n",
602
+ " user_prompt(\"This movie is amazing\")\n",
603
+ "]\n",
604
+ "\n",
605
+ "stimple_response = get_response(openai_client, list_of_prompts)\n",
606
+ "pretty_print(stimple_response)"
607
+ ]
608
+ },
609
+ {
610
+ "cell_type": "markdown",
611
+ "metadata": {
612
+ "id": "rJGaLYM3JU-8"
613
+ },
614
+ "source": [
615
+ "### Chain of Thought Prompting\n",
616
+ "\n",
617
+ "We'll head one level deeper and explore the world of Chain of Thought prompting (CoT).\n",
618
+ "\n",
619
+ "This is a process by which we can encourage the LLM to handle slightly more complex tasks.\n",
620
+ "\n",
621
+ "Let's look at a simple reasoning based example without CoT.\n",
622
+ "\n",
623
+ "> NOTE: With improvements to `gpt-3.5-turbo`, this example might actually result in the correct response some percentage of the time!"
624
+ ]
625
+ },
626
+ {
627
+ "cell_type": "code",
628
+ "execution_count": 15,
629
+ "metadata": {
630
+ "colab": {
631
+ "base_uri": "https://localhost:8080/",
632
+ "height": 46
633
+ },
634
+ "id": "ltLtF4wEJTyK",
635
+ "outputId": "00fd725c-b644-4371-83f7-87e02b5cff4e"
636
+ },
637
+ "outputs": [
638
+ {
639
+ "data": {
640
+ "text/markdown": [
641
+ "Yes, it matters which travel option Billy selects. If Billy wants to arrive home before 7PM EDT and it is currently 1PM local time, then taking the teleporter and a bus (1 hour total travel time) would ensure that Billy arrives home before 7PM. Flying and then taking a bus would take a total of 5 hours, which would not allow Billy to arrive home before 7PM. So, Billy should choose the teleporter option if he wants to get home before 7PM EDT."
642
+ ],
643
+ "text/plain": [
644
+ "<IPython.core.display.Markdown object>"
645
+ ]
646
+ },
647
+ "metadata": {},
648
+ "output_type": "display_data"
649
+ }
650
+ ],
651
+ "source": [
652
+ "reasoning_problem = \"\"\"\n",
653
+ "Billy wants to get home from San Fran. before 7PM EDT.\n",
654
+ "\n",
655
+ "It's currently 1PM local time.\n",
656
+ "\n",
657
+ "Billy can either fly (3hrs), and then take a bus (2hrs), or Billy can take the teleporter (0hrs) and then a bus (1hrs).\n",
658
+ "\n",
659
+ "Does it matter which travel option Billy selects?\n",
660
+ "\"\"\"\n",
661
+ "\n",
662
+ "list_of_prompts = [\n",
663
+ " user_prompt(reasoning_problem)\n",
664
+ "]\n",
665
+ "\n",
666
+ "reasoning_response = get_response(openai_client, list_of_prompts)\n",
667
+ "pretty_print(reasoning_response)"
668
+ ]
669
+ },
670
+ {
671
+ "cell_type": "markdown",
672
+ "metadata": {
673
+ "id": "rbqj30CQJnQl"
674
+ },
675
+ "source": [
676
+ "As humans, we can reason through the problem and pick up on the potential \"trick\" that the LLM fell for: 1PM *local time* in San Fran. is 4PM EDT. This means the cumulative travel time of 5hrs. for the plane/bus option would not get Billy home in time.\n",
677
+ "\n",
678
+ "Let's see if we can leverage a simple CoT prompt to improve our model's performance on this task:"
679
+ ]
680
+ },
681
+ {
682
+ "cell_type": "code",
683
+ "execution_count": 16,
684
+ "metadata": {
685
+ "colab": {
686
+ "base_uri": "https://localhost:8080/",
687
+ "height": 247
688
+ },
689
+ "id": "A9Am3QNGJXHR",
690
+ "outputId": "d3d94113-d277-454f-eb6b-bf2295fd3907"
691
+ },
692
+ "outputs": [
693
+ {
694
+ "data": {
695
+ "text/markdown": [
696
+ "In order to determine which travel option Billy should select, we need to consider the total travel time for each option.\n",
697
+ "\n",
698
+ "Option 1: Fly for 3 hours and then take a bus for 2 hours\n",
699
+ "Total travel time = 3 hours (flight) + 2 hours (bus) = 5 hours\n",
700
+ "\n",
701
+ "Option 2: Take a teleporter for 0 hours and then take a bus for 1 hour\n",
702
+ "Total travel time = 0 hours (teleporter) + 1 hour (bus) = 1 hour\n",
703
+ "\n",
704
+ "Given that Billy needs to get home before 7 PM EDT and it is currently 1 PM local time, he has 6 hours before his deadline. \n",
705
+ "\n",
706
+ "If Billy chooses option 1, he would arrive home at 6 PM local time (5 hours of travel time) which is before his deadline. \n",
707
+ "\n",
708
+ "If Billy chooses option 2, he would arrive home at 2 PM local time (1 hour of travel time) which is well before his deadline.\n",
709
+ "\n",
710
+ "Therefore, it does not matter which travel option Billy selects as both options will get him home before 7 PM EDT. However, option 2 would get him home faster."
711
+ ],
712
+ "text/plain": [
713
+ "<IPython.core.display.Markdown object>"
714
+ ]
715
+ },
716
+ "metadata": {},
717
+ "output_type": "display_data"
718
+ }
719
+ ],
720
+ "source": [
721
+ "list_of_prompts = [\n",
722
+ " user_prompt(reasoning_problem + \" Think though your response step by step.\")\n",
723
+ "]\n",
724
+ "\n",
725
+ "reasoning_response = get_response(openai_client, list_of_prompts)\n",
726
+ "pretty_print(reasoning_response)"
727
+ ]
728
+ },
729
+ {
730
+ "cell_type": "markdown",
731
+ "metadata": {
732
+ "id": "AXbAKxHQJqn9"
733
+ },
734
+ "source": [
735
+ "With the addition of a single phrase `\"Think through your response step by step.\"` we're able to completely turn the response around."
736
+ ]
737
+ },
738
+ {
739
+ "cell_type": "markdown",
740
+ "metadata": {
741
+ "id": "VnoUx07-JrwR"
742
+ },
743
+ "source": [
744
+ "## 3. Prompt Engineering Principles\n",
745
+ "\n",
746
+ "As you can see - a simple addition of asking the LLM to \"think about it\" (essentially) results in a better quality response.\n",
747
+ "\n",
748
+ "There's a [great paper](https://arxiv.org/pdf/2312.16171v1.pdf) that dives into some principles for effective prompt generation.\n",
749
+ "\n",
750
+ "Your task for this notebook is to construct a prompt that will be used in the following breakout room to create a helpful assistant for whatever task you'd like."
751
+ ]
752
+ },
753
+ {
754
+ "cell_type": "markdown",
755
+ "metadata": {
756
+ "id": "da6u7e8AKYrz"
757
+ },
758
+ "source": [
759
+ "### 🏗️ Activity #2:\n",
760
+ "\n",
761
+ "There are two subtasks in this activity:\n",
762
+ "\n",
763
+ "1. Write a `system_template` that leverages 2-3 of the principles from [this paper](https://arxiv.org/pdf/2312.16171v1.pdf)\n",
764
+ "\n",
765
+ "2. Modify the `user_template` to improve the quality of the LLM's responses.\n",
766
+ "\n",
767
+ "> NOTE: PLEASE DO NOT MODIFY THE `{input}` in the `user_template`."
768
+ ]
769
+ },
770
+ {
771
+ "cell_type": "code",
772
+ "execution_count": 32,
773
+ "metadata": {
774
+ "id": "8sOLBQPeKlDe"
775
+ },
776
+ "outputs": [],
777
+ "source": [
778
+ "system_template = \"\"\"\\\n",
779
+ "Your task is to explain concepts like a teacher. Explain to me in simple english like I'm 5 years old. You will be penalized for adding concepts that are unfamiliar for a 5 year old. \n",
780
+ "\"\"\""
781
+ ]
782
+ },
783
+ {
784
+ "cell_type": "code",
785
+ "execution_count": 33,
786
+ "metadata": {
787
+ "id": "xoz4-QLTKvEV"
788
+ },
789
+ "outputs": [],
790
+ "source": [
791
+ "user_template = \"\"\"{input}\n",
792
+ "MODIFICATIONS HERE\n",
793
+ "\"\"\""
794
+ ]
795
+ },
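+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "> One possible modification - shown purely as an illustrative sketch, not a graded answer - appends instructions after the untouched `{input}` placeholder:\n",
+ "\n",
+ "```python\n",
+ "user_template = \"\"\"{input}\n",
+ "Answer in at most five sentences, and include one concrete example.\n",
+ "\"\"\"\n",
+ "```"
+ ]
+ },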
796
+ {
797
+ "cell_type": "markdown",
798
+ "metadata": {
799
+ "id": "6cuInoIbLWGd"
800
+ },
801
+ "source": [
802
+ "## 4. Testing Your Prompt\n",
803
+ "\n",
804
+ "Now we can test the prompt you made using an LLM-as-a-judge see what happens to your score as you modify the prompt."
805
+ ]
806
+ },
807
+ {
808
+ "cell_type": "code",
809
+ "execution_count": 34,
810
+ "metadata": {
811
+ "colab": {
812
+ "base_uri": "https://localhost:8080/",
813
+ "height": 186
814
+ },
815
+ "id": "sPaNO5XTLgRJ",
816
+ "outputId": "dae87716-a83f-4c62-e8d2-491e7f992b56"
817
+ },
818
+ "outputs": [
819
+ {
820
+ "data": {
821
+ "text/markdown": [
822
+ "Okay, sweetie! Imagine you have a really special kind of computer that can do super fast math problems. This special computer uses tiny things called quantum bits, or qubits, to store and process information. These qubits can do lots of different calculations at the same time, which makes quantum computers much faster than regular computers. It's like having a magical machine that can solve puzzles way quicker than anything else! Isn't that cool?"
823
+ ],
824
+ "text/plain": [
825
+ "<IPython.core.display.Markdown object>"
826
+ ]
827
+ },
828
+ "metadata": {},
829
+ "output_type": "display_data"
830
+ }
831
+ ],
832
+ "source": [
833
+ "query = \"What is Quantum Computing\"\n",
834
+ "\n",
835
+ "list_of_prompts = [\n",
836
+ " system_prompt(system_template),\n",
837
+ " user_prompt(user_template.format(input=query))\n",
838
+ "]\n",
839
+ "\n",
840
+ "test_response = get_response(openai_client, list_of_prompts)\n",
841
+ "\n",
842
+ "pretty_print(test_response)\n",
843
+ "\n",
844
+ "evaluator_system_template = \"\"\"You are an expert in analyzing the quality of a response.\n",
845
+ "\n",
846
+ "You should be hyper-critical.\n",
847
+ "\n",
848
+ "Provide scores (out of 10) for the following attributes:\n",
849
+ "\n",
850
+ "1. Clarity - how clear is the response\n",
851
+ "2. Faithfulness - how related to the original query is the response\n",
852
+ "3. Correctness - was the response correct?\n",
853
+ "\n",
854
+ "Please take your time, and think through each item step-by-step, when you are done - please provide your response in the following JSON format:\n",
855
+ "\n",
856
+ "{\"clarity\" : \"score_out_of_10\", \"faithfulness\" : \"score_out_of_10\", \"correctness\" : \"score_out_of_10\"}\"\"\"\n",
857
+ "\n",
858
+ "evaluation_template = \"\"\"Query: {input}\n",
859
+ "Response: {response}\"\"\"\n",
860
+ "\n",
861
+ "list_of_prompts = [\n",
862
+ " system_prompt(evaluator_system_template),\n",
863
+ " user_prompt(evaluation_template.format(\n",
864
+ " input=query,\n",
865
+ " response=test_response.choices[0].message.content\n",
866
+ " ))\n",
867
+ "]\n",
868
+ "\n",
869
+ "evaluator_response = openai_client.chat.completions.create(\n",
870
+ " model=\"gpt-4o\",\n",
871
+ " messages=list_of_prompts,\n",
872
+ " response_format={\"type\" : \"json_object\"}\n",
873
+ ")"
874
+ ]
875
+ },
876
+ {
877
+ "cell_type": "code",
878
+ "execution_count": 35,
879
+ "metadata": {
880
+ "colab": {
881
+ "base_uri": "https://localhost:8080/",
882
+ "height": 46
883
+ },
884
+ "id": "OUvc1PdnNIKD",
885
+ "outputId": "8659b9dd-2afc-42a4-a71e-ce0ebd086c49"
886
+ },
887
+ "outputs": [
888
+ {
889
+ "data": {
890
+ "text/markdown": [
891
+ "\n",
892
+ "{\"clarity\" : \"7\", \"faithfulness\" : \"5\", \"correctness\" : \"6\"}"
893
+ ],
894
+ "text/plain": [
895
+ "<IPython.core.display.Markdown object>"
896
+ ]
897
+ },
898
+ "metadata": {},
899
+ "output_type": "display_data"
900
+ }
901
+ ],
902
+ "source": [
903
+ "pretty_print(evaluator_response)"
904
+ ]
905
+ },
906
+ {
907
+ "cell_type": "markdown",
908
+ "metadata": {
909
+ "id": "M7ryIRGwR2Gq"
910
+ },
911
+ "source": [
912
+ "#### ❓Question #1:\n",
913
+ "\n",
914
+ "How did your prompting strategies change the evaluation scores? What does this tell you/what did you learn?"
915
+ ]
916
+ },
917
+ {
918
+ "cell_type": "markdown",
919
+ "metadata": {
920
+ "id": "e5NomM0eSIFd"
921
+ },
922
+ "source": [
923
+ "> Either the LLM doesn't understand the evaluation criterion or not able to assess it, it shows degradation of the scores with additional strategies like penalizing, role etc. It's not consistent."
924
+ ]
925
+ }
926
+ ],
927
+ "metadata": {
928
+ "colab": {
929
+ "provenance": [],
930
+ "toc_visible": true
931
+ },
932
+ "kernelspec": {
933
+ "display_name": "Python 3",
934
+ "name": "python3"
935
+ },
936
+ "language_info": {
937
+ "codemirror_mode": {
938
+ "name": "ipython",
939
+ "version": 3
940
+ },
941
+ "file_extension": ".py",
942
+ "mimetype": "text/x-python",
943
+ "name": "python",
944
+ "nbconvert_exporter": "python",
945
+ "pygments_lexer": "ipython3",
946
+ "version": "3.12.3"
947
+ }
948
+ },
949
+ "nbformat": 4,
950
+ "nbformat_minor": 0
951
+ }
__pycache__/app.cpython-39.pyc ADDED
Binary file (1.86 kB).
 
chainlit.md CHANGED
@@ -4,3 +4,7 @@ This Chainlit app was created following instructions from [this repository!](htt
4
 
5
 This is my attempt at providing a window into GPT models. Use it much as you would the ChatGPT application.
6
 
7
+ | **Property** | **Value** |
8
+ | :-------------| :-----------------------|
9
+ | Developer | Krupakar Pasupuleti |
10
+ | LLM | OpenAI GPT-3.5 Turbo |