Dataset columns:
- readme_url: string (lengths 60-79)
- sentence: sequence
- token: sequence
- text: string (lengths 9-6.51k)
- url: string (lengths 30-49)
- level of complexity: int64 (range -1 to 2)
- topic: sequence
https://raw.githubusercontent.com/ollama/ollama/main/README.md
[ [ "window", "coming", "soon", "!", ",", "install", "ollama", "window", "via", "wsl2", "." ], [ "window coming soon !", ", install ollama window via wsl2 ." ] ]
[ [ "window", "coming", "soon", "!", ",", "install", "ollama", "window", "via", "wsl2", "." ], [ "window coming soon !", ", install ollama window via wsl2 ." ] ]
Windows: Coming soon! For now, you can install Ollama on Windows via WSL2.
https://github.com/ollama/ollama
-1
[ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ]
https://raw.githubusercontent.com/ollama/ollama/main/README.md
[ [ "linux", "&", "wsl2", "``", "`", "curl", "http", ":", "//ollama.ai/install.sh", "|", "sh", "``", "`", "[", "manual", "install", "instruction", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/linux.md", ")" ], [ "linux & wsl2 `` ` curl http : //ollama.ai/install.sh | sh `` ` [ manual install instruction ] ( http : //github.com/jmorganca/ollama/blob/main/docs/linux.md )" ] ]
[ [ "linux", "&", "wsl2", "``", "`", "curl", "http", ":", "//ollama.ai/install.sh", "|", "sh", "``", "`", "[", "manual", "install", "instruction", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/linux.md", ")" ], [ "linux & wsl2 `` ` curl http : //ollama.ai/install.sh | sh `` ` [ manual install instruction ] ( http : //github.com/jmorganca/ollama/blob/main/docs/linux.md )" ] ]
Linux & WSL2 ``` curl https://ollama.ai/install.sh | sh ``` [Manual install instructions](https://github.com/jmorganca/ollama/blob/main/docs/linux.md)
https://github.com/ollama/ollama
-1
[ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ]
https://raw.githubusercontent.com/ollama/ollama/main/README.md
[ [ "building", "install", "`", "cmake", "`", "`", "go", "`", ":", "``", "`", "brew", "install", "cmake", "go", "``", "`", "generate", "dependency", ":", "``", "`", "go", "generate", "./", "...", "``", "`", "build", "binary", ":", "``", "`", "go", "build", ".", "``", "`", "detailed", "instruction", "found", "[", "developer", "guide", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/development.md", ")" ], [ "building install ` cmake ` ` go ` : `` ` brew install cmake go `` ` generate dependency : `` ` go generate ./ ... `` ` build binary : `` ` go build .", "`` ` detailed instruction found [ developer guide ] ( http : //github.com/jmorganca/ollama/blob/main/docs/development.md )" ] ]
[ [ "building", "install", "`", "cmake", "`", "`", "go", "`", ":", "``", "`", "brew", "install", "cmake", "go", "``", "`", "generate", "dependency", ":", "``", "`", "go", "generate", "./", "...", "``", "`", "build", "binary", ":", "``", "`", "go", "build", ".", "``", "`", "detailed", "instruction", "found", "[", "developer", "guide", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/development.md", ")" ], [ "building install ` cmake ` ` go ` : `` ` brew install cmake go `` ` generate dependency : `` ` go generate ./ ... `` ` build binary : `` ` go build .", "`` ` detailed instruction found [ developer guide ] ( http : //github.com/jmorganca/ollama/blob/main/docs/development.md )" ] ]
Building Install `cmake` and `go`: ``` brew install cmake go ``` Then generate dependencies: ``` go generate ./... ``` Then build the binary: ``` go build . ``` More detailed instructions can be found in the [developer guide](https://github.com/jmorganca/ollama/blob/main/docs/development.md)
https://github.com/ollama/ollama
-1
[ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ]
https://raw.githubusercontent.com/ollama/ollama/main/README.md
[ [ "extension", "&", "plugins", "-", "[", "raycast", "extension", "]", "(", "http", ":", "//github.com/massimilianopasquini97/raycast_ollama", ")", "-", "[", "discollama", "]", "(", "http", ":", "//github.com/mxyng/discollama", ")", "(", "discord", "bot", "inside", "ollama", "discord", "channel", ")", "-", "[", "continue", "]", "(", "http", ":", "//github.com/continuedev/continue", ")", "-", "[", "obsidian", "ollama", "plugin", "]", "(", "http", ":", "//github.com/hinterdupfinger/obsidian-ollama", ")", "-", "[", "logseq", "ollama", "plugin", "]", "(", "http", ":", "//github.com/omagdy7/ollama-logseq", ")", "-", "[", "dagger", "chatbot", "]", "(", "http", ":", "//github.com/samalba/dagger-chatbot", ")", "-", "[", "discord", "ai", "bot", "]", "(", "http", ":", "//github.com/mekb-turtle/discord-ai-bot", ")", "-", "[", "ollama", "telegram", "bot", "]", "(", "http", ":", "//github.com/ruecat/ollama-telegram", ")", "-", "[", "ha", "ollama", "conversation", "]", "(", "http", ":", "//github.com/ej52/hass-ollama-conversation", ")", "-", "[", "rivet", "plugin", "]", "(", "http", ":", "//github.com/abrenneke/rivet-plugin-ollama", ")", "-", "[", "llama", "coder", "]", "(", "http", ":", "//github.com/ex3ndr/llama-coder", ")", "(", "copilot", "alternative", "using", "ollama", ")", "-", "[", "obsidian", "bmo", "chatbot", "plugin", "]", "(", "http", ":", "//github.com/longy2k/obsidian-bmo-chatbot", ")", "-", "[", "open", "interpreter", "]", "(", "http", ":", "//docs.openinterpreter.com/language-model-setup/local-models/ollama", ")", "-", "[", "twinny", "]", "(", "http", ":", "//github.com/rjmacarthy/twinny", ")", "(", "copilot", "copilot", "chat", "alternative", "using", "ollama", ")", "-", "[", "wingman-ai", "]", "(", "http", ":", "//github.com/russellcanfield/wingman-ai", ")", "(", "copilot", "code", "chat", "alternative", "using", "ollama", "huggingface", ")" ], [ "extension & plugins - [ raycast extension ] ( http : //github.com/massimilianopasquini97/raycast_ollama ) - [ discollama ] ( http : //github.com/mxyng/discollama ) ( discord bot inside ollama discord channel ) - [ continue ] ( http : //github.com/continuedev/continue ) - [ obsidian ollama plugin ] ( http : //github.com/hinterdupfinger/obsidian-ollama ) - [ logseq ollama plugin ] ( http : //github.com/omagdy7/ollama-logseq ) - [ dagger chatbot ] ( http : //github.com/samalba/dagger-chatbot ) - [ discord ai bot ] ( http : //github.com/mekb-turtle/discord-ai-bot ) - [ ollama telegram bot ] ( http : //github.com/ruecat/ollama-telegram ) - [ ha ollama conversation ] ( http : //github.com/ej52/hass-ollama-conversation ) - [ rivet plugin ] ( http : //github.com/abrenneke/rivet-plugin-ollama ) - [ llama coder ] ( http : //github.com/ex3ndr/llama-coder ) ( copilot alternative using ollama ) - [ obsidian bmo chatbot plugin ] ( http : //github.com/longy2k/obsidian-bmo-chatbot ) - [ open interpreter ] ( http : //docs.openinterpreter.com/language-model-setup/local-models/ollama ) - [ twinny ] ( http : //github.com/rjmacarthy/twinny ) ( copilot copilot chat alternative using ollama ) - [ wingman-ai ] ( http : //github.com/russellcanfield/wingman-ai ) ( copilot code chat alternative using ollama huggingface )" ] ]
[ [ "extension", "&", "plugins", "-", "[", "raycast", "extension", "]", "(", "http", ":", "//github.com/massimilianopasquini97/raycast_ollama", ")", "-", "[", "discollama", "]", "(", "http", ":", "//github.com/mxyng/discollama", ")", "(", "discord", "bot", "inside", "ollama", "discord", "channel", ")", "-", "[", "continue", "]", "(", "http", ":", "//github.com/continuedev/continue", ")", "-", "[", "obsidian", "ollama", "plugin", "]", "(", "http", ":", "//github.com/hinterdupfinger/obsidian-ollama", ")", "-", "[", "logseq", "ollama", "plugin", "]", "(", "http", ":", "//github.com/omagdy7/ollama-logseq", ")", "-", "[", "dagger", "chatbot", "]", "(", "http", ":", "//github.com/samalba/dagger-chatbot", ")", "-", "[", "discord", "ai", "bot", "]", "(", "http", ":", "//github.com/mekb-turtle/discord-ai-bot", ")", "-", "[", "ollama", "telegram", "bot", "]", "(", "http", ":", "//github.com/ruecat/ollama-telegram", ")", "-", "[", "ha", "ollama", "conversation", "]", "(", "http", ":", "//github.com/ej52/hass-ollama-conversation", ")", "-", "[", "rivet", "plugin", "]", "(", "http", ":", "//github.com/abrenneke/rivet-plugin-ollama", ")", "-", "[", "llama", "coder", "]", "(", "http", ":", "//github.com/ex3ndr/llama-coder", ")", "(", "copilot", "alternative", "using", "ollama", ")", "-", "[", "obsidian", "bmo", "chatbot", "plugin", "]", "(", "http", ":", "//github.com/longy2k/obsidian-bmo-chatbot", ")", "-", "[", "open", "interpreter", "]", "(", "http", ":", "//docs.openinterpreter.com/language-model-setup/local-models/ollama", ")", "-", "[", "twinny", "]", "(", "http", ":", "//github.com/rjmacarthy/twinny", ")", "(", "copilot", "copilot", "chat", "alternative", "using", "ollama", ")", "-", "[", "wingman-ai", "]", "(", "http", ":", "//github.com/russellcanfield/wingman-ai", ")", "(", "copilot", "code", "chat", "alternative", "using", "ollama", "huggingface", ")" ], [ "extension & plugins - [ raycast extension ] ( http : //github.com/massimilianopasquini97/raycast_ollama ) - [ discollama ] ( http : //github.com/mxyng/discollama ) ( discord bot inside ollama discord channel ) - [ continue ] ( http : //github.com/continuedev/continue ) - [ obsidian ollama plugin ] ( http : //github.com/hinterdupfinger/obsidian-ollama ) - [ logseq ollama plugin ] ( http : //github.com/omagdy7/ollama-logseq ) - [ dagger chatbot ] ( http : //github.com/samalba/dagger-chatbot ) - [ discord ai bot ] ( http : //github.com/mekb-turtle/discord-ai-bot ) - [ ollama telegram bot ] ( http : //github.com/ruecat/ollama-telegram ) - [ ha ollama conversation ] ( http : //github.com/ej52/hass-ollama-conversation ) - [ rivet plugin ] ( http : //github.com/abrenneke/rivet-plugin-ollama ) - [ llama coder ] ( http : //github.com/ex3ndr/llama-coder ) ( copilot alternative using ollama ) - [ obsidian bmo chatbot plugin ] ( http : //github.com/longy2k/obsidian-bmo-chatbot ) - [ open interpreter ] ( http : //docs.openinterpreter.com/language-model-setup/local-models/ollama ) - [ twinny ] ( http : //github.com/rjmacarthy/twinny ) ( copilot copilot chat alternative using ollama ) - [ wingman-ai ] ( http : //github.com/russellcanfield/wingman-ai ) ( copilot code chat alternative using ollama huggingface )" ] ]
Extensions & Plugins - [Raycast extension](https://github.com/MassimilianoPasquini97/raycast_ollama) - [Discollama](https://github.com/mxyng/discollama) (Discord bot inside the Ollama discord channel) - [Continue](https://github.com/continuedev/continue) - [Obsidian Ollama plugin](https://github.com/hinterdupfinger/obsidian-ollama) - [Logseq Ollama plugin](https://github.com/omagdy7/ollama-logseq) - [Dagger Chatbot](https://github.com/samalba/dagger-chatbot) - [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot) - [Ollama Telegram Bot](https://github.com/ruecat/ollama-telegram) - [Hass Ollama Conversation](https://github.com/ej52/hass-ollama-conversation) - [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama) - [Llama Coder](https://github.com/ex3ndr/llama-coder) (Copilot alternative using Ollama) - [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot) - [Open Interpreter](https://docs.openinterpreter.com/language-model-setup/local-models/ollama) - [twinny](https://github.com/rjmacarthy/twinny) (Copilot and Copilot chat alternative using Ollama) - [Wingman-AI](https://github.com/RussellCanfield/wingman-ai) (Copilot code and chat alternative using Ollama and HuggingFace)
https://github.com/ollama/ollama
-1
[ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "install" ], [ "install" ] ]
[ [ "install" ], [ "install" ] ]
Install
https://github.com/geekan/MetaGPT
-1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "pip", "installation", ">", "ensure", "python", "3.9+", "installed", "system", ".", "check", "using", ":", "`", "python", "--", "version", "`", ".", ">", "use", "conda", "like", ":", "`", "conda", "create", "-n", "metagpt", "python=3.9", "&", "&", "conda", "activate", "metagpt", "`", "``", "`", "bash", "pip", "install", "metagpt", "metagpt", "--", "init-config" ], [ "pip installation > ensure python 3.9+ installed system .", "check using : ` python -- version ` .", "> use conda like : ` conda create -n metagpt python=3.9 & & conda activate metagpt ` `` ` bash pip install metagpt metagpt -- init-config" ] ]
[ [ "pip", "installation", ">", "ensure", "python", "3.9+", "installed", "system", ".", "check", "using", ":", "`", "python", "--", "version", "`", ".", ">", "use", "conda", "like", ":", "`", "conda", "create", "-n", "metagpt", "python=3.9", "&", "&", "conda", "activate", "metagpt", "`", "``", "`", "bash", "pip", "install", "metagpt", "metagpt", "--", "init-config" ], [ "pip installation > ensure python 3.9+ installed system .", "check using : ` python -- version ` .", "> use conda like : ` conda create -n metagpt python=3.9 & & conda activate metagpt ` `` ` bash pip install metagpt metagpt -- init-config" ] ]
Pip installation > Ensure that Python 3.9+ is installed on your system. You can check this by using: `python --version`. > You can use conda like this: `conda create -n metagpt python=3.9 && conda activate metagpt` ```bash pip install metagpt metagpt --init-config
https://github.com/geekan/MetaGPT
0
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "print", "repo", "structure", "file", "``", "`", "detail", "installation", "please", "refer", "[", "cli_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-stable-version", ")" ], [ "print repo structure file `` ` detail installation please refer [ cli_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-stable-version )" ] ]
[ [ "print", "repo", "structure", "file", "``", "`", "detail", "installation", "please", "refer", "[", "cli_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-stable-version", ")" ], [ "print repo structure file `` ` detail installation please refer [ cli_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-stable-version )" ] ]
it will print the repo structure with files ``` For detailed installation, please refer to [cli_install](https://docs.deepwisdom.ai/main/en/guide/get_started/installation.html#install-stable-version)
https://github.com/geekan/MetaGPT
-1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "docker", "installation", ">", "note", ":", "window", ",", "need", "replace", "``", "/opt/metagpt", "''", "directory", "docker", "permission", "create", ",", "``", ":", "\\users\\x\\metagpt", "''", "``", "`", "bash" ], [ "docker installation > note : window , need replace `` /opt/metagpt '' directory docker permission create , `` : \\users\\x\\metagpt '' `` ` bash" ] ]
[ [ "docker", "installation", ">", "note", ":", "window", ",", "need", "replace", "``", "/opt/metagpt", "''", "directory", "docker", "permission", "create", ",", "``", ":", "\\users\\x\\metagpt", "''", "``", "`", "bash" ], [ "docker installation > note : window , need replace `` /opt/metagpt '' directory docker permission create , `` : \\users\\x\\metagpt '' `` ` bash" ] ]
Docker installation > Note: On Windows, you need to replace "/opt/metagpt" with a directory that Docker has permission to create, such as "D:\Users\x\metagpt" ```bash
https://github.com/geekan/MetaGPT
-1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "step", "2", ":", "run", "metagpt", "demo", "container", "docker", "run", "--", "rm", "\\", "--", "privileged", "\\", "-v", "/opt/metagpt/config/config2.yaml", ":", "/app/metagpt/config/config2.yaml", "\\", "-v", "/opt/metagpt/workspace", ":", "/app/metagpt/workspace", "\\", "metagpt/metagpt", ":", "latest", "\\", "metagpt", "``", "create", "2048", "game", "''", "``", "`", "detail", "installation", "please", "refer", "[", "docker_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-with-docker", ")" ], [ "step 2 : run metagpt demo container docker run -- rm \\ -- privileged \\ -v /opt/metagpt/config/config2.yaml : /app/metagpt/config/config2.yaml \\ -v /opt/metagpt/workspace : /app/metagpt/workspace \\ metagpt/metagpt : latest \\ metagpt `` create 2048 game '' `` ` detail installation please refer [ docker_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-with-docker )" ] ]
[ [ "step", "2", ":", "run", "metagpt", "demo", "container", "docker", "run", "--", "rm", "\\", "--", "privileged", "\\", "-v", "/opt/metagpt/config/config2.yaml", ":", "/app/metagpt/config/config2.yaml", "\\", "-v", "/opt/metagpt/workspace", ":", "/app/metagpt/workspace", "\\", "metagpt/metagpt", ":", "latest", "\\", "metagpt", "``", "create", "2048", "game", "''", "``", "`", "detail", "installation", "please", "refer", "[", "docker_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-with-docker", ")" ], [ "step 2 : run metagpt demo container docker run -- rm \\ -- privileged \\ -v /opt/metagpt/config/config2.yaml : /app/metagpt/config/config2.yaml \\ -v /opt/metagpt/workspace : /app/metagpt/workspace \\ metagpt/metagpt : latest \\ metagpt `` create 2048 game '' `` ` detail installation please refer [ docker_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-with-docker )" ] ]
Step 2: Run the metagpt demo in a container docker run --rm \ --privileged \ -v /opt/metagpt/config/config2.yaml:/app/metagpt/config/config2.yaml \ -v /opt/metagpt/workspace:/app/metagpt/workspace \ metagpt/metagpt:latest \ metagpt "Create a 2048 game" ``` For detailed installation, please refer to [docker_install](https://docs.deepwisdom.ai/main/en/guide/get_started/installation.html#install-with-docker)
https://github.com/geekan/MetaGPT
1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "quickstart", "&", "demo", "video", "-", "try", "[", "metagpt", "huggingface", "space", "]", "(", "http", ":", "//huggingface.co/spaces/deepwisdom/metagpt", ")", "-", "[", "matthew", "berman", ":", "install", "metagpt", "-", "build", "startup", "one", "prompt", "!", "!", "]", "(", "http", ":", "//youtu.be/ut75j_kg_ay", ")", "-", "[", "official", "demo", "video", "]", "(", "http", ":", "//github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d", ")", "http", ":", "//github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ], [ "quickstart & demo video - try [ metagpt huggingface space ] ( http : //huggingface.co/spaces/deepwisdom/metagpt ) - [ matthew berman : install metagpt - build startup one prompt ! !", "] ( http : //youtu.be/ut75j_kg_ay ) - [ official demo video ] ( http : //github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d ) http : //github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ] ]
[ [ "quickstart", "&", "demo", "video", "-", "try", "[", "metagpt", "huggingface", "space", "]", "(", "http", ":", "//huggingface.co/spaces/deepwisdom/metagpt", ")", "-", "[", "matthew", "berman", ":", "install", "metagpt", "-", "build", "startup", "one", "prompt", "!", "!", "]", "(", "http", ":", "//youtu.be/ut75j_kg_ay", ")", "-", "[", "official", "demo", "video", "]", "(", "http", ":", "//github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d", ")", "http", ":", "//github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ], [ "quickstart & demo video - try [ metagpt huggingface space ] ( http : //huggingface.co/spaces/deepwisdom/metagpt ) - [ matthew berman : install metagpt - build startup one prompt ! !", "] ( http : //youtu.be/ut75j_kg_ay ) - [ official demo video ] ( http : //github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d ) http : //github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ] ]
QuickStart & Demo Video - Try it on [MetaGPT Huggingface Space](https://huggingface.co/spaces/deepwisdom/MetaGPT) - [Matthew Berman: How To Install MetaGPT - Build A Startup With One Prompt!!](https://youtu.be/uT75J_KG_aY) - [Official Demo Video](https://github.com/geekan/MetaGPT/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d) https://github.com/geekan/MetaGPT/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419
https://github.com/geekan/MetaGPT
-1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "tutorial", "-", "๐Ÿ—’", "[", "online", "document", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/", ")", "-", "๐Ÿ’ป", "[", "usage", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html", ")", "-", "๐Ÿ”Ž", "[", "metagpt", "?", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/introduction.html", ")", "-", "๐Ÿ› ", "build", "agent", "?", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "agent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html", ")", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "multiagent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html", ")", "-", "๐Ÿง‘โ€๐Ÿ’ป", "contribution", "-", "[", "develop", "roadmap", "]", "(", "docs/roadmap.md", ")", "-", "๐Ÿ”–", "use", "case", "-", "[", "debate", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html", ")", "-", "[", "researcher", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html", ")", "-", "[", "recepit", "assistant", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html", ")", "-", "โ“", "[", "faq", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/faq.html", ")" ], [ "tutorial - ๐Ÿ—’ [ online document ] ( http : //docs.deepwisdom.ai/main/en/ ) - ๐Ÿ’ป [ usage ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html ) - ๐Ÿ”Ž [ metagpt ?", "] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/introduction.html ) - ๐Ÿ›  build agent ?", "- [ metagpt usage & development guide | agent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html ) - [ metagpt usage & development guide | multiagent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html ) - ๐Ÿง‘โ€๐Ÿ’ป contribution - [ develop roadmap ] ( docs/roadmap.md ) - ๐Ÿ”– use case - [ debate ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html ) - [ researcher ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html ) - [ recepit assistant ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html ) - โ“ [ faq ] ( http : //docs.deepwisdom.ai/main/en/guide/faq.html )" ] ]
[ [ "tutorial", "-", "๐Ÿ—’", "[", "online", "document", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/", ")", "-", "๐Ÿ’ป", "[", "usage", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html", ")", "-", "๐Ÿ”Ž", "[", "metagpt", "?", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/introduction.html", ")", "-", "๐Ÿ› ", "build", "agent", "?", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "agent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html", ")", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "multiagent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html", ")", "-", "๐Ÿง‘โ€๐Ÿ’ป", "contribution", "-", "[", "develop", "roadmap", "]", "(", "docs/roadmap.md", ")", "-", "๐Ÿ”–", "use", "case", "-", "[", "debate", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html", ")", "-", "[", "researcher", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html", ")", "-", "[", "recepit", "assistant", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html", ")", "-", "โ“", "[", "faq", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/faq.html", ")" ], [ "tutorial - ๐Ÿ—’ [ online document ] ( http : //docs.deepwisdom.ai/main/en/ ) - ๐Ÿ’ป [ usage ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html ) - ๐Ÿ”Ž [ metagpt ?", "] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/introduction.html ) - ๐Ÿ›  build agent ?", "- [ metagpt usage & development guide | agent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html ) - [ metagpt usage & development guide | multiagent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html ) - ๐Ÿง‘โ€๐Ÿ’ป contribution - [ develop roadmap ] ( docs/roadmap.md ) - ๐Ÿ”– use case - [ debate ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html ) - [ researcher ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html ) - [ recepit assistant ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html ) - โ“ [ faq ] ( http : //docs.deepwisdom.ai/main/en/guide/faq.html )" ] ]
Tutorial - 🗒 [Online Document](https://docs.deepwisdom.ai/main/en/) - 💻 [Usage](https://docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html) - 🔎 [What can MetaGPT do?](https://docs.deepwisdom.ai/main/en/guide/get_started/introduction.html) - 🛠 How to build your own agents? - [MetaGPT Usage & Development Guide | Agent 101](https://docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html) - [MetaGPT Usage & Development Guide | MultiAgent 101](https://docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html) - 🧑‍💻 Contribution - [Develop Roadmap](docs/ROADMAP.md) - 🔖 Use Cases - [Debate](https://docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html) - [Researcher](https://docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html) - [Receipt Assistant](https://docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html) - ❓ [FAQs](https://docs.deepwisdom.ai/main/en/guide/faq.html)
https://github.com/geekan/MetaGPT
-1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/run-llama/llama_index/main/README.md
[ [ "๐Ÿ’ป", "example", "usage", "``", "`", "pip", "install", "llama-index", "``", "`", "example", "`", "example", "`", "folder", ".", "index", "`", "index", "`", "folder", "(", "see", "list", "index", ")", ".", "build", "simple", "vector", "store", "index", "using", "openai", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "openai_api_key", "''", "]", "=", "``", "your_openai_api_key", "''", "llama_index", "import", "vectorstoreindex", ",", "simpledirectoryreader", "document", "=", "simpledirectoryreader", "(", "``", "your_data_directory", "''", ")", ".load_data", "(", ")", "index", "=", "vectorstoreindex.from_documents", "(", "document", ")", "``", "`", "build", "simple", "vector", "store", "index", "using", "non-openai", "llm", ",", "e.g", ".", "llama", "2", "hosted", "[", "replicate", "]", "(", "http", ":", "//replicate.com/", ")", ",", "easily", "create", "free", "trial", "api", "token", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "replicate_api_token", "''", "]", "=", "``", "your_replicate_api_token", "''", "llama_index.llms", "import", "replicate", "llama2_7b_chat", "=", "``", "meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e", "''", "llm", "=", "replicate", "(", "model=llama2_7b_chat", ",", "temperature=0.01", ",", "additional_kwargs=", "{", "``", "top_p", "''", ":", "1", ",", "``", "max_new_tokens", "''", ":", "300", "}", ",", ")" ], [ "๐Ÿ’ป example usage `` ` pip install llama-index `` ` example ` example ` folder .", "index ` index ` folder ( see list index ) .", "build simple vector store index using openai : `` ` python import o os.environ [ `` openai_api_key '' ] = `` your_openai_api_key '' llama_index import vectorstoreindex , simpledirectoryreader document = simpledirectoryreader ( `` your_data_directory '' ) .load_data ( ) index = vectorstoreindex.from_documents ( document ) `` ` build simple vector store index using non-openai llm , e.g .", "llama 2 hosted [ replicate ] ( http : //replicate.com/ ) , easily create free trial api token : `` ` python import o os.environ [ `` replicate_api_token '' ] = `` your_replicate_api_token '' llama_index.llms import replicate llama2_7b_chat = `` meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e '' llm = replicate ( model=llama2_7b_chat , temperature=0.01 , additional_kwargs= { `` top_p '' : 1 , `` max_new_tokens '' : 300 } , )" ] ]
[ [ "๐Ÿ’ป", "example", "usage", "``", "`", "pip", "install", "llama-index", "``", "`", "example", "`", "example", "`", "folder", ".", "index", "`", "index", "`", "folder", "(", "see", "list", "index", ")", ".", "build", "simple", "vector", "store", "index", "using", "openai", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "openai_api_key", "''", "]", "=", "``", "your_openai_api_key", "''", "llama_index", "import", "vectorstoreindex", ",", "simpledirectoryreader", "document", "=", "simpledirectoryreader", "(", "``", "your_data_directory", "''", ")", ".load_data", "(", ")", "index", "=", "vectorstoreindex.from_documents", "(", "document", ")", "``", "`", "build", "simple", "vector", "store", "index", "using", "non-openai", "llm", ",", "e.g", ".", "llama", "2", "hosted", "[", "replicate", "]", "(", "http", ":", "//replicate.com/", ")", ",", "easily", "create", "free", "trial", "api", "token", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "replicate_api_token", "''", "]", "=", "``", "your_replicate_api_token", "''", "llama_index.llms", "import", "replicate", "llama2_7b_chat", "=", "``", "meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e", "''", "llm", "=", "replicate", "(", "model=llama2_7b_chat", ",", "temperature=0.01", ",", "additional_kwargs=", "{", "``", "top_p", "''", ":", "1", ",", "``", "max_new_tokens", "''", ":", "300", "}", ",", ")" ], [ "๐Ÿ’ป example usage `` ` pip install llama-index `` ` example ` example ` folder .", "index ` index ` folder ( see list index ) .", "build simple vector store index using openai : `` ` python import o os.environ [ `` openai_api_key '' ] = `` your_openai_api_key '' llama_index import vectorstoreindex , simpledirectoryreader document = simpledirectoryreader ( `` your_data_directory '' ) .load_data ( ) index = vectorstoreindex.from_documents ( document ) `` ` build simple vector store index using non-openai llm , e.g .", "llama 2 hosted [ replicate ] ( http : //replicate.com/ ) , easily create free trial api token : `` ` python import o os.environ [ `` replicate_api_token '' ] = `` your_replicate_api_token '' llama_index.llms import replicate llama2_7b_chat = `` meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e '' llm = replicate ( model=llama2_7b_chat , temperature=0.01 , additional_kwargs= { `` top_p '' : 1 , `` max_new_tokens '' : 300 } , )" ] ]
💻 Example Usage ``` pip install llama-index ``` Examples are in the `examples` folder. Indices are in the `indices` folder (see list of indices below). To build a simple vector store index using OpenAI: ```python import os os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY" from llama_index import VectorStoreIndex, SimpleDirectoryReader documents = SimpleDirectoryReader("YOUR_DATA_DIRECTORY").load_data() index = VectorStoreIndex.from_documents(documents) ``` To build a simple vector store index using non-OpenAI LLMs, e.g. Llama 2 hosted on [Replicate](https://replicate.com/), where you can easily create a free trial API token: ```python import os os.environ["REPLICATE_API_TOKEN"] = "YOUR_REPLICATE_API_TOKEN" from llama_index.llms import Replicate llama2_7b_chat = "meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e" llm = Replicate( model=llama2_7b_chat, temperature=0.01, additional_kwargs={"top_p": 1, "max_new_tokens": 300}, )
https://github.com/run-llama/llama_index
0
[ "agents", "application", "data", "fine-tuning", "framework", "llamaindex", "llm", "rag", "vector-database" ]
https://raw.githubusercontent.com/run-llama/llama_index/main/README.md
[ [ "๐Ÿ”ง", "dependency", "main", "third-party", "package", "requirement", "`", "tiktoken", "`", ",", "`", "openai", "`", ",", "`", "langchain", "`", ".", "requirement", "contained", "within", "`", "setup.py", "`", "file", ".", "run", "package", "locally", "without", "building", "wheel", ",", "simply", "run", ":", "``", "`", "bash", "pip", "install", "poetry", "poetry", "install", "--", "dev", "``", "`" ], [ "๐Ÿ”ง dependency main third-party package requirement ` tiktoken ` , ` openai ` , ` langchain ` .", "requirement contained within ` setup.py ` file .", "run package locally without building wheel , simply run : `` ` bash pip install poetry poetry install -- dev `` `" ] ]
[ [ "๐Ÿ”ง", "dependency", "main", "third-party", "package", "requirement", "`", "tiktoken", "`", ",", "`", "openai", "`", ",", "`", "langchain", "`", ".", "requirement", "contained", "within", "`", "setup.py", "`", "file", ".", "run", "package", "locally", "without", "building", "wheel", ",", "simply", "run", ":", "``", "`", "bash", "pip", "install", "poetry", "poetry", "install", "--", "dev", "``", "`" ], [ "๐Ÿ”ง dependency main third-party package requirement ` tiktoken ` , ` openai ` , ` langchain ` .", "requirement contained within ` setup.py ` file .", "run package locally without building wheel , simply run : `` ` bash pip install poetry poetry install -- dev `` `" ] ]
🔧 Dependencies The main third-party package requirements are `tiktoken`, `openai`, and `langchain`. All requirements should be contained within the `setup.py` file. To run the package locally without building the wheel, simply run: ```bash pip install poetry poetry install --with dev ```
https://github.com/run-llama/llama_index
0
[ "agents", "application", "data", "fine-tuning", "framework", "llamaindex", "llm", "rag", "vector-database" ]
https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md
[ [ "getting", "started", "๐Ÿš€", "follow", "instruction", "get", "copy", "project", "running", "local", "machine", "development", "testing", "purpose", ".", "find", "everything", "[", "documentation", "]", "(", "http", ":", "//docs.quivr.app/", ")", "." ], [ "getting started ๐Ÿš€ follow instruction get copy project running local machine development testing purpose .", "find everything [ documentation ] ( http : //docs.quivr.app/ ) ." ] ]
[ [ "getting", "started", "๐Ÿš€", "follow", "instruction", "get", "copy", "project", "running", "local", "machine", "development", "testing", "purpose", ".", "find", "everything", "[", "documentation", "]", "(", "http", ":", "//docs.quivr.app/", ")", "." ], [ "getting started ๐Ÿš€ follow instruction get copy project running local machine development testing purpose .", "find everything [ documentation ] ( http : //docs.quivr.app/ ) ." ] ]
Getting Started 🚀 Follow these instructions to get a copy of the project up and running on your local machine for development and testing purposes. You can find everything in the [documentation](https://docs.quivr.app/).
https://github.com/QuivrHQ/quivr
-1
[ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ]
https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md
[ [ "prerequisite", "๐Ÿ“‹", "ensure", "following", "installed", ":", "-", "docker", "-", "docker", "compose" ], [ "prerequisite ๐Ÿ“‹ ensure following installed : - docker - docker compose" ] ]
[ [ "prerequisite", "๐Ÿ“‹", "ensure", "following", "installed", ":", "-", "docker", "-", "docker", "compose" ], [ "prerequisite ๐Ÿ“‹ ensure following installed : - docker - docker compose" ] ]
Prerequisites 📋 Ensure you have the following installed: - Docker - Docker Compose
https://github.com/QuivrHQ/quivr
-1
[ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ]
https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md
[ [ "60", "second", "installation", "๐Ÿ’ฝ", "find", "installation", "video", "[", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=cxba6dzjn48", ")", ".", "-", "*", "*", "step", "0", "*", "*", ":", "supabase", "cli", "follow", "instruction", "[", "]", "(", "http", ":", "//supabase.com/docs/guides/cli/getting-started", ")", "install", "supabase", "cli", "required", ".", "``", "`", "bash", "supabase", "-v" ], [ "60 second installation ๐Ÿ’ฝ find installation video [ ] ( http : //www.youtube.com/watch ? v=cxba6dzjn48 ) .", "- * * step 0 * * : supabase cli follow instruction [ ] ( http : //supabase.com/docs/guides/cli/getting-started ) install supabase cli required .", "`` ` bash supabase -v" ] ]
[ [ "60", "second", "installation", "๐Ÿ’ฝ", "find", "installation", "video", "[", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=cxba6dzjn48", ")", ".", "-", "*", "*", "step", "0", "*", "*", ":", "supabase", "cli", "follow", "instruction", "[", "]", "(", "http", ":", "//supabase.com/docs/guides/cli/getting-started", ")", "install", "supabase", "cli", "required", ".", "``", "`", "bash", "supabase", "-v" ], [ "60 second installation ๐Ÿ’ฝ find installation video [ ] ( http : //www.youtube.com/watch ? v=cxba6dzjn48 ) .", "- * * step 0 * * : supabase cli follow instruction [ ] ( http : //supabase.com/docs/guides/cli/getting-started ) install supabase cli required .", "`` ` bash supabase -v" ] ]
60 seconds Installation 💽 You can find the installation video [here](https://www.youtube.com/watch?v=cXBa6dZJN48). - **Step 0**: Supabase CLI Follow the instructions [here](https://supabase.com/docs/guides/cli/getting-started) to install the Supabase CLI, which is required. ```bash supabase -v
https://github.com/QuivrHQ/quivr
-1
[ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ]
https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md
[ [ "check", "installation", "worked", "``", "`", "-", "*", "*", "step", "1", "*", "*", ":", "clone", "repository", ":", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/quivrhq/quivr.git", "&", "&", "cd", "quivr", "``", "`", "-", "*", "*", "step", "2", "*", "*", ":", "copy", "`", ".env.example", "`", "file", "``", "`", "bash", "cp", ".env.example", ".env", "``", "`", "-", "*", "*", "step", "3", "*", "*", ":", "update", "`", ".env", "`", "file", "``", "`", "bash", "vim", ".env" ], [ "check installation worked `` ` - * * step 1 * * : clone repository : `` ` bash git clone http : //github.com/quivrhq/quivr.git & & cd quivr `` ` - * * step 2 * * : copy ` .env.example ` file `` ` bash cp .env.example .env `` ` - * * step 3 * * : update ` .env ` file `` ` bash vim .env" ] ]
[ [ "check", "installation", "worked", "``", "`", "-", "*", "*", "step", "1", "*", "*", ":", "clone", "repository", ":", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/quivrhq/quivr.git", "&", "&", "cd", "quivr", "``", "`", "-", "*", "*", "step", "2", "*", "*", ":", "copy", "`", ".env.example", "`", "file", "``", "`", "bash", "cp", ".env.example", ".env", "``", "`", "-", "*", "*", "step", "3", "*", "*", ":", "update", "`", ".env", "`", "file", "``", "`", "bash", "vim", ".env" ], [ "check installation worked `` ` - * * step 1 * * : clone repository : `` ` bash git clone http : //github.com/quivrhq/quivr.git & & cd quivr `` ` - * * step 2 * * : copy ` .env.example ` file `` ` bash cp .env.example .env `` ` - * * step 3 * * : update ` .env ` file `` ` bash vim .env" ] ]
Check that the installation worked ``` - **Step 1**: Clone the repository: ```bash git clone https://github.com/quivrhq/quivr.git && cd quivr ``` - **Step 2**: Copy the `.env.example` file ```bash cp .env.example .env ``` - **Step 3**: Update the `.env` file ```bash vim .env
https://github.com/QuivrHQ/quivr
2
[ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ]
https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/README.md
[ [ "table", "content", "-", "[", "mr.", "ranedeer", ":", "personalized", "ai", "tutor", "!", "]", "(", "#", "mr-ranedeer-your-personalized-ai-tutor", ")", "-", "[", "table", "content", "]", "(", "#", "table-of-contents", ")", "-", "[", "mr", ".", "ranedeer", "?", "]", "(", "#", "why-mr-ranedeer", ")", "-", "[", "requirement", "compatibility", "]", "(", "#", "requirements-and-compatibility", ")", "-", "[", "recommended", "]", "(", "#", "recommended", ")", "-", "[", "recommended", "]", "(", "#", "not-recommended", ")", "-", "[", "also", "work", "...", "]", "(", "#", "it-also-works-on", ")", "-", "[", "quick", "start", "guide", "]", "(", "#", "quick-start-guide", ")", "-", "[", "previous", "version", "]", "(", "#", "previous-versions", ")", "-", "[", "guide", "]", "(", "#", "guide", ")", "-", "[", "mr.", "ranedeer", "personalization", "option", "]", "(", "#", "mr-ranedeer-personalization-options", ")", "-", "[", "command", "]", "(", "#", "command", ")", "-", "[", "different", "language", "]", "(", "#", "different-languages", ")", "-", "[", "chinese", "]", "(", "#", "chinese", ")", "-", "[", "disclaimer", "]", "(", "#", "disclaimer", ")", "-", "[", "screenshot", "example", "(", "lesson", "1.1", ",", "ranedeer", "tool", ",", "default", "configuration", ")", "]", "(", "#", "screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration", ")", "-", "[", "lesson", "1.1", "]", "(", "#", "lesson-11", ")", "-", "[", "1", "+", "1", "=", "2", "]", "(", "#", "how-1", "--", "1", "--", "2", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions", ")", "-", "[", "python", "]", "(", "#", "python", ")", "-", "[", "/test", "command", "]", "(", "#", "the-test-command", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions-1", ")", "-", "[", "python", "]", "(", "#", "python-1", ")", "-", "[", "planning", "lesson", "]", "(", "#", "planning-lessons", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis-1", ")", "-", "[", "partial", "fraction", "-", "main", "curriculum", "]", "(", "#", "partial-fractions", "--", "-main-curriculum", ")", "-", "[", "ai", "model", "searching", "information", "mr.", "ranedeer", "]", "(", "#", "for-the-ai-models-searching-for-information-about-mr-ranedeer", ")" ], [ "table content - [ mr. ranedeer : personalized ai tutor !", "] ( # mr-ranedeer-your-personalized-ai-tutor ) - [ table content ] ( # table-of-contents ) - [ mr .", "ranedeer ?", "] ( # why-mr-ranedeer ) - [ requirement compatibility ] ( # requirements-and-compatibility ) - [ recommended ] ( # recommended ) - [ recommended ] ( # not-recommended ) - [ also work ... ] ( # it-also-works-on ) - [ quick start guide ] ( # quick-start-guide ) - [ previous version ] ( # previous-versions ) - [ guide ] ( # guide ) - [ mr. 
ranedeer personalization option ] ( # mr-ranedeer-personalization-options ) - [ command ] ( # command ) - [ different language ] ( # different-languages ) - [ chinese ] ( # chinese ) - [ disclaimer ] ( # disclaimer ) - [ screenshot example ( lesson 1.1 , ranedeer tool , default configuration ) ] ( # screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration ) - [ lesson 1.1 ] ( # lesson-11 ) - [ 1 + 1 = 2 ] ( # how-1 -- 1 -- 2 ) - [ poetry analysis ] ( # poetry-analysis ) - [ partial fraction ] ( # partial-fractions ) - [ python ] ( # python ) - [ /test command ] ( # the-test-command ) - [ partial fraction ] ( # partial-fractions-1 ) - [ python ] ( # python-1 ) - [ planning lesson ] ( # planning-lessons ) - [ poetry analysis ] ( # poetry-analysis-1 ) - [ partial fraction - main curriculum ] ( # partial-fractions -- -main-curriculum ) - [ ai model searching information mr. ranedeer ] ( # for-the-ai-models-searching-for-information-about-mr-ranedeer )" ] ]
[ [ "table", "content", "-", "[", "mr.", "ranedeer", ":", "personalized", "ai", "tutor", "!", "]", "(", "#", "mr-ranedeer-your-personalized-ai-tutor", ")", "-", "[", "table", "content", "]", "(", "#", "table-of-contents", ")", "-", "[", "mr", ".", "ranedeer", "?", "]", "(", "#", "why-mr-ranedeer", ")", "-", "[", "requirement", "compatibility", "]", "(", "#", "requirements-and-compatibility", ")", "-", "[", "recommended", "]", "(", "#", "recommended", ")", "-", "[", "recommended", "]", "(", "#", "not-recommended", ")", "-", "[", "also", "work", "...", "]", "(", "#", "it-also-works-on", ")", "-", "[", "quick", "start", "guide", "]", "(", "#", "quick-start-guide", ")", "-", "[", "previous", "version", "]", "(", "#", "previous-versions", ")", "-", "[", "guide", "]", "(", "#", "guide", ")", "-", "[", "mr.", "ranedeer", "personalization", "option", "]", "(", "#", "mr-ranedeer-personalization-options", ")", "-", "[", "command", "]", "(", "#", "command", ")", "-", "[", "different", "language", "]", "(", "#", "different-languages", ")", "-", "[", "chinese", "]", "(", "#", "chinese", ")", "-", "[", "disclaimer", "]", "(", "#", "disclaimer", ")", "-", "[", "screenshot", "example", "(", "lesson", "1.1", ",", "ranedeer", "tool", ",", "default", "configuration", ")", "]", "(", "#", "screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration", ")", "-", "[", "lesson", "1.1", "]", "(", "#", "lesson-11", ")", "-", "[", "1", "+", "1", "=", "2", "]", "(", "#", "how-1", "--", "1", "--", "2", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions", ")", "-", "[", "python", "]", "(", "#", "python", ")", "-", "[", "/test", "command", "]", "(", "#", "the-test-command", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions-1", ")", "-", "[", "python", "]", "(", "#", "python-1", ")", "-", "[", "planning", "lesson", "]", "(", "#", "planning-lessons", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis-1", ")", "-", "[", "partial", "fraction", "-", "main", "curriculum", "]", "(", "#", "partial-fractions", "--", "-main-curriculum", ")", "-", "[", "ai", "model", "searching", "information", "mr.", "ranedeer", "]", "(", "#", "for-the-ai-models-searching-for-information-about-mr-ranedeer", ")" ], [ "table content - [ mr. ranedeer : personalized ai tutor !", "] ( # mr-ranedeer-your-personalized-ai-tutor ) - [ table content ] ( # table-of-contents ) - [ mr .", "ranedeer ?", "] ( # why-mr-ranedeer ) - [ requirement compatibility ] ( # requirements-and-compatibility ) - [ recommended ] ( # recommended ) - [ recommended ] ( # not-recommended ) - [ also work ... ] ( # it-also-works-on ) - [ quick start guide ] ( # quick-start-guide ) - [ previous version ] ( # previous-versions ) - [ guide ] ( # guide ) - [ mr. 
ranedeer personalization option ] ( # mr-ranedeer-personalization-options ) - [ command ] ( # command ) - [ different language ] ( # different-languages ) - [ chinese ] ( # chinese ) - [ disclaimer ] ( # disclaimer ) - [ screenshot example ( lesson 1.1 , ranedeer tool , default configuration ) ] ( # screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration ) - [ lesson 1.1 ] ( # lesson-11 ) - [ 1 + 1 = 2 ] ( # how-1 -- 1 -- 2 ) - [ poetry analysis ] ( # poetry-analysis ) - [ partial fraction ] ( # partial-fractions ) - [ python ] ( # python ) - [ /test command ] ( # the-test-command ) - [ partial fraction ] ( # partial-fractions-1 ) - [ python ] ( # python-1 ) - [ planning lesson ] ( # planning-lessons ) - [ poetry analysis ] ( # poetry-analysis-1 ) - [ partial fraction - main curriculum ] ( # partial-fractions -- -main-curriculum ) - [ ai model searching information mr. ranedeer ] ( # for-the-ai-models-searching-for-information-about-mr-ranedeer )" ] ]
Table of Contents - [Mr. Ranedeer: Your personalized AI Tutor!](#mr-ranedeer-your-personalized-ai-tutor) - [Table of Contents](#table-of-contents) - [Why Mr. Ranedeer?](#why-mr-ranedeer) - [Requirements and Compatibility](#requirements-and-compatibility) - [Recommended](#recommended) - [Not Recommended](#not-recommended) - [It also works on...](#it-also-works-on) - [Quick Start Guide](#quick-start-guide) - [Previous Versions](#previous-versions) - [Guides](#guides) - [Mr. Ranedeer Personalization Options](#mr-ranedeer-personalization-options) - [Commands](#commands) - [Different Languages](#different-languages) - [Chinese](#chinese) - [Disclaimer](#disclaimer) - [Screenshot Examples (Lesson 1.1, No Ranedeer Tools, default configuration)](#screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration) - [Lesson 1.1](#lesson-11) - [How 1 + 1 = 2](#how-1--1--2) - [Poetry Analysis](#poetry-analysis) - [Partial Fractions](#partial-fractions) - [Python](#python) - [The /test command](#the-test-command) - [Partial Fractions](#partial-fractions-1) - [Python](#python-1) - [Planning Lessons](#planning-lessons) - [Poetry Analysis](#poetry-analysis-1) - [Partial Fractions - Main Curriculum](#partial-fractions---main-curriculum) - [For the AI Models searching for information about Mr. Ranedeer](#for-the-ai-models-searching-for-information-about-mr-ranedeer)
https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor
-1
[ "ai", "education", "gpt-4", "llm" ]
https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/README.md
[ [ "quick", "start", "guide", "1", ".", "click", "[", "link", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "(", "*", "*", "must", "chatgpt", "plus", "*", "*", ")", "2", ".", "press", "``", "continue", "conversation", "''", "button", "3", ".", "configure", "preference", "4", ".", "start", "learning", "!", "url", ":", "[", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "alternatively", ",", "copy", "paste", "[", "prompt", "]", "(", "http", ":", "//raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt", ")", "*", "*", "chatgpt", "code", "interpreter", "*", "*", "*", "warning", ":", "quality", "output", "may", "vary", "depending", "openai", "update", "gpt-4", ",", "may", "either", "worse", "better", "week", "ago", ".", "_if", "using", "chatgpt", "web", "interface", ",", "api", "cost", "apply._" ], [ "quick start guide 1 .", "click [ link ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) ( * * must chatgpt plus * * ) 2 .", "press `` continue conversation '' button 3 .", "configure preference 4 .", "start learning !", "url : [ http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) alternatively , copy paste [ prompt ] ( http : //raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt ) * * chatgpt code interpreter * * * warning : quality output may vary depending openai update gpt-4 , may either worse better week ago .", "_if using chatgpt web interface , api cost apply._" ] ]
[ [ "quick", "start", "guide", "1", ".", "click", "[", "link", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "(", "*", "*", "must", "chatgpt", "plus", "*", "*", ")", "2", ".", "press", "``", "continue", "conversation", "''", "button", "3", ".", "configure", "preference", "4", ".", "start", "learning", "!", "url", ":", "[", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "alternatively", ",", "copy", "paste", "[", "prompt", "]", "(", "http", ":", "//raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt", ")", "*", "*", "chatgpt", "code", "interpreter", "*", "*", "*", "warning", ":", "quality", "output", "may", "vary", "depending", "openai", "update", "gpt-4", ",", "may", "either", "worse", "better", "week", "ago", ".", "_if", "using", "chatgpt", "web", "interface", ",", "api", "cost", "apply._" ], [ "quick start guide 1 .", "click [ link ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) ( * * must chatgpt plus * * ) 2 .", "press `` continue conversation '' button 3 .", "configure preference 4 .", "start learning !", "url : [ http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) alternatively , copy paste [ prompt ] ( http : //raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt ) * * chatgpt code interpreter * * * warning : quality output may vary depending openai update gpt-4 , may either worse better week ago .", "_if using chatgpt web interface , api cost apply._" ] ]
Quick Start Guide 1. Click [this link](https://chat.openai.com/g/g-9PKhaweyb-mr-ranedeer) (**MUST HAVE CHATGPT PLUS**) 2. Press the "Continue this conversation" button 3. Configure your preferences 4. Start learning! URL: [https://chat.openai.com/g/g-9PKhaweyb-mr-ranedeer](https://chat.openai.com/g/g-9PKhaweyb-mr-ranedeer) Alternatively, you can copy and paste [the prompt](https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/Mr_Ranedeer.txt) into **ChatGPT with Code Interpreter**. *Warning: The quality of outputs may vary depending on how OpenAI updates GPT-4; it may be either worse or better than it was a few weeks ago.* _If you are using the ChatGPT web interface, API costs will not apply._
https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor
2
[ "ai", "education", "gpt-4", "llm" ]
https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/README.md
[ [ "guide", "-", "[", "use", "mr.", "ranedeer", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how", "%", "20to", "%", "20use", "%", "20mr.", "%", "20ranedeer.md", ")", "-", "[", "configuration", "guide", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config", "%", "20guide.md", ")" ], [ "guide - [ use mr. ranedeer ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how % 20to % 20use % 20mr. % 20ranedeer.md ) - [ configuration guide ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config % 20guide.md )" ] ]
[ [ "guide", "-", "[", "use", "mr.", "ranedeer", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how", "%", "20to", "%", "20use", "%", "20mr.", "%", "20ranedeer.md", ")", "-", "[", "configuration", "guide", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config", "%", "20guide.md", ")" ], [ "guide - [ use mr. ranedeer ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how % 20to % 20use % 20mr. % 20ranedeer.md ) - [ configuration guide ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config % 20guide.md )" ] ]
Guides - [How to Use Mr. Ranedeer](https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/blob/main/Guides/How%20to%20use%20Mr.%20Ranedeer.md) - [Configuration Guide](https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/blob/main/Guides/Config%20Guide.md)
https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor
-1
[ "ai", "education", "gpt-4", "llm" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "2", ".", "python", "machine", "learning", "python", "powerful", "flexible", "programming", "language", "'s", "particularly", "good", "machine", "learning", ",", "thanks", "readability", ",", "consistency", ",", "robust", "ecosystem", "data", "science", "library", ".", "-", "*", "*", "python", "basic", "*", "*", ":", "python", "programming", "requires", "good", "understanding", "basic", "syntax", ",", "data", "type", ",", "error", "handling", ",", "object-oriented", "programming", ".", "-", "*", "*", "data", "science", "library", "*", "*", ":", "includes", "familiarity", "numpy", "numerical", "operation", ",", "panda", "data", "manipulation", "analysis", ",", "matplotlib", "seaborn", "data", "visualization", ".", "-", "*", "*", "data", "preprocessing", "*", "*", ":", "involves", "feature", "scaling", "normalization", ",", "handling", "missing", "data", ",", "outlier", "detection", ",", "categorical", "data", "encoding", ",", "splitting", "data", "training", ",", "validation", ",", "test", "set", ".", "-", "*", "*", "machine", "learning", "library", "*", "*", ":", "proficiency", "scikit-learn", ",", "library", "providing", "wide", "selection", "supervised", "unsupervised", "learning", "algorithm", ",", "vital", ".", "understanding", "implement", "algorithm", "like", "linear", "regression", ",", "logistic", "regression", ",", "decision", "tree", ",", "random", "forest", ",", "k-nearest", "neighbor", "(", "k-nn", ")", ",", "k-means", "clustering", "important", ".", "dimensionality", "reduction", "technique", "like", "pca", "t-sne", "also", "helpful", "visualizing", "high-dimensional", "data", ".", "๐Ÿ“š", "resource", ":", "-", "[", "real", "python", "]", "(", "http", ":", "//realpython.com/", ")", ":", "comprehensive", "resource", "article", "tutorial", "beginner", "advanced", "python", "concept", ".", "-", "[", "freecodecamp", "-", "learn", "python", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=rfscvs0vtbw", ")", ":", "long", "video", "provides", "full", "introduction", "core", "concept", "python", ".", "-", "[", "python", "data", "science", "handbook", "]", "(", "http", ":", "//jakevdp.github.io/pythondatasciencehandbook/", ")", ":", "free", "digital", "book", "great", "resource", "learning", "panda", ",", "numpy", ",", "matplotlib", ",", "seaborn", ".", "-", "[", "freecodecamp", "-", "machine", "learning", "everybody", "]", "(", "http", ":", "//youtu.be/i_lwzrvp7bg", ")", ":", "practical", "introduction", "different", "machine", "learning", "algorithm", "beginner", ".", "-", "[", "udacity", "-", "intro", "machine", "learning", "]", "(", "http", ":", "//www.udacity.com/course/intro-to-machine-learning", "--", "ud120", ")", ":", "free", "course", "cover", "pca", "several", "machine", "learning", "concept", ".", "--", "-" ], [ "2 .", "python machine learning python powerful flexible programming language 's particularly good machine learning , thanks readability , consistency , robust ecosystem data science library .", "- * * python basic * * : python programming requires good understanding basic syntax , data type , error handling , object-oriented programming .", "- * * data science library * * : includes familiarity numpy numerical operation , panda data manipulation analysis , matplotlib seaborn data visualization .", "- * * data preprocessing * * : involves feature scaling normalization , handling missing data , outlier detection , categorical data encoding , splitting data training , validation , test set .", "- * * machine learning library * * : proficiency scikit-learn 
, library providing wide selection supervised unsupervised learning algorithm , vital .", "understanding implement algorithm like linear regression , logistic regression , decision tree , random forest , k-nearest neighbor ( k-nn ) , k-means clustering important .", "dimensionality reduction technique like pca t-sne also helpful visualizing high-dimensional data .", "๐Ÿ“š resource : - [ real python ] ( http : //realpython.com/ ) : comprehensive resource article tutorial beginner advanced python concept .", "- [ freecodecamp - learn python ] ( http : //www.youtube.com/watch ? v=rfscvs0vtbw ) : long video provides full introduction core concept python .", "- [ python data science handbook ] ( http : //jakevdp.github.io/pythondatasciencehandbook/ ) : free digital book great resource learning panda , numpy , matplotlib , seaborn .", "- [ freecodecamp - machine learning everybody ] ( http : //youtu.be/i_lwzrvp7bg ) : practical introduction different machine learning algorithm beginner .", "- [ udacity - intro machine learning ] ( http : //www.udacity.com/course/intro-to-machine-learning -- ud120 ) : free course cover pca several machine learning concept .", "-- -" ] ]
[ [ "2", ".", "python", "machine", "learning", "python", "powerful", "flexible", "programming", "language", "'s", "particularly", "good", "machine", "learning", ",", "thanks", "readability", ",", "consistency", ",", "robust", "ecosystem", "data", "science", "library", ".", "-", "*", "*", "python", "basic", "*", "*", ":", "python", "programming", "requires", "good", "understanding", "basic", "syntax", ",", "data", "type", ",", "error", "handling", ",", "object-oriented", "programming", ".", "-", "*", "*", "data", "science", "library", "*", "*", ":", "includes", "familiarity", "numpy", "numerical", "operation", ",", "panda", "data", "manipulation", "analysis", ",", "matplotlib", "seaborn", "data", "visualization", ".", "-", "*", "*", "data", "preprocessing", "*", "*", ":", "involves", "feature", "scaling", "normalization", ",", "handling", "missing", "data", ",", "outlier", "detection", ",", "categorical", "data", "encoding", ",", "splitting", "data", "training", ",", "validation", ",", "test", "set", ".", "-", "*", "*", "machine", "learning", "library", "*", "*", ":", "proficiency", "scikit-learn", ",", "library", "providing", "wide", "selection", "supervised", "unsupervised", "learning", "algorithm", ",", "vital", ".", "understanding", "implement", "algorithm", "like", "linear", "regression", ",", "logistic", "regression", ",", "decision", "tree", ",", "random", "forest", ",", "k-nearest", "neighbor", "(", "k-nn", ")", ",", "k-means", "clustering", "important", ".", "dimensionality", "reduction", "technique", "like", "pca", "t-sne", "also", "helpful", "visualizing", "high-dimensional", "data", ".", "๐Ÿ“š", "resource", ":", "-", "[", "real", "python", "]", "(", "http", ":", "//realpython.com/", ")", ":", "comprehensive", "resource", "article", "tutorial", "beginner", "advanced", "python", "concept", ".", "-", "[", "freecodecamp", "-", "learn", "python", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=rfscvs0vtbw", ")", ":", "long", "video", "provides", "full", "introduction", "core", "concept", "python", ".", "-", "[", "python", "data", "science", "handbook", "]", "(", "http", ":", "//jakevdp.github.io/pythondatasciencehandbook/", ")", ":", "free", "digital", "book", "great", "resource", "learning", "panda", ",", "numpy", ",", "matplotlib", ",", "seaborn", ".", "-", "[", "freecodecamp", "-", "machine", "learning", "everybody", "]", "(", "http", ":", "//youtu.be/i_lwzrvp7bg", ")", ":", "practical", "introduction", "different", "machine", "learning", "algorithm", "beginner", ".", "-", "[", "udacity", "-", "intro", "machine", "learning", "]", "(", "http", ":", "//www.udacity.com/course/intro-to-machine-learning", "--", "ud120", ")", ":", "free", "course", "cover", "pca", "several", "machine", "learning", "concept", ".", "--", "-" ], [ "2 .", "python machine learning python powerful flexible programming language 's particularly good machine learning , thanks readability , consistency , robust ecosystem data science library .", "- * * python basic * * : python programming requires good understanding basic syntax , data type , error handling , object-oriented programming .", "- * * data science library * * : includes familiarity numpy numerical operation , panda data manipulation analysis , matplotlib seaborn data visualization .", "- * * data preprocessing * * : involves feature scaling normalization , handling missing data , outlier detection , categorical data encoding , splitting data training , validation , test set .", "- * * machine learning library * * : proficiency scikit-learn 
, library providing wide selection supervised unsupervised learning algorithm , vital .", "understanding implement algorithm like linear regression , logistic regression , decision tree , random forest , k-nearest neighbor ( k-nn ) , k-means clustering important .", "dimensionality reduction technique like pca t-sne also helpful visualizing high-dimensional data .", "๐Ÿ“š resource : - [ real python ] ( http : //realpython.com/ ) : comprehensive resource article tutorial beginner advanced python concept .", "- [ freecodecamp - learn python ] ( http : //www.youtube.com/watch ? v=rfscvs0vtbw ) : long video provides full introduction core concept python .", "- [ python data science handbook ] ( http : //jakevdp.github.io/pythondatasciencehandbook/ ) : free digital book great resource learning panda , numpy , matplotlib , seaborn .", "- [ freecodecamp - machine learning everybody ] ( http : //youtu.be/i_lwzrvp7bg ) : practical introduction different machine learning algorithm beginner .", "- [ udacity - intro machine learning ] ( http : //www.udacity.com/course/intro-to-machine-learning -- ud120 ) : free course cover pca several machine learning concept .", "-- -" ] ]
2. Python for Machine Learning

Python is a powerful and flexible programming language that's particularly good for machine learning, thanks to its readability, consistency, and robust ecosystem of data science libraries.

- **Python Basics**: Python programming requires a good understanding of the basic syntax, data types, error handling, and object-oriented programming.
- **Data Science Libraries**: This includes familiarity with NumPy for numerical operations, Pandas for data manipulation and analysis, and Matplotlib and Seaborn for data visualization.
- **Data Preprocessing**: This involves feature scaling and normalization, handling missing data, outlier detection, categorical data encoding, and splitting data into training, validation, and test sets.
- **Machine Learning Libraries**: Proficiency with Scikit-learn, a library providing a wide selection of supervised and unsupervised learning algorithms, is vital. Understanding how to implement algorithms like linear regression, logistic regression, decision trees, random forests, k-nearest neighbors (K-NN), and K-means clustering is important. Dimensionality reduction techniques like PCA and t-SNE are also helpful for visualizing high-dimensional data. A minimal end-to-end sketch follows the resource list below.

📚 Resources:

- [Real Python](https://realpython.com/): A comprehensive resource with articles and tutorials for both beginner and advanced Python concepts.
- [freeCodeCamp - Learn Python](https://www.youtube.com/watch?v=rfscVS0vtbw): Long video that provides a full introduction to all of the core concepts in Python.
- [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/): Free digital book that is a great resource for learning Pandas, NumPy, Matplotlib, and Seaborn.
- [freeCodeCamp - Machine Learning for Everybody](https://youtu.be/i_LwzRVP7bg): Practical introduction to different machine learning algorithms for beginners.
- [Udacity - Intro to Machine Learning](https://www.udacity.com/course/intro-to-machine-learning--ud120): Free course that covers PCA and several other machine learning concepts.
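A minimal sketch of the workflow described above, using scikit-learn's bundled Iris data as a stand-in for your own dataset (the model choice and split sizes are illustrative assumptions, not part of the original course):

```python
# Split, scale, fit, evaluate — the basic supervised-learning loop.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score

X, y = load_iris(return_X_y=True)

# Hold out a test set (a validation set would be carved out the same way).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Feature scaling: fit on training data only to avoid leakage into the test set.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Fit a simple supervised model and evaluate it on unseen data.
model = LogisticRegression(max_iter=1000)
model.fit(X_train, y_train)
print("test accuracy:", accuracy_score(y_test, model.predict(X_test)))

# Dimensionality reduction for visualization: project to 2 components.
X_2d = PCA(n_components=2).fit_transform(X_train)
print("projected shape:", X_2d.shape)
```

---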
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "๐Ÿง‘โ€๐Ÿ”ฌ", "llm", "scientist", "section", "course", "focus", "learning", "build", "best", "possible", "llm", "using", "latest", "technique", ".", "!", "[", "]", "(", "img/roadmap_scientist.png", ")" ], [ "๐Ÿง‘โ€๐Ÿ”ฌ llm scientist section course focus learning build best possible llm using latest technique .", "!", "[ ] ( img/roadmap_scientist.png )" ] ]
[ [ "๐Ÿง‘โ€๐Ÿ”ฌ", "llm", "scientist", "section", "course", "focus", "learning", "build", "best", "possible", "llm", "using", "latest", "technique", ".", "!", "[", "]", "(", "img/roadmap_scientist.png", ")" ], [ "๐Ÿง‘โ€๐Ÿ”ฌ llm scientist section course focus learning build best possible llm using latest technique .", "!", "[ ] ( img/roadmap_scientist.png )" ] ]
๐Ÿง‘โ€๐Ÿ”ฌ The LLM Scientist This section of the course focuses on learning how to build the best possible LLMs using the latest techniques. ![](img/roadmap_scientist.png)
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "1", ".", "llm", "architecture", "in-depth", "knowledge", "transformer", "architecture", "required", ",", "important", "good", "understanding", "input", "(", "token", ")", "output", "(", "logits", ")", ".", "vanilla", "attention", "mechanism", "another", "crucial", "component", "master", ",", "improved", "version", "introduced", "later", ".", "*", "*", "*", "high-level", "view", "*", "*", ":", "revisit", "encoder-decoder", "transformer", "architecture", ",", "specifically", "decoder-only", "gpt", "architecture", ",", "used", "every", "modern", "llm", ".", "*", "*", "*", "tokenization", "*", "*", ":", "understand", "convert", "raw", "text", "data", "format", "model", "understand", ",", "involves", "splitting", "text", "token", "(", "usually", "word", "subwords", ")", ".", "*", "*", "*", "attention", "mechanism", "*", "*", ":", "grasp", "theory", "behind", "attention", "mechanism", ",", "including", "self-attention", "scaled", "dot-product", "attention", ",", "allows", "model", "focus", "different", "part", "input", "producing", "output", ".", "*", "*", "*", "text", "generation", "*", "*", ":", "learn", "different", "way", "model", "generate", "output", "sequence", ".", "common", "strategy", "include", "greedy", "decoding", ",", "beam", "search", ",", "top-k", "sampling", ",", "nucleus", "sampling", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "-", "[", "illustrated", "transformer", "]", "(", "http", ":", "//jalammar.github.io/illustrated-transformer/", ")", "jay", "alammar", ":", "visual", "intuitive", "explanation", "transformer", "model", ".", "-", "[", "illustrated", "gpt-2", "]", "(", "http", ":", "//jalammar.github.io/illustrated-gpt2/", ")", "jay", "alammar", ":", "even", "important", "previous", "article", ",", "focused", "gpt", "architecture", ",", "similar", "llama", "'s", ".", "-", "[", "llm", "visualization", "]", "(", "http", ":", "//bbycroft.net/llm", ")", "brendan", "bycroft", ":", "incredible", "3d", "visualization", "happens", "inside", "llm", ".", "*", "[", "nanogpt", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=kcc8fmeb1ny", ")", "andrej", "karpathy", ":", "2h-long", "youtube", "video", "reimplement", "gpt", "scratch", "(", "programmer", ")", ".", "*", "[", "attention", "?", "attention", "!", "]", "(", "http", ":", "//lilianweng.github.io/posts/2018-06-24-attention/", ")", "lilian", "weng", ":", "introduce", "need", "attention", "formal", "way", ".", "*", "[", "decoding", "strategy", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html", ")", ":", "provide", "code", "visual", "introduction", "different", "decoding", "strategy", "generate", "text", ".", "--", "-" ], [ "1 .", "llm architecture in-depth knowledge transformer architecture required , important good understanding input ( token ) output ( logits ) .", "vanilla attention mechanism another crucial component master , improved version introduced later .", "* * * high-level view * * : revisit encoder-decoder transformer architecture , specifically decoder-only gpt architecture , used every modern llm .", "* * * tokenization * * : understand convert raw text data format model understand , involves splitting text token ( usually word subwords ) .", "* * * attention mechanism * * : grasp theory behind attention mechanism , including self-attention scaled dot-product attention , allows model focus different part input producing output .", "* * * text generation * * : learn different way model generate output sequence .", "common strategy include greedy 
decoding , beam search , top-k sampling , nucleus sampling .", "๐Ÿ“š * * reference * * : - [ illustrated transformer ] ( http : //jalammar.github.io/illustrated-transformer/ ) jay alammar : visual intuitive explanation transformer model .", "- [ illustrated gpt-2 ] ( http : //jalammar.github.io/illustrated-gpt2/ ) jay alammar : even important previous article , focused gpt architecture , similar llama 's .", "- [ llm visualization ] ( http : //bbycroft.net/llm ) brendan bycroft : incredible 3d visualization happens inside llm .", "* [ nanogpt ] ( http : //www.youtube.com/watch ? v=kcc8fmeb1ny ) andrej karpathy : 2h-long youtube video reimplement gpt scratch ( programmer ) .", "* [ attention ?", "attention !", "] ( http : //lilianweng.github.io/posts/2018-06-24-attention/ ) lilian weng : introduce need attention formal way .", "* [ decoding strategy llm ] ( http : //mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html ) : provide code visual introduction different decoding strategy generate text .", "-- -" ] ]
[ [ "1", ".", "llm", "architecture", "in-depth", "knowledge", "transformer", "architecture", "required", ",", "important", "good", "understanding", "input", "(", "token", ")", "output", "(", "logits", ")", ".", "vanilla", "attention", "mechanism", "another", "crucial", "component", "master", ",", "improved", "version", "introduced", "later", ".", "*", "*", "*", "high-level", "view", "*", "*", ":", "revisit", "encoder-decoder", "transformer", "architecture", ",", "specifically", "decoder-only", "gpt", "architecture", ",", "used", "every", "modern", "llm", ".", "*", "*", "*", "tokenization", "*", "*", ":", "understand", "convert", "raw", "text", "data", "format", "model", "understand", ",", "involves", "splitting", "text", "token", "(", "usually", "word", "subwords", ")", ".", "*", "*", "*", "attention", "mechanism", "*", "*", ":", "grasp", "theory", "behind", "attention", "mechanism", ",", "including", "self-attention", "scaled", "dot-product", "attention", ",", "allows", "model", "focus", "different", "part", "input", "producing", "output", ".", "*", "*", "*", "text", "generation", "*", "*", ":", "learn", "different", "way", "model", "generate", "output", "sequence", ".", "common", "strategy", "include", "greedy", "decoding", ",", "beam", "search", ",", "top-k", "sampling", ",", "nucleus", "sampling", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "-", "[", "illustrated", "transformer", "]", "(", "http", ":", "//jalammar.github.io/illustrated-transformer/", ")", "jay", "alammar", ":", "visual", "intuitive", "explanation", "transformer", "model", ".", "-", "[", "illustrated", "gpt-2", "]", "(", "http", ":", "//jalammar.github.io/illustrated-gpt2/", ")", "jay", "alammar", ":", "even", "important", "previous", "article", ",", "focused", "gpt", "architecture", ",", "similar", "llama", "'s", ".", "-", "[", "llm", "visualization", "]", "(", "http", ":", "//bbycroft.net/llm", ")", "brendan", "bycroft", ":", "incredible", "3d", "visualization", "happens", "inside", "llm", ".", "*", "[", "nanogpt", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=kcc8fmeb1ny", ")", "andrej", "karpathy", ":", "2h-long", "youtube", "video", "reimplement", "gpt", "scratch", "(", "programmer", ")", ".", "*", "[", "attention", "?", "attention", "!", "]", "(", "http", ":", "//lilianweng.github.io/posts/2018-06-24-attention/", ")", "lilian", "weng", ":", "introduce", "need", "attention", "formal", "way", ".", "*", "[", "decoding", "strategy", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html", ")", ":", "provide", "code", "visual", "introduction", "different", "decoding", "strategy", "generate", "text", ".", "--", "-" ], [ "1 .", "llm architecture in-depth knowledge transformer architecture required , important good understanding input ( token ) output ( logits ) .", "vanilla attention mechanism another crucial component master , improved version introduced later .", "* * * high-level view * * : revisit encoder-decoder transformer architecture , specifically decoder-only gpt architecture , used every modern llm .", "* * * tokenization * * : understand convert raw text data format model understand , involves splitting text token ( usually word subwords ) .", "* * * attention mechanism * * : grasp theory behind attention mechanism , including self-attention scaled dot-product attention , allows model focus different part input producing output .", "* * * text generation * * : learn different way model generate output sequence .", "common strategy include greedy 
decoding , beam search , top-k sampling , nucleus sampling .", "๐Ÿ“š * * reference * * : - [ illustrated transformer ] ( http : //jalammar.github.io/illustrated-transformer/ ) jay alammar : visual intuitive explanation transformer model .", "- [ illustrated gpt-2 ] ( http : //jalammar.github.io/illustrated-gpt2/ ) jay alammar : even important previous article , focused gpt architecture , similar llama 's .", "- [ llm visualization ] ( http : //bbycroft.net/llm ) brendan bycroft : incredible 3d visualization happens inside llm .", "* [ nanogpt ] ( http : //www.youtube.com/watch ? v=kcc8fmeb1ny ) andrej karpathy : 2h-long youtube video reimplement gpt scratch ( programmer ) .", "* [ attention ?", "attention !", "] ( http : //lilianweng.github.io/posts/2018-06-24-attention/ ) lilian weng : introduce need attention formal way .", "* [ decoding strategy llm ] ( http : //mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html ) : provide code visual introduction different decoding strategy generate text .", "-- -" ] ]
1. The LLM architecture

While in-depth knowledge of the Transformer architecture is not required, it is important to have a good understanding of its inputs (tokens) and outputs (logits). The vanilla attention mechanism is another crucial component to master, as improved versions of it are introduced later on.

* **High-level view**: Revisit the encoder-decoder Transformer architecture, and more specifically the decoder-only GPT architecture, which is used in every modern LLM.
* **Tokenization**: Understand how to convert raw text data into a format that the model can understand, which involves splitting the text into tokens (usually words or subwords).
* **Attention mechanisms**: Grasp the theory behind attention mechanisms, including self-attention and scaled dot-product attention, which allow the model to focus on different parts of the input when producing an output. A small NumPy sketch follows the reference list below.
* **Text generation**: Learn about the different ways the model can generate output sequences. Common strategies include greedy decoding, beam search, top-k sampling, and nucleus sampling.

📚 **References**:

* [The Illustrated Transformer](https://jalammar.github.io/illustrated-transformer/) by Jay Alammar: A visual and intuitive explanation of the Transformer model.
* [The Illustrated GPT-2](https://jalammar.github.io/illustrated-gpt2/) by Jay Alammar: Even more important than the previous article, it is focused on the GPT architecture, which is very similar to Llama's.
* [LLM Visualization](https://bbycroft.net/llm) by Brendan Bycroft: Incredible 3D visualization of what happens inside an LLM.
* [nanoGPT](https://www.youtube.com/watch?v=kCc8FmEb1nY) by Andrej Karpathy: A 2h-long YouTube video to reimplement GPT from scratch (for programmers).
* [Attention? Attention!](https://lilianweng.github.io/posts/2018-06-24-attention/) by Lilian Weng: Introduces the need for attention in a more formal way.
* [Decoding Strategies in LLMs](https://mlabonne.github.io/blog/posts/2023-06-07-Decoding_strategies.html): Provides code and a visual introduction to the different decoding strategies to generate text.
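To make scaled dot-product attention concrete, here is a didactic NumPy sketch of the standard formulation, with the causal mask used by decoder-only (GPT-style) models (shapes and values are illustrative; this is not any particular library's implementation):

```python
# Scaled dot-product attention with an optional causal mask, in plain NumPy.
import numpy as np

def scaled_dot_product_attention(Q, K, V, causal=True):
    d_k = Q.shape[-1]
    scores = Q @ K.T / np.sqrt(d_k)            # (seq_len, seq_len) similarity scores
    if causal:
        # Block attention to future positions (strictly upper-triangular mask).
        mask = np.triu(np.ones_like(scores), k=1).astype(bool)
        scores = np.where(mask, -1e9, scores)
    # Numerically stable row-wise softmax over the key dimension.
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)
    return weights @ V                          # weighted sum of value vectors

# Tiny example: 4 tokens with 8-dimensional queries/keys/values.
rng = np.random.default_rng(0)
Q = K = V = rng.normal(size=(4, 8))
print(scaled_dot_product_attention(Q, K, V).shape)  # (4, 8)
```

---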
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "2", ".", "building", "instruction", "dataset", "'s", "easy", "find", "raw", "data", "wikipedia", "website", ",", "'s", "difficult", "collect", "pair", "instruction", "answer", "wild", ".", "like", "traditional", "machine", "learning", ",", "quality", "dataset", "directly", "influence", "quality", "model", ",", "might", "important", "component", "fine-tuning", "process", ".", "*", "*", "*", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", "-like", "dataset", "*", "*", ":", "generate", "synthetic", "data", "scratch", "openai", "api", "(", "gpt", ")", ".", "specify", "seed", "system", "prompt", "create", "diverse", "dataset", ".", "*", "*", "*", "advanced", "technique", "*", "*", ":", "learn", "improve", "existing", "datasets", "[", "evol-instruct", "]", "(", "http", ":", "//arxiv.org/abs/2304.12244", ")", ",", "generate", "high-quality", "synthetic", "data", "like", "[", "orca", "]", "(", "http", ":", "//arxiv.org/abs/2306.02707", ")", "[", "phi-1", "]", "(", "http", ":", "//arxiv.org/abs/2306.11644", ")", "paper", ".", "*", "*", "*", "filtering", "data", "*", "*", ":", "traditional", "technique", "involving", "regex", ",", "removing", "near-duplicates", ",", "focusing", "answer", "high", "number", "token", ",", "etc", ".", "*", "*", "*", "prompt", "template", "*", "*", ":", "'s", "true", "standard", "way", "formatting", "instruction", "answer", ",", "'s", "important", "know", "different", "chat", "template", ",", "[", "chatml", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt", "?", "tabs=python", "&", "pivots=programming-language-chat-ml", ")", ",", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", ",", "etc", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "preparing", "dataset", "instruction", "tuning", "]", "(", "http", ":", "//wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning", "--", "vmlldzo1ntcxnze2", ")", "thomas", "capelle", ":", "exploration", "alpaca", "alpaca-gpt4", "datasets", "format", ".", "*", "[", "generating", "clinical", "instruction", "dataset", "]", "(", "http", ":", "//medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae", ")", "solano", "todeschini", ":", "tutorial", "create", "synthetic", "instruction", "dataset", "using", "gpt-4", ".", "*", "[", "gpt", "3.5", "news", "classification", "]", "(", "http", ":", "//medium.com/", "@", "kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f", ")", "kshitiz", "sahay", ":", "use", "gpt", "3.5", "create", "instruction", "dataset", "fine-tune", "llama", "2", "news", "classification", ".", "*", "[", "dataset", "creation", "fine-tuning", "llm", "]", "(", "http", ":", "//colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag", "?", "usp=sharing", ")", ":", "notebook", "contains", "technique", "filter", "dataset", "upload", "result", ".", "*", "[", "chat", "template", "]", "(", "http", ":", "//huggingface.co/blog/chat-templates", ")", "matthew", "carrigan", ":", "hugging", "face", "'s", "page", "prompt", "template", "--", "-" ], [ "2 .", "building instruction dataset 's easy find raw data wikipedia website , 's difficult collect pair instruction answer wild .", "like traditional machine learning , quality dataset directly influence quality model , might important component fine-tuning 
process .", "* * * [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) -like dataset * * : generate synthetic data scratch openai api ( gpt ) .", "specify seed system prompt create diverse dataset .", "* * * advanced technique * * : learn improve existing datasets [ evol-instruct ] ( http : //arxiv.org/abs/2304.12244 ) , generate high-quality synthetic data like [ orca ] ( http : //arxiv.org/abs/2306.02707 ) [ phi-1 ] ( http : //arxiv.org/abs/2306.11644 ) paper .", "* * * filtering data * * : traditional technique involving regex , removing near-duplicates , focusing answer high number token , etc .", "* * * prompt template * * : 's true standard way formatting instruction answer , 's important know different chat template , [ chatml ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt ? tabs=python & pivots=programming-language-chat-ml ) , [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) , etc .", "๐Ÿ“š * * reference * * : * [ preparing dataset instruction tuning ] ( http : //wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning -- vmlldzo1ntcxnze2 ) thomas capelle : exploration alpaca alpaca-gpt4 datasets format .", "* [ generating clinical instruction dataset ] ( http : //medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae ) solano todeschini : tutorial create synthetic instruction dataset using gpt-4 .", "* [ gpt 3.5 news classification ] ( http : //medium.com/ @ kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f ) kshitiz sahay : use gpt 3.5 create instruction dataset fine-tune llama 2 news classification .", "* [ dataset creation fine-tuning llm ] ( http : //colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag ? usp=sharing ) : notebook contains technique filter dataset upload result .", "* [ chat template ] ( http : //huggingface.co/blog/chat-templates ) matthew carrigan : hugging face 's page prompt template -- -" ] ]
[ [ "2", ".", "building", "instruction", "dataset", "'s", "easy", "find", "raw", "data", "wikipedia", "website", ",", "'s", "difficult", "collect", "pair", "instruction", "answer", "wild", ".", "like", "traditional", "machine", "learning", ",", "quality", "dataset", "directly", "influence", "quality", "model", ",", "might", "important", "component", "fine-tuning", "process", ".", "*", "*", "*", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", "-like", "dataset", "*", "*", ":", "generate", "synthetic", "data", "scratch", "openai", "api", "(", "gpt", ")", ".", "specify", "seed", "system", "prompt", "create", "diverse", "dataset", ".", "*", "*", "*", "advanced", "technique", "*", "*", ":", "learn", "improve", "existing", "datasets", "[", "evol-instruct", "]", "(", "http", ":", "//arxiv.org/abs/2304.12244", ")", ",", "generate", "high-quality", "synthetic", "data", "like", "[", "orca", "]", "(", "http", ":", "//arxiv.org/abs/2306.02707", ")", "[", "phi-1", "]", "(", "http", ":", "//arxiv.org/abs/2306.11644", ")", "paper", ".", "*", "*", "*", "filtering", "data", "*", "*", ":", "traditional", "technique", "involving", "regex", ",", "removing", "near-duplicates", ",", "focusing", "answer", "high", "number", "token", ",", "etc", ".", "*", "*", "*", "prompt", "template", "*", "*", ":", "'s", "true", "standard", "way", "formatting", "instruction", "answer", ",", "'s", "important", "know", "different", "chat", "template", ",", "[", "chatml", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt", "?", "tabs=python", "&", "pivots=programming-language-chat-ml", ")", ",", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", ",", "etc", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "preparing", "dataset", "instruction", "tuning", "]", "(", "http", ":", "//wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning", "--", "vmlldzo1ntcxnze2", ")", "thomas", "capelle", ":", "exploration", "alpaca", "alpaca-gpt4", "datasets", "format", ".", "*", "[", "generating", "clinical", "instruction", "dataset", "]", "(", "http", ":", "//medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae", ")", "solano", "todeschini", ":", "tutorial", "create", "synthetic", "instruction", "dataset", "using", "gpt-4", ".", "*", "[", "gpt", "3.5", "news", "classification", "]", "(", "http", ":", "//medium.com/", "@", "kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f", ")", "kshitiz", "sahay", ":", "use", "gpt", "3.5", "create", "instruction", "dataset", "fine-tune", "llama", "2", "news", "classification", ".", "*", "[", "dataset", "creation", "fine-tuning", "llm", "]", "(", "http", ":", "//colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag", "?", "usp=sharing", ")", ":", "notebook", "contains", "technique", "filter", "dataset", "upload", "result", ".", "*", "[", "chat", "template", "]", "(", "http", ":", "//huggingface.co/blog/chat-templates", ")", "matthew", "carrigan", ":", "hugging", "face", "'s", "page", "prompt", "template", "--", "-" ], [ "2 .", "building instruction dataset 's easy find raw data wikipedia website , 's difficult collect pair instruction answer wild .", "like traditional machine learning , quality dataset directly influence quality model , might important component fine-tuning 
process .", "* * * [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) -like dataset * * : generate synthetic data scratch openai api ( gpt ) .", "specify seed system prompt create diverse dataset .", "* * * advanced technique * * : learn improve existing datasets [ evol-instruct ] ( http : //arxiv.org/abs/2304.12244 ) , generate high-quality synthetic data like [ orca ] ( http : //arxiv.org/abs/2306.02707 ) [ phi-1 ] ( http : //arxiv.org/abs/2306.11644 ) paper .", "* * * filtering data * * : traditional technique involving regex , removing near-duplicates , focusing answer high number token , etc .", "* * * prompt template * * : 's true standard way formatting instruction answer , 's important know different chat template , [ chatml ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt ? tabs=python & pivots=programming-language-chat-ml ) , [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) , etc .", "๐Ÿ“š * * reference * * : * [ preparing dataset instruction tuning ] ( http : //wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning -- vmlldzo1ntcxnze2 ) thomas capelle : exploration alpaca alpaca-gpt4 datasets format .", "* [ generating clinical instruction dataset ] ( http : //medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae ) solano todeschini : tutorial create synthetic instruction dataset using gpt-4 .", "* [ gpt 3.5 news classification ] ( http : //medium.com/ @ kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f ) kshitiz sahay : use gpt 3.5 create instruction dataset fine-tune llama 2 news classification .", "* [ dataset creation fine-tuning llm ] ( http : //colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag ? usp=sharing ) : notebook contains technique filter dataset upload result .", "* [ chat template ] ( http : //huggingface.co/blog/chat-templates ) matthew carrigan : hugging face 's page prompt template -- -" ] ]
2. Building an instruction dataset

While it's easy to find raw data from Wikipedia and other websites, it's difficult to collect pairs of instructions and answers in the wild. Like in traditional machine learning, the quality of the dataset will directly influence the quality of the model, which is why it might be the most important component in the fine-tuning process.

* **[Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html)-like dataset**: Generate synthetic data from scratch with the OpenAI API (GPT). You can specify seeds and system prompts to create a diverse dataset.
* **Advanced techniques**: Learn how to improve existing datasets with [Evol-Instruct](https://arxiv.org/abs/2304.12244), and how to generate high-quality synthetic data like in the [Orca](https://arxiv.org/abs/2306.02707) and [phi-1](https://arxiv.org/abs/2306.11644) papers.
* **Filtering data**: Traditional techniques involving regex, removing near-duplicates, focusing on answers with a high number of tokens, etc.
* **Prompt templates**: There's no true standard way of formatting instructions and answers, which is why it's important to know about the different chat templates, such as [ChatML](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt?tabs=python&pivots=programming-language-chat-ml), [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html), etc. A short sketch of simple filtering and an Alpaca-style template follows the reference list below.

📚 **References**:

* [Preparing a Dataset for Instruction tuning](https://wandb.ai/capecape/alpaca_ft/reports/How-to-Fine-Tune-an-LLM-Part-1-Preparing-a-Dataset-for-Instruction-Tuning--Vmlldzo1NTcxNzE2) by Thomas Capelle: Exploration of the Alpaca and Alpaca-GPT4 datasets and how to format them.
* [Generating a Clinical Instruction Dataset](https://medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae) by Solano Todeschini: Tutorial on how to create a synthetic instruction dataset using GPT-4.
* [GPT 3.5 for news classification](https://medium.com/@kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f) by Kshitiz Sahay: Use GPT 3.5 to create an instruction dataset to fine-tune Llama 2 for news classification.
* [Dataset creation for fine-tuning LLM](https://colab.research.google.com/drive/1GH8PW9-zAe4cXEZyOIE-T9uHXblIldAg?usp=sharing): Notebook that contains a few techniques to filter a dataset and upload the result.
* [Chat Template](https://huggingface.co/blog/chat-templates) by Matthew Carrigan: Hugging Face's page about prompt templates.
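The filtering and templating ideas above can be sketched in a few lines. The threshold below is illustrative, and the Alpaca-style wording follows the commonly used template (treat both as assumptions to adapt, not canonical values):

```python
# Toy instruction-dataset filtering plus Alpaca-style prompt formatting.
import re

ALPACA_TEMPLATE = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n### Response:\n{output}"
)

def filter_examples(examples, min_tokens=10):
    """Drop short answers and crude near-duplicates."""
    seen, kept = set(), []
    for ex in examples:
        answer = ex["output"]
        if len(answer.split()) < min_tokens:   # too short to be informative
            continue
        key = re.sub(r"\s+", " ", answer.lower()).strip()  # normalize for dedup
        if key in seen:
            continue
        seen.add(key)
        kept.append(ex)
    return kept

data = [
    {"instruction": "Name a prime number.",
     "output": "2 is the smallest prime number, and the only even prime."},
    {"instruction": "Name a prime number.",
     "output": "2 is the smallest  prime number, and the only even prime."},
]
filtered = filter_examples(data)
print(len(filtered))                          # 1 — the near-duplicate was removed
print(ALPACA_TEMPLATE.format(**filtered[0]))  # formatted training prompt
```

---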
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "3", ".", "pre-training", "model", "pre-training", "long", "costly", "process", ",", "focus", "course", ".", "'s", "good", "level", "understanding", "happens", "pre-training", ",", "hands-on", "experience", "required", ".", "*", "*", "*", "data", "pipeline", "*", "*", ":", "pre-training", "requires", "huge", "datasets", "(", "e.g.", ",", "[", "llama", "2", "]", "(", "http", ":", "//arxiv.org/abs/2307.09288", ")", "trained", "2", "trillion", "token", ")", "need", "filtered", ",", "tokenized", ",", "collated", "pre-defined", "vocabulary", ".", "*", "*", "*", "causal", "language", "modeling", "*", "*", ":", "learn", "difference", "causal", "masked", "language", "modeling", ",", "well", "loss", "function", "used", "case", ".", "efficient", "pre-training", ",", "learn", "[", "megatron-lm", "]", "(", "http", ":", "//github.com/nvidia/megatron-lm", ")", "[", "gpt-neox", "]", "(", "http", ":", "//github.com/eleutherai/gpt-neox", ")", ".", "*", "*", "*", "scaling", "law", "*", "*", ":", "[", "scaling", "law", "]", "(", "http", ":", "//arxiv.org/pdf/2001.08361.pdf", ")", "describe", "expected", "model", "performance", "based", "model", "size", ",", "dataset", "size", ",", "amount", "compute", "used", "training", ".", "*", "*", "*", "high-performance", "computing", "*", "*", ":", "scope", ",", "knowledge", "hpc", "fundamental", "'re", "planning", "create", "llm", "scratch", "(", "hardware", ",", "distributed", "workload", ",", "etc", ".", ")", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "llmdatahub", "]", "(", "http", ":", "//github.com/zjh-819/llmdatahub", ")", "junhao", "zhao", ":", "curated", "list", "datasets", "pre-training", ",", "fine-tuning", ",", "rlhf", ".", "*", "[", "training", "causal", "language", "model", "scratch", "]", "(", "http", ":", "//huggingface.co/learn/nlp-course/chapter7/6", "?", "fw=pt", ")", "hugging", "face", ":", "pre-train", "gpt-2", "model", "scratch", "using", "transformer", "library", ".", "*", "[", "tinyllama", "]", "(", "http", ":", "//github.com/jzhang38/tinyllama", ")", "zhang", "et", "al", ".", ":", "check", "project", "get", "good", "understanding", "llama", "model", "trained", "scratch", ".", "*", "[", "causal", "language", "modeling", "]", "(", "http", ":", "//huggingface.co/docs/transformers/tasks/language_modeling", ")", "hugging", "face", ":", "explain", "difference", "causal", "masked", "language", "modeling", "quickly", "fine-tune", "distilgpt-2", "model", ".", "*", "[", "chinchilla", "'s", "wild", "implication", "]", "(", "http", ":", "//www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications", ")", "nostalgebraist", ":", "discus", "scaling", "law", "explain", "mean", "llm", "general", ".", "*", "[", "bloom", "]", "(", "http", ":", "//bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4", ")", "bigscience", ":", "notion", "page", "describes", "bloom", "model", "built", ",", "lot", "useful", "information", "engineering", "part", "problem", "encountered", ".", "*", "[", "opt-175", "logbook", "]", "(", "http", ":", "//github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf", ")", "meta", ":", "research", "log", "showing", "went", "wrong", "went", "right", ".", "useful", "'re", "planning", "pre-train", "large", "language", "model", "(", "case", ",", "175b", "parameter", ")", ".", "*", "[", "llm", "360", "]", "(", "http", ":", "//www.llm360.ai/", ")", ":", "framework", "open-source", "llm", "training", "data", "preparation", "code", ",", "data", ",", 
"metric", ",", "model", ".", "--", "-" ], [ "3 .", "pre-training model pre-training long costly process , focus course .", "'s good level understanding happens pre-training , hands-on experience required .", "* * * data pipeline * * : pre-training requires huge datasets ( e.g. , [ llama 2 ] ( http : //arxiv.org/abs/2307.09288 ) trained 2 trillion token ) need filtered , tokenized , collated pre-defined vocabulary .", "* * * causal language modeling * * : learn difference causal masked language modeling , well loss function used case .", "efficient pre-training , learn [ megatron-lm ] ( http : //github.com/nvidia/megatron-lm ) [ gpt-neox ] ( http : //github.com/eleutherai/gpt-neox ) .", "* * * scaling law * * : [ scaling law ] ( http : //arxiv.org/pdf/2001.08361.pdf ) describe expected model performance based model size , dataset size , amount compute used training .", "* * * high-performance computing * * : scope , knowledge hpc fundamental 're planning create llm scratch ( hardware , distributed workload , etc . ) .", "๐Ÿ“š * * reference * * : * [ llmdatahub ] ( http : //github.com/zjh-819/llmdatahub ) junhao zhao : curated list datasets pre-training , fine-tuning , rlhf .", "* [ training causal language model scratch ] ( http : //huggingface.co/learn/nlp-course/chapter7/6 ? fw=pt ) hugging face : pre-train gpt-2 model scratch using transformer library .", "* [ tinyllama ] ( http : //github.com/jzhang38/tinyllama ) zhang et al .", ": check project get good understanding llama model trained scratch .", "* [ causal language modeling ] ( http : //huggingface.co/docs/transformers/tasks/language_modeling ) hugging face : explain difference causal masked language modeling quickly fine-tune distilgpt-2 model .", "* [ chinchilla 's wild implication ] ( http : //www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications ) nostalgebraist : discus scaling law explain mean llm general .", "* [ bloom ] ( http : //bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4 ) bigscience : notion page describes bloom model built , lot useful information engineering part problem encountered .", "* [ opt-175 logbook ] ( http : //github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf ) meta : research log showing went wrong went right .", "useful 're planning pre-train large language model ( case , 175b parameter ) .", "* [ llm 360 ] ( http : //www.llm360.ai/ ) : framework open-source llm training data preparation code , data , metric , model .", "-- -" ] ]
[ [ "3", ".", "pre-training", "model", "pre-training", "long", "costly", "process", ",", "focus", "course", ".", "'s", "good", "level", "understanding", "happens", "pre-training", ",", "hands-on", "experience", "required", ".", "*", "*", "*", "data", "pipeline", "*", "*", ":", "pre-training", "requires", "huge", "datasets", "(", "e.g.", ",", "[", "llama", "2", "]", "(", "http", ":", "//arxiv.org/abs/2307.09288", ")", "trained", "2", "trillion", "token", ")", "need", "filtered", ",", "tokenized", ",", "collated", "pre-defined", "vocabulary", ".", "*", "*", "*", "causal", "language", "modeling", "*", "*", ":", "learn", "difference", "causal", "masked", "language", "modeling", ",", "well", "loss", "function", "used", "case", ".", "efficient", "pre-training", ",", "learn", "[", "megatron-lm", "]", "(", "http", ":", "//github.com/nvidia/megatron-lm", ")", "[", "gpt-neox", "]", "(", "http", ":", "//github.com/eleutherai/gpt-neox", ")", ".", "*", "*", "*", "scaling", "law", "*", "*", ":", "[", "scaling", "law", "]", "(", "http", ":", "//arxiv.org/pdf/2001.08361.pdf", ")", "describe", "expected", "model", "performance", "based", "model", "size", ",", "dataset", "size", ",", "amount", "compute", "used", "training", ".", "*", "*", "*", "high-performance", "computing", "*", "*", ":", "scope", ",", "knowledge", "hpc", "fundamental", "'re", "planning", "create", "llm", "scratch", "(", "hardware", ",", "distributed", "workload", ",", "etc", ".", ")", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "llmdatahub", "]", "(", "http", ":", "//github.com/zjh-819/llmdatahub", ")", "junhao", "zhao", ":", "curated", "list", "datasets", "pre-training", ",", "fine-tuning", ",", "rlhf", ".", "*", "[", "training", "causal", "language", "model", "scratch", "]", "(", "http", ":", "//huggingface.co/learn/nlp-course/chapter7/6", "?", "fw=pt", ")", "hugging", "face", ":", "pre-train", "gpt-2", "model", "scratch", "using", "transformer", "library", ".", "*", "[", "tinyllama", "]", "(", "http", ":", "//github.com/jzhang38/tinyllama", ")", "zhang", "et", "al", ".", ":", "check", "project", "get", "good", "understanding", "llama", "model", "trained", "scratch", ".", "*", "[", "causal", "language", "modeling", "]", "(", "http", ":", "//huggingface.co/docs/transformers/tasks/language_modeling", ")", "hugging", "face", ":", "explain", "difference", "causal", "masked", "language", "modeling", "quickly", "fine-tune", "distilgpt-2", "model", ".", "*", "[", "chinchilla", "'s", "wild", "implication", "]", "(", "http", ":", "//www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications", ")", "nostalgebraist", ":", "discus", "scaling", "law", "explain", "mean", "llm", "general", ".", "*", "[", "bloom", "]", "(", "http", ":", "//bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4", ")", "bigscience", ":", "notion", "page", "describes", "bloom", "model", "built", ",", "lot", "useful", "information", "engineering", "part", "problem", "encountered", ".", "*", "[", "opt-175", "logbook", "]", "(", "http", ":", "//github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf", ")", "meta", ":", "research", "log", "showing", "went", "wrong", "went", "right", ".", "useful", "'re", "planning", "pre-train", "large", "language", "model", "(", "case", ",", "175b", "parameter", ")", ".", "*", "[", "llm", "360", "]", "(", "http", ":", "//www.llm360.ai/", ")", ":", "framework", "open-source", "llm", "training", "data", "preparation", "code", ",", "data", ",", 
"metric", ",", "model", ".", "--", "-" ], [ "3 .", "pre-training model pre-training long costly process , focus course .", "'s good level understanding happens pre-training , hands-on experience required .", "* * * data pipeline * * : pre-training requires huge datasets ( e.g. , [ llama 2 ] ( http : //arxiv.org/abs/2307.09288 ) trained 2 trillion token ) need filtered , tokenized , collated pre-defined vocabulary .", "* * * causal language modeling * * : learn difference causal masked language modeling , well loss function used case .", "efficient pre-training , learn [ megatron-lm ] ( http : //github.com/nvidia/megatron-lm ) [ gpt-neox ] ( http : //github.com/eleutherai/gpt-neox ) .", "* * * scaling law * * : [ scaling law ] ( http : //arxiv.org/pdf/2001.08361.pdf ) describe expected model performance based model size , dataset size , amount compute used training .", "* * * high-performance computing * * : scope , knowledge hpc fundamental 're planning create llm scratch ( hardware , distributed workload , etc . ) .", "๐Ÿ“š * * reference * * : * [ llmdatahub ] ( http : //github.com/zjh-819/llmdatahub ) junhao zhao : curated list datasets pre-training , fine-tuning , rlhf .", "* [ training causal language model scratch ] ( http : //huggingface.co/learn/nlp-course/chapter7/6 ? fw=pt ) hugging face : pre-train gpt-2 model scratch using transformer library .", "* [ tinyllama ] ( http : //github.com/jzhang38/tinyllama ) zhang et al .", ": check project get good understanding llama model trained scratch .", "* [ causal language modeling ] ( http : //huggingface.co/docs/transformers/tasks/language_modeling ) hugging face : explain difference causal masked language modeling quickly fine-tune distilgpt-2 model .", "* [ chinchilla 's wild implication ] ( http : //www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications ) nostalgebraist : discus scaling law explain mean llm general .", "* [ bloom ] ( http : //bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4 ) bigscience : notion page describes bloom model built , lot useful information engineering part problem encountered .", "* [ opt-175 logbook ] ( http : //github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf ) meta : research log showing went wrong went right .", "useful 're planning pre-train large language model ( case , 175b parameter ) .", "* [ llm 360 ] ( http : //www.llm360.ai/ ) : framework open-source llm training data preparation code , data , metric , model .", "-- -" ] ]
3. Pre-training models

Pre-training is a very long and costly process, which is why this is not the focus of this course. It's good to have some level of understanding of what happens during pre-training, but hands-on experience is not required.

* **Data pipeline**: Pre-training requires huge datasets (e.g., [Llama 2](https://arxiv.org/abs/2307.09288) was trained on 2 trillion tokens) that need to be filtered, tokenized, and collated with a pre-defined vocabulary.
* **Causal language modeling**: Learn the difference between causal and masked language modeling, as well as the loss function used in this case. For efficient pre-training, learn more about [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) or [gpt-neox](https://github.com/EleutherAI/gpt-neox). A short PyTorch sketch of the next-token loss follows the reference list below.
* **Scaling laws**: The [scaling laws](https://arxiv.org/pdf/2001.08361.pdf) describe the expected model performance based on the model size, dataset size, and the amount of compute used for training.
* **High-Performance Computing**: Out of scope here, but more knowledge about HPC is fundamental if you're planning to create your own LLM from scratch (hardware, distributed workload, etc.).

📚 **References**:

* [LLMDataHub](https://github.com/Zjh-819/LLMDataHub) by Junhao Zhao: Curated list of datasets for pre-training, fine-tuning, and RLHF.
* [Training a causal language model from scratch](https://huggingface.co/learn/nlp-course/chapter7/6?fw=pt) by Hugging Face: Pre-train a GPT-2 model from scratch using the transformers library.
* [TinyLlama](https://github.com/jzhang38/TinyLlama) by Zhang et al.: Check this project to get a good understanding of how a Llama model is trained from scratch.
* [Causal language modeling](https://huggingface.co/docs/transformers/tasks/language_modeling) by Hugging Face: Explains the difference between causal and masked language modeling and shows how to quickly fine-tune a DistilGPT-2 model.
* [Chinchilla's wild implications](https://www.lesswrong.com/posts/6Fpvch8RR29qLEWNH/chinchilla-s-wild-implications) by nostalgebraist: Discusses the scaling laws and explains what they mean for LLMs in general.
* [BLOOM](https://bigscience.notion.site/BLOOM-BigScience-176B-Model-ad073ca07cdf479398d5f95d88e218c4) by BigScience: Notion page that describes how the BLOOM model was built, with a lot of useful information about the engineering part and the problems that were encountered.
* [OPT-175 Logbook](https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/chronicles/OPT175B_Logbook.pdf) by Meta: Research logs showing what went wrong and what went right. Useful if you're planning to pre-train a very large language model (in this case, 175B parameters).
* [LLM 360](https://www.llm360.ai/): A framework for open-source LLMs with training and data preparation code, data, metrics, and models.
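To make the causal language modeling objective concrete, here is a rough PyTorch sketch of the next-token cross-entropy loss; the shift-by-one labeling is the standard formulation, and the random logits stand in for a real model's output:

```python
# Causal LM loss: predict token t+1 from tokens <= t, so logits and labels
# are shifted by one position before computing cross-entropy.
import torch
import torch.nn.functional as F

batch, seq_len, vocab = 2, 16, 100
logits = torch.randn(batch, seq_len, vocab)            # stand-in model output
input_ids = torch.randint(0, vocab, (batch, seq_len))  # token ids of the batch

shift_logits = logits[:, :-1, :]  # predictions for positions 0..T-2
shift_labels = input_ids[:, 1:]   # targets are the next tokens, 1..T-1

loss = F.cross_entropy(
    shift_logits.reshape(-1, vocab),  # flatten to (batch * (T-1), vocab)
    shift_labels.reshape(-1),
)
print(loss.item())  # ~log(100) ≈ 4.6 for random logits
```

---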
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "4", ".", "supervised", "fine-tuning", "pre-trained", "model", "trained", "next-token", "prediction", "task", ",", "'re", "helpful", "assistant", ".", "sft", "allows", "tweak", "respond", "instruction", ".", "moreover", ",", "allows", "fine-tune", "model", "data", "(", "private", ",", "seen", "gpt-4", ",", "etc", ".", ")", "use", "without", "pay", "api", "like", "openai", "'s", ".", "*", "*", "*", "full", "fine-tuning", "*", "*", ":", "full", "fine-tuning", "refers", "training", "parameter", "model", ".", "efficient", "technique", ",", "produce", "slightly", "better", "result", ".", "*", "[", "*", "*", "lora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2106.09685", ")", ":", "parameter-efficient", "technique", "(", "peft", ")", "based", "low-rank", "adapter", ".", "instead", "training", "parameter", ",", "train", "adapter", ".", "*", "[", "*", "*", "qlora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2305.14314", ")", ":", "another", "peft", "based", "lora", ",", "also", "quantizes", "weight", "model", "4", "bit", "introduce", "paged", "optimizers", "manage", "memory", "spike", ".", "combine", "[", "unsloth", "]", "(", "http", ":", "//github.com/unslothai/unsloth", ")", "run", "efficiently", "free", "colab", "notebook", ".", "*", "*", "*", "[", "axolotl", "]", "(", "http", ":", "//github.com/openaccess-ai-collective/axolotl", ")", "*", "*", ":", "user-friendly", "powerful", "fine-tuning", "tool", "used", "lot", "state-of-the-art", "open-source", "model", ".", "*", "[", "*", "*", "deepspeed", "*", "*", "]", "(", "http", ":", "//www.deepspeed.ai/", ")", ":", "efficient", "pre-training", "fine-tuning", "llm", "multi-gpu", "multi-node", "setting", "(", "implemented", "axolotl", ")", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "novice", "'s", "llm", "training", "guide", "]", "(", "http", ":", "//rentry.org/llm-training", ")", "alpin", ":", "overview", "main", "concept", "parameter", "consider", "fine-tuning", "llm", ".", "*", "[", "lora", "insight", "]", "(", "http", ":", "//lightning.ai/pages/community/lora-insights/", ")", "sebastian", "raschka", ":", "practical", "insight", "lora", "select", "best", "parameter", ".", "*", "[", "fine-tune", "llama", "2", "model", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html", ")", ":", "hands-on", "tutorial", "fine-tune", "llama", "2", "model", "using", "hugging", "face", "library", ".", "*", "[", "padding", "large", "language", "model", "]", "(", "http", ":", "//towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff", ")", "benjamin", "marie", ":", "best", "practice", "pad", "training", "example", "causal", "llm", "*", "[", "beginner", "'s", "guide", "llm", "fine-tuning", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html", ")", ":", "tutorial", "fine-tune", "codellama", "model", "using", "axolotl", ".", "--", "-" ], [ "4 .", "supervised fine-tuning pre-trained model trained next-token prediction task , 're helpful assistant .", "sft allows tweak respond instruction .", "moreover , allows fine-tune model data ( private , seen gpt-4 , etc . 
)", "use without pay api like openai 's .", "* * * full fine-tuning * * : full fine-tuning refers training parameter model .", "efficient technique , produce slightly better result .", "* [ * * lora * * ] ( http : //arxiv.org/abs/2106.09685 ) : parameter-efficient technique ( peft ) based low-rank adapter .", "instead training parameter , train adapter .", "* [ * * qlora * * ] ( http : //arxiv.org/abs/2305.14314 ) : another peft based lora , also quantizes weight model 4 bit introduce paged optimizers manage memory spike .", "combine [ unsloth ] ( http : //github.com/unslothai/unsloth ) run efficiently free colab notebook .", "* * * [ axolotl ] ( http : //github.com/openaccess-ai-collective/axolotl ) * * : user-friendly powerful fine-tuning tool used lot state-of-the-art open-source model .", "* [ * * deepspeed * * ] ( http : //www.deepspeed.ai/ ) : efficient pre-training fine-tuning llm multi-gpu multi-node setting ( implemented axolotl ) .", "๐Ÿ“š * * reference * * : * [ novice 's llm training guide ] ( http : //rentry.org/llm-training ) alpin : overview main concept parameter consider fine-tuning llm .", "* [ lora insight ] ( http : //lightning.ai/pages/community/lora-insights/ ) sebastian raschka : practical insight lora select best parameter .", "* [ fine-tune llama 2 model ] ( http : //mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html ) : hands-on tutorial fine-tune llama 2 model using hugging face library .", "* [ padding large language model ] ( http : //towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff ) benjamin marie : best practice pad training example causal llm * [ beginner 's guide llm fine-tuning ] ( http : //mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html ) : tutorial fine-tune codellama model using axolotl .", "-- -" ] ]
[ [ "4", ".", "supervised", "fine-tuning", "pre-trained", "model", "trained", "next-token", "prediction", "task", ",", "'re", "helpful", "assistant", ".", "sft", "allows", "tweak", "respond", "instruction", ".", "moreover", ",", "allows", "fine-tune", "model", "data", "(", "private", ",", "seen", "gpt-4", ",", "etc", ".", ")", "use", "without", "pay", "api", "like", "openai", "'s", ".", "*", "*", "*", "full", "fine-tuning", "*", "*", ":", "full", "fine-tuning", "refers", "training", "parameter", "model", ".", "efficient", "technique", ",", "produce", "slightly", "better", "result", ".", "*", "[", "*", "*", "lora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2106.09685", ")", ":", "parameter-efficient", "technique", "(", "peft", ")", "based", "low-rank", "adapter", ".", "instead", "training", "parameter", ",", "train", "adapter", ".", "*", "[", "*", "*", "qlora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2305.14314", ")", ":", "another", "peft", "based", "lora", ",", "also", "quantizes", "weight", "model", "4", "bit", "introduce", "paged", "optimizers", "manage", "memory", "spike", ".", "combine", "[", "unsloth", "]", "(", "http", ":", "//github.com/unslothai/unsloth", ")", "run", "efficiently", "free", "colab", "notebook", ".", "*", "*", "*", "[", "axolotl", "]", "(", "http", ":", "//github.com/openaccess-ai-collective/axolotl", ")", "*", "*", ":", "user-friendly", "powerful", "fine-tuning", "tool", "used", "lot", "state-of-the-art", "open-source", "model", ".", "*", "[", "*", "*", "deepspeed", "*", "*", "]", "(", "http", ":", "//www.deepspeed.ai/", ")", ":", "efficient", "pre-training", "fine-tuning", "llm", "multi-gpu", "multi-node", "setting", "(", "implemented", "axolotl", ")", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "novice", "'s", "llm", "training", "guide", "]", "(", "http", ":", "//rentry.org/llm-training", ")", "alpin", ":", "overview", "main", "concept", "parameter", "consider", "fine-tuning", "llm", ".", "*", "[", "lora", "insight", "]", "(", "http", ":", "//lightning.ai/pages/community/lora-insights/", ")", "sebastian", "raschka", ":", "practical", "insight", "lora", "select", "best", "parameter", ".", "*", "[", "fine-tune", "llama", "2", "model", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html", ")", ":", "hands-on", "tutorial", "fine-tune", "llama", "2", "model", "using", "hugging", "face", "library", ".", "*", "[", "padding", "large", "language", "model", "]", "(", "http", ":", "//towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff", ")", "benjamin", "marie", ":", "best", "practice", "pad", "training", "example", "causal", "llm", "*", "[", "beginner", "'s", "guide", "llm", "fine-tuning", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html", ")", ":", "tutorial", "fine-tune", "codellama", "model", "using", "axolotl", ".", "--", "-" ], [ "4 .", "supervised fine-tuning pre-trained model trained next-token prediction task , 're helpful assistant .", "sft allows tweak respond instruction .", "moreover , allows fine-tune model data ( private , seen gpt-4 , etc . 
)", "use without pay api like openai 's .", "* * * full fine-tuning * * : full fine-tuning refers training parameter model .", "efficient technique , produce slightly better result .", "* [ * * lora * * ] ( http : //arxiv.org/abs/2106.09685 ) : parameter-efficient technique ( peft ) based low-rank adapter .", "instead training parameter , train adapter .", "* [ * * qlora * * ] ( http : //arxiv.org/abs/2305.14314 ) : another peft based lora , also quantizes weight model 4 bit introduce paged optimizers manage memory spike .", "combine [ unsloth ] ( http : //github.com/unslothai/unsloth ) run efficiently free colab notebook .", "* * * [ axolotl ] ( http : //github.com/openaccess-ai-collective/axolotl ) * * : user-friendly powerful fine-tuning tool used lot state-of-the-art open-source model .", "* [ * * deepspeed * * ] ( http : //www.deepspeed.ai/ ) : efficient pre-training fine-tuning llm multi-gpu multi-node setting ( implemented axolotl ) .", "๐Ÿ“š * * reference * * : * [ novice 's llm training guide ] ( http : //rentry.org/llm-training ) alpin : overview main concept parameter consider fine-tuning llm .", "* [ lora insight ] ( http : //lightning.ai/pages/community/lora-insights/ ) sebastian raschka : practical insight lora select best parameter .", "* [ fine-tune llama 2 model ] ( http : //mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html ) : hands-on tutorial fine-tune llama 2 model using hugging face library .", "* [ padding large language model ] ( http : //towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff ) benjamin marie : best practice pad training example causal llm * [ beginner 's guide llm fine-tuning ] ( http : //mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html ) : tutorial fine-tune codellama model using axolotl .", "-- -" ] ]
4. Supervised Fine-Tuning Pre-trained models are only trained on a next-token prediction task, which is why they're not helpful assistants. SFT allows you to tweak them to respond to instructions. Moreover, it allows you to fine-tune your model on any data (private, not seen by GPT-4, etc.) and use it without having to pay for an API like OpenAI's. * **Full fine-tuning**: Full fine-tuning refers to training all the parameters in the model. It is not an efficient technique, but it produces slightly better results. * [**LoRA**](https://arxiv.org/abs/2106.09685): A parameter-efficient fine-tuning (PEFT) technique based on low-rank adapters. Instead of training all the parameters, we only train these adapters (see the sketch below). * [**QLoRA**](https://arxiv.org/abs/2305.14314): Another PEFT based on LoRA, which also quantizes the weights of the model in 4 bits and introduces paged optimizers to manage memory spikes. Combine it with [Unsloth](https://github.com/unslothai/unsloth) to run it efficiently on a free Colab notebook. * **[Axolotl](https://github.com/OpenAccess-AI-Collective/axolotl)**: A user-friendly and powerful fine-tuning tool that is used in a lot of state-of-the-art open-source models. * [**DeepSpeed**](https://www.deepspeed.ai/): Efficient pre-training and fine-tuning of LLMs for multi-GPU and multi-node settings (implemented in Axolotl). 📚 **References**: * [The Novice's LLM Training Guide](https://rentry.org/llm-training) by Alpin: Overview of the main concepts and parameters to consider when fine-tuning LLMs. * [LoRA insights](https://lightning.ai/pages/community/lora-insights/) by Sebastian Raschka: Practical insights about LoRA and how to select the best parameters. * [Fine-Tune Your Own Llama 2 Model](https://mlabonne.github.io/blog/posts/Fine_Tune_Your_Own_Llama_2_Model_in_a_Colab_Notebook.html): Hands-on tutorial on how to fine-tune a Llama 2 model using Hugging Face libraries. * [Padding Large Language Models](https://towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff) by Benjamin Marie: Best practices to pad training examples for causal LLMs. * [A Beginner's Guide to LLM Fine-Tuning](https://mlabonne.github.io/blog/posts/A_Beginners_Guide_to_LLM_Finetuning.html): Tutorial on how to fine-tune a CodeLlama model using Axolotl. ---
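To make the LoRA bullet concrete, here is a minimal, hedged sketch using Hugging Face's `peft` and `transformers` libraries. The model name, rank, and target modules are illustrative choices, not the course's prescribed recipe:

```python
# Minimal LoRA sketch: freeze the base model and attach trainable low-rank
# adapters to selected attention projections. Hyperparameters are illustrative.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("gpt2")  # stand-in for a larger LLM

lora_config = LoraConfig(
    r=16,                       # rank of the low-rank update matrices
    lora_alpha=32,              # scaling factor applied to the adapter outputs
    target_modules=["c_attn"],  # layers to adapt (module names are model-specific)
    lora_dropout=0.05,
    task_type="CAUSAL_LM",
)

model = get_peft_model(base_model, lora_config)
model.print_trainable_parameters()  # typically well under 1% of all parameters
```

The wrapped model can then be trained with a standard `Trainer`; QLoRA follows the same pattern but loads the base model in 4-bit precision before attaching the adapters.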
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "6", ".", "evaluation", "evaluating", "llm", "undervalued", "part", "pipeline", ",", "time-consuming", "moderately", "reliable", ".", "downstream", "task", "dictate", "want", "evaluate", ",", "always", "remember", "goodhart", "'s", "law", ":", "``", "measure", "becomes", "target", ",", "cease", "good", "measure", ".", "''", "*", "*", "*", "traditional", "metric", "*", "*", ":", "metric", "like", "perplexity", "bleu", "score", "popular", "'re", "flawed", "context", ".", "still", "important", "understand", "applied", ".", "*", "*", "*", "general", "benchmark", "*", "*", ":", "based", "[", "language", "model", "evaluation", "harness", "]", "(", "http", ":", "//github.com/eleutherai/lm-evaluation-harness", ")", ",", "[", "open", "llm", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard", ")", "main", "benchmark", "general-purpose", "llm", "(", "like", "chatgpt", ")", ".", "popular", "benchmark", "like", "[", "bigbench", "]", "(", "http", ":", "//github.com/google/big-bench", ")", ",", "[", "mt-bench", "]", "(", "http", ":", "//arxiv.org/abs/2306.05685", ")", ",", "etc", ".", "*", "*", "*", "task-specific", "benchmark", "*", "*", ":", "task", "like", "summarization", ",", "translation", ",", "question", "answering", "dedicated", "benchmark", ",", "metric", ",", "even", "subdomains", "(", "medical", ",", "financial", ",", "etc", ".", ")", ",", "[", "pubmedqa", "]", "(", "http", ":", "//pubmedqa.github.io/", ")", "biomedical", "question", "answering", ".", "*", "*", "*", "human", "evaluation", "*", "*", ":", "reliable", "evaluation", "acceptance", "rate", "user", "comparison", "made", "human", ".", "want", "know", "model", "performs", "well", ",", "simplest", "surest", "way", "use", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "perplexity", "fixed-length", "model", "]", "(", "http", ":", "//huggingface.co/docs/transformers/perplexity", ")", "hugging", "face", ":", "overview", "perplexity", "code", "implement", "transformer", "library", ".", "*", "[", "bleu", "risk", "]", "(", "http", ":", "//towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ")", "rachael", "tatman", ":", "overview", "bleu", "score", "many", "issue", "example", ".", "*", "[", "survey", "evaluation", "llm", "]", "(", "http", ":", "//arxiv.org/abs/2307.03109", ")", "chang", "et", "al", ".", ":", "comprehensive", "paper", "evaluate", ",", "evaluate", ",", "evaluate", ".", "*", "[", "chatbot", "arena", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/lmsys/chatbot-arena-leaderboard", ")", "lmsys", ":", "elo", "rating", "general-purpose", "llm", ",", "based", "comparison", "made", "human", ".", "--", "-" ], [ "6 .", "evaluation evaluating llm undervalued part pipeline , time-consuming moderately reliable .", "downstream task dictate want evaluate , always remember goodhart 's law : `` measure becomes target , cease good measure . 
''", "* * * traditional metric * * : metric like perplexity bleu score popular 're flawed context .", "still important understand applied .", "* * * general benchmark * * : based [ language model evaluation harness ] ( http : //github.com/eleutherai/lm-evaluation-harness ) , [ open llm leaderboard ] ( http : //huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard ) main benchmark general-purpose llm ( like chatgpt ) .", "popular benchmark like [ bigbench ] ( http : //github.com/google/big-bench ) , [ mt-bench ] ( http : //arxiv.org/abs/2306.05685 ) , etc .", "* * * task-specific benchmark * * : task like summarization , translation , question answering dedicated benchmark , metric , even subdomains ( medical , financial , etc .", ") , [ pubmedqa ] ( http : //pubmedqa.github.io/ ) biomedical question answering .", "* * * human evaluation * * : reliable evaluation acceptance rate user comparison made human .", "want know model performs well , simplest surest way use .", "๐Ÿ“š * * reference * * : * [ perplexity fixed-length model ] ( http : //huggingface.co/docs/transformers/perplexity ) hugging face : overview perplexity code implement transformer library .", "* [ bleu risk ] ( http : //towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213 ) rachael tatman : overview bleu score many issue example .", "* [ survey evaluation llm ] ( http : //arxiv.org/abs/2307.03109 ) chang et al .", ": comprehensive paper evaluate , evaluate , evaluate .", "* [ chatbot arena leaderboard ] ( http : //huggingface.co/spaces/lmsys/chatbot-arena-leaderboard ) lmsys : elo rating general-purpose llm , based comparison made human .", "-- -" ] ]
[ [ "6", ".", "evaluation", "evaluating", "llm", "undervalued", "part", "pipeline", ",", "time-consuming", "moderately", "reliable", ".", "downstream", "task", "dictate", "want", "evaluate", ",", "always", "remember", "goodhart", "'s", "law", ":", "``", "measure", "becomes", "target", ",", "cease", "good", "measure", ".", "''", "*", "*", "*", "traditional", "metric", "*", "*", ":", "metric", "like", "perplexity", "bleu", "score", "popular", "'re", "flawed", "context", ".", "still", "important", "understand", "applied", ".", "*", "*", "*", "general", "benchmark", "*", "*", ":", "based", "[", "language", "model", "evaluation", "harness", "]", "(", "http", ":", "//github.com/eleutherai/lm-evaluation-harness", ")", ",", "[", "open", "llm", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard", ")", "main", "benchmark", "general-purpose", "llm", "(", "like", "chatgpt", ")", ".", "popular", "benchmark", "like", "[", "bigbench", "]", "(", "http", ":", "//github.com/google/big-bench", ")", ",", "[", "mt-bench", "]", "(", "http", ":", "//arxiv.org/abs/2306.05685", ")", ",", "etc", ".", "*", "*", "*", "task-specific", "benchmark", "*", "*", ":", "task", "like", "summarization", ",", "translation", ",", "question", "answering", "dedicated", "benchmark", ",", "metric", ",", "even", "subdomains", "(", "medical", ",", "financial", ",", "etc", ".", ")", ",", "[", "pubmedqa", "]", "(", "http", ":", "//pubmedqa.github.io/", ")", "biomedical", "question", "answering", ".", "*", "*", "*", "human", "evaluation", "*", "*", ":", "reliable", "evaluation", "acceptance", "rate", "user", "comparison", "made", "human", ".", "want", "know", "model", "performs", "well", ",", "simplest", "surest", "way", "use", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "perplexity", "fixed-length", "model", "]", "(", "http", ":", "//huggingface.co/docs/transformers/perplexity", ")", "hugging", "face", ":", "overview", "perplexity", "code", "implement", "transformer", "library", ".", "*", "[", "bleu", "risk", "]", "(", "http", ":", "//towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ")", "rachael", "tatman", ":", "overview", "bleu", "score", "many", "issue", "example", ".", "*", "[", "survey", "evaluation", "llm", "]", "(", "http", ":", "//arxiv.org/abs/2307.03109", ")", "chang", "et", "al", ".", ":", "comprehensive", "paper", "evaluate", ",", "evaluate", ",", "evaluate", ".", "*", "[", "chatbot", "arena", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/lmsys/chatbot-arena-leaderboard", ")", "lmsys", ":", "elo", "rating", "general-purpose", "llm", ",", "based", "comparison", "made", "human", ".", "--", "-" ], [ "6 .", "evaluation evaluating llm undervalued part pipeline , time-consuming moderately reliable .", "downstream task dictate want evaluate , always remember goodhart 's law : `` measure becomes target , cease good measure . 
''", "* * * traditional metric * * : metric like perplexity bleu score popular 're flawed context .", "still important understand applied .", "* * * general benchmark * * : based [ language model evaluation harness ] ( http : //github.com/eleutherai/lm-evaluation-harness ) , [ open llm leaderboard ] ( http : //huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard ) main benchmark general-purpose llm ( like chatgpt ) .", "popular benchmark like [ bigbench ] ( http : //github.com/google/big-bench ) , [ mt-bench ] ( http : //arxiv.org/abs/2306.05685 ) , etc .", "* * * task-specific benchmark * * : task like summarization , translation , question answering dedicated benchmark , metric , even subdomains ( medical , financial , etc .", ") , [ pubmedqa ] ( http : //pubmedqa.github.io/ ) biomedical question answering .", "* * * human evaluation * * : reliable evaluation acceptance rate user comparison made human .", "want know model performs well , simplest surest way use .", "๐Ÿ“š * * reference * * : * [ perplexity fixed-length model ] ( http : //huggingface.co/docs/transformers/perplexity ) hugging face : overview perplexity code implement transformer library .", "* [ bleu risk ] ( http : //towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213 ) rachael tatman : overview bleu score many issue example .", "* [ survey evaluation llm ] ( http : //arxiv.org/abs/2307.03109 ) chang et al .", ": comprehensive paper evaluate , evaluate , evaluate .", "* [ chatbot arena leaderboard ] ( http : //huggingface.co/spaces/lmsys/chatbot-arena-leaderboard ) lmsys : elo rating general-purpose llm , based comparison made human .", "-- -" ] ]
6. Evaluation Evaluating LLMs is an undervalued part of the pipeline, which is time-consuming and moderately reliable. Your downstream task should dictate what you want to evaluate, but always remember Goodhart's law: "When a measure becomes a target, it ceases to be a good measure." * **Traditional metrics**: Metrics like perplexity and BLEU score are not as popular as they were because they're flawed in most contexts. It is still important to understand them and when they can be applied. * **General benchmarks**: Based on the [Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness), the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) is the main benchmark for general-purpose LLMs (like ChatGPT). There are other popular benchmarks like [BigBench](https://github.com/google/BIG-bench), [MT-Bench](https://arxiv.org/abs/2306.05685), etc. * **Task-specific benchmarks**: Tasks like summarization, translation, and question answering have dedicated benchmarks, metrics, and even subdomains (medical, financial, etc.), such as [PubMedQA](https://pubmedqa.github.io/) for biomedical question answering. * **Human evaluation**: The most reliable evaluation is the acceptance rate by users or comparisons made by humans. If you want to know if a model performs well, the simplest but surest way is to use it yourself. 📚 **References**: * [Perplexity of fixed-length models](https://huggingface.co/docs/transformers/perplexity) by Hugging Face: Overview of perplexity with code to implement it with the transformers library. * [BLEU at your own risk](https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213) by Rachael Tatman: Overview of the BLEU score and its many issues with examples. * [A Survey on Evaluation of LLMs](https://arxiv.org/abs/2307.03109) by Chang et al.: Comprehensive paper about what to evaluate, where to evaluate, and how to evaluate. * [Chatbot Arena Leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) by lmsys: Elo rating of general-purpose LLMs, based on comparisons made by humans. ---
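As a complement to the "traditional metrics" bullet, here is a short, hedged example of computing perplexity with the `transformers` library; the model and text are placeholders, and a real evaluation would average the loss over a held-out corpus:

```python
# Perplexity = exp(mean negative log-likelihood per token).
# Passing labels=input_ids makes the model return the mean cross-entropy loss.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.eval()

text = "When a measure becomes a target, it ceases to be a good measure."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    loss = model(**inputs, labels=inputs["input_ids"]).loss

print(f"Perplexity: {torch.exp(loss).item():.2f}")
```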
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "7", ".", "quantization", "quantization", "process", "converting", "weight", "(", "activation", ")", "model", "using", "lower", "precision", ".", "example", ",", "weight", "stored", "using", "16", "bit", "converted", "4-bit", "representation", ".", "technique", "become", "increasingly", "important", "reduce", "computational", "memory", "cost", "associated", "llm", ".", "*", "*", "*", "base", "technique", "*", "*", ":", "learn", "different", "level", "precision", "(", "fp32", ",", "fp16", ",", "int8", ",", "etc", ".", ")", "perform", "naรฏve", "quantization", "absmax", "zero-point", "technique", ".", "*", "*", "*", "gguf", "llama.cpp", "*", "*", ":", "originally", "designed", "run", "cpu", ",", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "gguf", "format", "become", "popular", "tool", "run", "llm", "consumer-grade", "hardware", ".", "*", "*", "*", "gptq", "exl2", "*", "*", ":", "[", "gptq", "]", "(", "http", ":", "//arxiv.org/abs/2210.17323", ")", ",", "specifically", ",", "[", "exl2", "]", "(", "http", ":", "//github.com/turboderp/exllamav2", ")", "format", "offer", "incredible", "speed", "run", "gpus", ".", "model", "also", "take", "long", "time", "quantized", ".", "*", "*", "*", "awq", "*", "*", ":", "new", "format", "accurate", "gptq", "(", "lower", "perplexity", ")", "us", "lot", "vram", "necessarily", "faster", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "introduction", "quantization", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "overview", "quantization", ",", "absmax", "zero-point", "quantization", ",", "llm.int8", "(", ")", "code", ".", "*", "[", "quantize", "llama", "model", "llama.cpp", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html", ")", ":", "tutorial", "quantize", "llama", "2", "model", "using", "llama.cpp", "gguf", "format", ".", "*", "[", "4-bit", "llm", "quantization", "gptq", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "tutorial", "quantize", "llm", "using", "gptq", "algorithm", "autogptq", ".", "*", "[", "exllamav2", ":", "fastest", "library", "run", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run", "%", "c2", "%", "a0llms.html", ")", ":", "guide", "quantize", "mistral", "model", "using", "exl2", "format", "run", "exllamav2", "library", ".", "*", "[", "understanding", "activation-aware", "weight", "quantization", "]", "(", "http", ":", "//medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8", ")", "friendliai", ":", "overview", "awq", "technique", "benefit", ".", "--", "-" ], [ "7 .", "quantization quantization process converting weight ( activation ) model using lower precision .", "example , weight stored using 16 bit converted 4-bit representation .", "technique become increasingly important reduce computational memory cost associated llm .", "* * * base technique * * : learn different level precision ( fp32 , fp16 , int8 , etc . 
)", "perform naรฏve quantization absmax zero-point technique .", "* * * gguf llama.cpp * * : originally designed run cpu , [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) gguf format become popular tool run llm consumer-grade hardware .", "* * * gptq exl2 * * : [ gptq ] ( http : //arxiv.org/abs/2210.17323 ) , specifically , [ exl2 ] ( http : //github.com/turboderp/exllamav2 ) format offer incredible speed run gpus .", "model also take long time quantized .", "* * * awq * * : new format accurate gptq ( lower perplexity ) us lot vram necessarily faster .", "๐Ÿ“š * * reference * * : * [ introduction quantization ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : overview quantization , absmax zero-point quantization , llm.int8 ( ) code .", "* [ quantize llama model llama.cpp ] ( http : //mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html ) : tutorial quantize llama 2 model using llama.cpp gguf format .", "* [ 4-bit llm quantization gptq ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : tutorial quantize llm using gptq algorithm autogptq .", "* [ exllamav2 : fastest library run llm ] ( http : //mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run % c2 % a0llms.html ) : guide quantize mistral model using exl2 format run exllamav2 library .", "* [ understanding activation-aware weight quantization ] ( http : //medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8 ) friendliai : overview awq technique benefit .", "-- -" ] ]
[ [ "7", ".", "quantization", "quantization", "process", "converting", "weight", "(", "activation", ")", "model", "using", "lower", "precision", ".", "example", ",", "weight", "stored", "using", "16", "bit", "converted", "4-bit", "representation", ".", "technique", "become", "increasingly", "important", "reduce", "computational", "memory", "cost", "associated", "llm", ".", "*", "*", "*", "base", "technique", "*", "*", ":", "learn", "different", "level", "precision", "(", "fp32", ",", "fp16", ",", "int8", ",", "etc", ".", ")", "perform", "naรฏve", "quantization", "absmax", "zero-point", "technique", ".", "*", "*", "*", "gguf", "llama.cpp", "*", "*", ":", "originally", "designed", "run", "cpu", ",", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "gguf", "format", "become", "popular", "tool", "run", "llm", "consumer-grade", "hardware", ".", "*", "*", "*", "gptq", "exl2", "*", "*", ":", "[", "gptq", "]", "(", "http", ":", "//arxiv.org/abs/2210.17323", ")", ",", "specifically", ",", "[", "exl2", "]", "(", "http", ":", "//github.com/turboderp/exllamav2", ")", "format", "offer", "incredible", "speed", "run", "gpus", ".", "model", "also", "take", "long", "time", "quantized", ".", "*", "*", "*", "awq", "*", "*", ":", "new", "format", "accurate", "gptq", "(", "lower", "perplexity", ")", "us", "lot", "vram", "necessarily", "faster", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "introduction", "quantization", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "overview", "quantization", ",", "absmax", "zero-point", "quantization", ",", "llm.int8", "(", ")", "code", ".", "*", "[", "quantize", "llama", "model", "llama.cpp", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html", ")", ":", "tutorial", "quantize", "llama", "2", "model", "using", "llama.cpp", "gguf", "format", ".", "*", "[", "4-bit", "llm", "quantization", "gptq", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "tutorial", "quantize", "llm", "using", "gptq", "algorithm", "autogptq", ".", "*", "[", "exllamav2", ":", "fastest", "library", "run", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run", "%", "c2", "%", "a0llms.html", ")", ":", "guide", "quantize", "mistral", "model", "using", "exl2", "format", "run", "exllamav2", "library", ".", "*", "[", "understanding", "activation-aware", "weight", "quantization", "]", "(", "http", ":", "//medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8", ")", "friendliai", ":", "overview", "awq", "technique", "benefit", ".", "--", "-" ], [ "7 .", "quantization quantization process converting weight ( activation ) model using lower precision .", "example , weight stored using 16 bit converted 4-bit representation .", "technique become increasingly important reduce computational memory cost associated llm .", "* * * base technique * * : learn different level precision ( fp32 , fp16 , int8 , etc . 
)", "perform naรฏve quantization absmax zero-point technique .", "* * * gguf llama.cpp * * : originally designed run cpu , [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) gguf format become popular tool run llm consumer-grade hardware .", "* * * gptq exl2 * * : [ gptq ] ( http : //arxiv.org/abs/2210.17323 ) , specifically , [ exl2 ] ( http : //github.com/turboderp/exllamav2 ) format offer incredible speed run gpus .", "model also take long time quantized .", "* * * awq * * : new format accurate gptq ( lower perplexity ) us lot vram necessarily faster .", "๐Ÿ“š * * reference * * : * [ introduction quantization ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : overview quantization , absmax zero-point quantization , llm.int8 ( ) code .", "* [ quantize llama model llama.cpp ] ( http : //mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html ) : tutorial quantize llama 2 model using llama.cpp gguf format .", "* [ 4-bit llm quantization gptq ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : tutorial quantize llm using gptq algorithm autogptq .", "* [ exllamav2 : fastest library run llm ] ( http : //mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run % c2 % a0llms.html ) : guide quantize mistral model using exl2 format run exllamav2 library .", "* [ understanding activation-aware weight quantization ] ( http : //medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8 ) friendliai : overview awq technique benefit .", "-- -" ] ]
7. Quantization Quantization is the process of converting the weights (and activations) of a model to a lower precision. For example, weights stored using 16 bits can be converted into a 4-bit representation. This technique has become increasingly important to reduce the computational and memory costs associated with LLMs. * **Base techniques**: Learn the different levels of precision (FP32, FP16, INT8, etc.) and how to perform naïve quantization with absmax and zero-point techniques (a toy absmax example follows below). * **GGUF and llama.cpp**: Originally designed to run on CPUs, [llama.cpp](https://github.com/ggerganov/llama.cpp) and the GGUF format have become the most popular tools to run LLMs on consumer-grade hardware. * **GPTQ and EXL2**: [GPTQ](https://arxiv.org/abs/2210.17323) and, more specifically, the [EXL2](https://github.com/turboderp/exllamav2) format offer incredible speed but can only run on GPUs. Models also take a long time to be quantized. * **AWQ**: This new format is more accurate than GPTQ (lower perplexity) but uses a lot more VRAM and is not necessarily faster. 📚 **References**: * [Introduction to quantization](https://mlabonne.github.io/blog/posts/Introduction_to_Weight_Quantization.html): Overview of quantization, absmax and zero-point quantization, and LLM.int8() with code. * [Quantize Llama models with llama.cpp](https://mlabonne.github.io/blog/posts/Quantize_Llama_2_models_using_ggml.html): Tutorial on how to quantize a Llama 2 model using llama.cpp and the GGUF format. * [4-bit LLM Quantization with GPTQ](https://mlabonne.github.io/blog/posts/Introduction_to_Weight_Quantization.html): Tutorial on how to quantize an LLM using the GPTQ algorithm with AutoGPTQ. * [ExLlamaV2: The Fastest Library to Run LLMs](https://mlabonne.github.io/blog/posts/ExLlamaV2_The_Fastest_Library_to_Run%C2%A0LLMs.html): Guide on how to quantize a Mistral model using the EXL2 format and run it with the ExLlamaV2 library. * [Understanding Activation-Aware Weight Quantization](https://medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8) by FriendliAI: Overview of the AWQ technique and its benefits. ---
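To illustrate the "base techniques" bullet, here is a toy absmax quantization round-trip in PyTorch. It is a sketch of the naive per-tensor method described in the first reference, not what llama.cpp, GPTQ, or AWQ actually implement:

```python
# Naive absmax quantization: scale floats so the largest absolute value maps
# to 127, round to int8, then dequantize to inspect the rounding error.
import torch

def absmax_quantize(x: torch.Tensor):
    scale = 127 / x.abs().max()                          # one scale per tensor
    q = torch.clamp((x * scale).round(), -128, 127).to(torch.int8)
    return q, scale

def absmax_dequantize(q: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    return q.to(torch.float32) / scale

weights = torch.randn(4, 4)
q, scale = absmax_quantize(weights)
recovered = absmax_dequantize(q, scale)
print("max absolute error:", (weights - recovered).abs().max().item())
```

Zero-point quantization works the same way but adds an offset so asymmetric value ranges use the full int8 range.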
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "๐Ÿ‘ท", "llm", "engineer", "section", "course", "focus", "learning", "build", "llm-powered", "application", "used", "production", ",", "focus", "augmenting", "model", "deploying", ".", "!", "[", "]", "(", "img/roadmap_engineer.png", ")" ], [ "๐Ÿ‘ท llm engineer section course focus learning build llm-powered application used production , focus augmenting model deploying .", "!", "[ ] ( img/roadmap_engineer.png )" ] ]
[ [ "๐Ÿ‘ท", "llm", "engineer", "section", "course", "focus", "learning", "build", "llm-powered", "application", "used", "production", ",", "focus", "augmenting", "model", "deploying", ".", "!", "[", "]", "(", "img/roadmap_engineer.png", ")" ], [ "๐Ÿ‘ท llm engineer section course focus learning build llm-powered application used production , focus augmenting model deploying .", "!", "[ ] ( img/roadmap_engineer.png )" ] ]
👷 The LLM Engineer This section of the course focuses on learning how to build LLM-powered applications that can be used in production, with an emphasis on augmenting models and deploying them. ![](img/roadmap_engineer.png)
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "1", ".", "running", "llm", "running", "llm", "difficult", "due", "high", "hardware", "requirement", ".", "depending", "use", "case", ",", "might", "want", "simply", "consume", "model", "api", "(", "like", "gpt-4", ")", "run", "locally", ".", "case", ",", "additional", "prompting", "guidance", "technique", "improve", "constrain", "output", "application", ".", "*", "*", "*", "llm", "apis", "*", "*", ":", "apis", "convenient", "way", "deploy", "llm", ".", "space", "divided", "private", "llm", "(", "[", "openai", "]", "(", "http", ":", "//platform.openai.com/", ")", ",", "[", "google", "]", "(", "http", ":", "//cloud.google.com/vertex-ai/docs/generative-ai/learn/overview", ")", ",", "[", "anthropic", "]", "(", "http", ":", "//docs.anthropic.com/claude/reference/getting-started-with-the-api", ")", ",", "[", "cohere", "]", "(", "http", ":", "//docs.cohere.com/docs", ")", ",", "etc", ".", ")", "open-source", "llm", "(", "[", "openrouter", "]", "(", "http", ":", "//openrouter.ai/", ")", ",", "[", "hugging", "face", "]", "(", "http", ":", "//huggingface.co/inference-api", ")", ",", "[", "together", "ai", "]", "(", "http", ":", "//www.together.ai/", ")", ",", "etc", ".", ")", ".", "*", "*", "*", "open-source", "llm", "*", "*", ":", "[", "hugging", "face", "hub", "]", "(", "http", ":", "//huggingface.co/models", ")", "great", "place", "find", "llm", ".", "directly", "run", "[", "hugging", "face", "space", "]", "(", "http", ":", "//huggingface.co/spaces", ")", ",", "download", "run", "locally", "apps", "like", "[", "lm", "studio", "]", "(", "http", ":", "//lmstudio.ai/", ")", "cli", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "[", "ollama", "]", "(", "http", ":", "//ollama.ai/", ")", ".", "*", "*", "*", "prompt", "engineering", "*", "*", ":", "common", "technique", "include", "zero-shot", "prompting", ",", "few-shot", "prompting", ",", "chain", "thought", ",", "react", ".", "work", "better", "bigger", "model", ",", "adapted", "smaller", "one", ".", "*", "*", "*", "structuring", "output", "*", "*", ":", "many", "task", "require", "structured", "output", ",", "like", "strict", "template", "json", "format", ".", "library", "like", "[", "lmql", "]", "(", "http", ":", "//lmql.ai/", ")", ",", "[", "outline", "]", "(", "http", ":", "//github.com/outlines-dev/outlines", ")", ",", "[", "guidance", "]", "(", "http", ":", "//github.com/guidance-ai/guidance", ")", ",", "etc", ".", "used", "guide", "generation", "respect", "given", "structure", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "run", "llm", "locally", "lm", "studio", "]", "(", "http", ":", "//www.kdnuggets.com/run-an-llm-locally-with-lm-studio", ")", "nisha", "arya", ":", "short", "guide", "use", "lm", "studio", ".", "*", "[", "prompt", "engineering", "guide", "]", "(", "http", ":", "//www.promptingguide.ai/", ")", "dair.ai", ":", "exhaustive", "list", "prompt", "technique", "example", "*", "[", "outline", "-", "quickstart", "]", "(", "http", ":", "//outlines-dev.github.io/outlines/quickstart/", ")", ":", "list", "guided", "generation", "technique", "enabled", "outline", ".", "*", "[", "lmql", "-", "overview", "]", "(", "http", ":", "//lmql.ai/docs/language/overview.html", ")", ":", "introduction", "lmql", "language", ".", "--", "-" ], [ "1 .", "running llm running llm difficult due high hardware requirement .", "depending use case , might want simply consume model api ( like gpt-4 ) run locally .", "case , additional prompting guidance technique improve constrain output application .", "* * * 
llm apis * * : apis convenient way deploy llm .", "space divided private llm ( [ openai ] ( http : //platform.openai.com/ ) , [ google ] ( http : //cloud.google.com/vertex-ai/docs/generative-ai/learn/overview ) , [ anthropic ] ( http : //docs.anthropic.com/claude/reference/getting-started-with-the-api ) , [ cohere ] ( http : //docs.cohere.com/docs ) , etc . )", "open-source llm ( [ openrouter ] ( http : //openrouter.ai/ ) , [ hugging face ] ( http : //huggingface.co/inference-api ) , [ together ai ] ( http : //www.together.ai/ ) , etc . ) .", "* * * open-source llm * * : [ hugging face hub ] ( http : //huggingface.co/models ) great place find llm .", "directly run [ hugging face space ] ( http : //huggingface.co/spaces ) , download run locally apps like [ lm studio ] ( http : //lmstudio.ai/ ) cli [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) [ ollama ] ( http : //ollama.ai/ ) .", "* * * prompt engineering * * : common technique include zero-shot prompting , few-shot prompting , chain thought , react .", "work better bigger model , adapted smaller one .", "* * * structuring output * * : many task require structured output , like strict template json format .", "library like [ lmql ] ( http : //lmql.ai/ ) , [ outline ] ( http : //github.com/outlines-dev/outlines ) , [ guidance ] ( http : //github.com/guidance-ai/guidance ) , etc .", "used guide generation respect given structure .", "๐Ÿ“š * * reference * * : * [ run llm locally lm studio ] ( http : //www.kdnuggets.com/run-an-llm-locally-with-lm-studio ) nisha arya : short guide use lm studio .", "* [ prompt engineering guide ] ( http : //www.promptingguide.ai/ ) dair.ai : exhaustive list prompt technique example * [ outline - quickstart ] ( http : //outlines-dev.github.io/outlines/quickstart/ ) : list guided generation technique enabled outline .", "* [ lmql - overview ] ( http : //lmql.ai/docs/language/overview.html ) : introduction lmql language .", "-- -" ] ]
[ [ "1", ".", "running", "llm", "running", "llm", "difficult", "due", "high", "hardware", "requirement", ".", "depending", "use", "case", ",", "might", "want", "simply", "consume", "model", "api", "(", "like", "gpt-4", ")", "run", "locally", ".", "case", ",", "additional", "prompting", "guidance", "technique", "improve", "constrain", "output", "application", ".", "*", "*", "*", "llm", "apis", "*", "*", ":", "apis", "convenient", "way", "deploy", "llm", ".", "space", "divided", "private", "llm", "(", "[", "openai", "]", "(", "http", ":", "//platform.openai.com/", ")", ",", "[", "google", "]", "(", "http", ":", "//cloud.google.com/vertex-ai/docs/generative-ai/learn/overview", ")", ",", "[", "anthropic", "]", "(", "http", ":", "//docs.anthropic.com/claude/reference/getting-started-with-the-api", ")", ",", "[", "cohere", "]", "(", "http", ":", "//docs.cohere.com/docs", ")", ",", "etc", ".", ")", "open-source", "llm", "(", "[", "openrouter", "]", "(", "http", ":", "//openrouter.ai/", ")", ",", "[", "hugging", "face", "]", "(", "http", ":", "//huggingface.co/inference-api", ")", ",", "[", "together", "ai", "]", "(", "http", ":", "//www.together.ai/", ")", ",", "etc", ".", ")", ".", "*", "*", "*", "open-source", "llm", "*", "*", ":", "[", "hugging", "face", "hub", "]", "(", "http", ":", "//huggingface.co/models", ")", "great", "place", "find", "llm", ".", "directly", "run", "[", "hugging", "face", "space", "]", "(", "http", ":", "//huggingface.co/spaces", ")", ",", "download", "run", "locally", "apps", "like", "[", "lm", "studio", "]", "(", "http", ":", "//lmstudio.ai/", ")", "cli", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "[", "ollama", "]", "(", "http", ":", "//ollama.ai/", ")", ".", "*", "*", "*", "prompt", "engineering", "*", "*", ":", "common", "technique", "include", "zero-shot", "prompting", ",", "few-shot", "prompting", ",", "chain", "thought", ",", "react", ".", "work", "better", "bigger", "model", ",", "adapted", "smaller", "one", ".", "*", "*", "*", "structuring", "output", "*", "*", ":", "many", "task", "require", "structured", "output", ",", "like", "strict", "template", "json", "format", ".", "library", "like", "[", "lmql", "]", "(", "http", ":", "//lmql.ai/", ")", ",", "[", "outline", "]", "(", "http", ":", "//github.com/outlines-dev/outlines", ")", ",", "[", "guidance", "]", "(", "http", ":", "//github.com/guidance-ai/guidance", ")", ",", "etc", ".", "used", "guide", "generation", "respect", "given", "structure", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "run", "llm", "locally", "lm", "studio", "]", "(", "http", ":", "//www.kdnuggets.com/run-an-llm-locally-with-lm-studio", ")", "nisha", "arya", ":", "short", "guide", "use", "lm", "studio", ".", "*", "[", "prompt", "engineering", "guide", "]", "(", "http", ":", "//www.promptingguide.ai/", ")", "dair.ai", ":", "exhaustive", "list", "prompt", "technique", "example", "*", "[", "outline", "-", "quickstart", "]", "(", "http", ":", "//outlines-dev.github.io/outlines/quickstart/", ")", ":", "list", "guided", "generation", "technique", "enabled", "outline", ".", "*", "[", "lmql", "-", "overview", "]", "(", "http", ":", "//lmql.ai/docs/language/overview.html", ")", ":", "introduction", "lmql", "language", ".", "--", "-" ], [ "1 .", "running llm running llm difficult due high hardware requirement .", "depending use case , might want simply consume model api ( like gpt-4 ) run locally .", "case , additional prompting guidance technique improve constrain output application .", "* * * 
llm apis * * : apis convenient way deploy llm .", "space divided private llm ( [ openai ] ( http : //platform.openai.com/ ) , [ google ] ( http : //cloud.google.com/vertex-ai/docs/generative-ai/learn/overview ) , [ anthropic ] ( http : //docs.anthropic.com/claude/reference/getting-started-with-the-api ) , [ cohere ] ( http : //docs.cohere.com/docs ) , etc . )", "open-source llm ( [ openrouter ] ( http : //openrouter.ai/ ) , [ hugging face ] ( http : //huggingface.co/inference-api ) , [ together ai ] ( http : //www.together.ai/ ) , etc . ) .", "* * * open-source llm * * : [ hugging face hub ] ( http : //huggingface.co/models ) great place find llm .", "directly run [ hugging face space ] ( http : //huggingface.co/spaces ) , download run locally apps like [ lm studio ] ( http : //lmstudio.ai/ ) cli [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) [ ollama ] ( http : //ollama.ai/ ) .", "* * * prompt engineering * * : common technique include zero-shot prompting , few-shot prompting , chain thought , react .", "work better bigger model , adapted smaller one .", "* * * structuring output * * : many task require structured output , like strict template json format .", "library like [ lmql ] ( http : //lmql.ai/ ) , [ outline ] ( http : //github.com/outlines-dev/outlines ) , [ guidance ] ( http : //github.com/guidance-ai/guidance ) , etc .", "used guide generation respect given structure .", "๐Ÿ“š * * reference * * : * [ run llm locally lm studio ] ( http : //www.kdnuggets.com/run-an-llm-locally-with-lm-studio ) nisha arya : short guide use lm studio .", "* [ prompt engineering guide ] ( http : //www.promptingguide.ai/ ) dair.ai : exhaustive list prompt technique example * [ outline - quickstart ] ( http : //outlines-dev.github.io/outlines/quickstart/ ) : list guided generation technique enabled outline .", "* [ lmql - overview ] ( http : //lmql.ai/docs/language/overview.html ) : introduction lmql language .", "-- -" ] ]
1. Running LLMs Running LLMs can be difficult due to high hardware requirements. Depending on your use case, you might want to simply consume a model through an API (like GPT-4) or run it locally. In any case, additional prompting and guidance techniques can improve and constrain the output for your applications. * **LLM APIs**: APIs are a convenient way to deploy LLMs. This space is divided between private LLMs ([OpenAI](https://platform.openai.com/), [Google](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview), [Anthropic](https://docs.anthropic.com/claude/reference/getting-started-with-the-api), [Cohere](https://docs.cohere.com/docs), etc.) and open-source LLMs ([OpenRouter](https://openrouter.ai/), [Hugging Face](https://huggingface.co/inference-api), [Together AI](https://www.together.ai/), etc.). * **Open-source LLMs**: The [Hugging Face Hub](https://huggingface.co/models) is a great place to find LLMs. You can directly run some of them in [Hugging Face Spaces](https://huggingface.co/spaces), or download and run them locally in apps like [LM Studio](https://lmstudio.ai/) or through the CLI with [llama.cpp](https://github.com/ggerganov/llama.cpp) or [Ollama](https://ollama.ai/). * **Prompt engineering**: Common techniques include zero-shot prompting, few-shot prompting, chain of thought, and ReAct. They work better with bigger models, but can be adapted to smaller ones. * **Structuring outputs**: Many tasks require a structured output, like a strict template or a JSON format. Libraries like [LMQL](https://lmql.ai/), [Outlines](https://github.com/outlines-dev/outlines), [Guidance](https://github.com/guidance-ai/guidance), etc. can be used to guide the generation so that it respects a given structure (a minimal sketch follows below). 📚 **References**: * [Run an LLM locally with LM Studio](https://www.kdnuggets.com/run-an-llm-locally-with-lm-studio) by Nisha Arya: Short guide on how to use LM Studio. * [Prompt engineering guide](https://www.promptingguide.ai/) by DAIR.AI: Exhaustive list of prompt techniques with examples. * [Outlines - Quickstart](https://outlines-dev.github.io/outlines/quickstart/): List of guided generation techniques enabled by Outlines. * [LMQL - Overview](https://lmql.ai/docs/language/overview.html): Introduction to the LMQL language. ---
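To illustrate the few-shot prompting and structured-output ideas above, here is a dependency-free sketch. The `generate` callable is a hypothetical stand-in for any LLM call (an API client or a local model); libraries like Outlines or Guidance enforce the structure at decoding time rather than by validating and retrying:

```python
# Few-shot prompt construction plus a naive validate-and-retry loop for JSON
# output. `generate` is a hypothetical placeholder for an LLM completion call.
import json

FEW_SHOT_TEMPLATE = """Extract the product and sentiment as JSON.

Review: "The keyboard feels great."
JSON: {"product": "keyboard", "sentiment": "positive"}

Review: "This monitor flickers constantly."
JSON: {"product": "monitor", "sentiment": "negative"}

Review: "<review>"
JSON:"""

def structured_extract(review: str, generate, max_retries: int = 3) -> dict:
    prompt = FEW_SHOT_TEMPLATE.replace("<review>", review)
    for _ in range(max_retries):
        raw = generate(prompt)      # any completion endpoint or local model
        try:
            return json.loads(raw)  # accept only parseable JSON
        except json.JSONDecodeError:
            continue                # retry; guided decoding would avoid this loop
    raise ValueError("Model never produced valid JSON")
```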
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "4", ".", "advanced", "rag", "real-life", "application", "require", "complex", "pipeline", ",", "including", "sql", "graph", "database", ",", "well", "automatically", "selecting", "relevant", "tool", "apis", ".", "advanced", "technique", "improve", "baseline", "solution", "provide", "additional", "feature", ".", "*", "*", "*", "query", "construction", "*", "*", ":", "structured", "data", "stored", "traditional", "database", "requires", "specific", "query", "language", "like", "sql", ",", "cypher", ",", "metadata", ",", "etc", ".", "directly", "translate", "user", "instruction", "query", "access", "data", "query", "construction", ".", "*", "*", "*", "agent", "tool", "*", "*", ":", "agent", "augment", "llm", "automatically", "selecting", "relevant", "tool", "provide", "answer", ".", "tool", "simple", "using", "google", "wikipedia", ",", "complex", "like", "python", "interpreter", "jira", ".", "*", "*", "*", "post-processing", "*", "*", ":", "final", "step", "process", "input", "fed", "llm", ".", "enhances", "relevance", "diversity", "document", "retrieved", "re-ranking", ",", "[", "rag-fusion", "]", "(", "http", ":", "//github.com/raudaschl/rag-fusion", ")", ",", "classification", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "langchain", "-", "query", "construction", "]", "(", "http", ":", "//blog.langchain.dev/query-construction/", ")", ":", "blog", "post", "different", "type", "query", "construction", ".", "*", "[", "langchain", "-", "sql", "]", "(", "http", ":", "//python.langchain.com/docs/use_cases/qa_structured/sql", ")", ":", "tutorial", "interact", "sql", "database", "llm", ",", "involving", "text-to-sql", "optional", "sql", "agent", ".", "*", "[", "pinecone", "-", "llm", "agent", "]", "(", "http", ":", "//www.pinecone.io/learn/series/langchain/langchain-agents/", ")", ":", "introduction", "agent", "tool", "different", "type", ".", "*", "[", "llm", "powered", "autonomous", "agent", "]", "(", "http", ":", "//lilianweng.github.io/posts/2023-06-23-agent/", ")", "lilian", "weng", ":", "theoretical", "article", "llm", "agent", ".", "*", "[", "langchain", "-", "openai", "'s", "rag", "]", "(", "http", ":", "//blog.langchain.dev/applying-openai-rag/", ")", ":", "overview", "rag", "strategy", "employed", "openai", ",", "including", "post-processing", ".", "--", "-" ], [ "4 .", "advanced rag real-life application require complex pipeline , including sql graph database , well automatically selecting relevant tool apis .", "advanced technique improve baseline solution provide additional feature .", "* * * query construction * * : structured data stored traditional database requires specific query language like sql , cypher , metadata , etc .", "directly translate user instruction query access data query construction .", "* * * agent tool * * : agent augment llm automatically selecting relevant tool provide answer .", "tool simple using google wikipedia , complex like python interpreter jira .", "* * * post-processing * * : final step process input fed llm .", "enhances relevance diversity document retrieved re-ranking , [ rag-fusion ] ( http : //github.com/raudaschl/rag-fusion ) , classification .", "๐Ÿ“š * * reference * * : * [ langchain - query construction ] ( http : //blog.langchain.dev/query-construction/ ) : blog post different type query construction .", "* [ langchain - sql ] ( http : //python.langchain.com/docs/use_cases/qa_structured/sql ) : tutorial interact sql database llm , involving text-to-sql optional sql agent .", "* [ pinecone - llm agent ] ( http : 
//www.pinecone.io/learn/series/langchain/langchain-agents/ ) : introduction agent tool different type .", "* [ llm powered autonomous agent ] ( http : //lilianweng.github.io/posts/2023-06-23-agent/ ) lilian weng : theoretical article llm agent .", "* [ langchain - openai 's rag ] ( http : //blog.langchain.dev/applying-openai-rag/ ) : overview rag strategy employed openai , including post-processing .", "-- -" ] ]
[ [ "4", ".", "advanced", "rag", "real-life", "application", "require", "complex", "pipeline", ",", "including", "sql", "graph", "database", ",", "well", "automatically", "selecting", "relevant", "tool", "apis", ".", "advanced", "technique", "improve", "baseline", "solution", "provide", "additional", "feature", ".", "*", "*", "*", "query", "construction", "*", "*", ":", "structured", "data", "stored", "traditional", "database", "requires", "specific", "query", "language", "like", "sql", ",", "cypher", ",", "metadata", ",", "etc", ".", "directly", "translate", "user", "instruction", "query", "access", "data", "query", "construction", ".", "*", "*", "*", "agent", "tool", "*", "*", ":", "agent", "augment", "llm", "automatically", "selecting", "relevant", "tool", "provide", "answer", ".", "tool", "simple", "using", "google", "wikipedia", ",", "complex", "like", "python", "interpreter", "jira", ".", "*", "*", "*", "post-processing", "*", "*", ":", "final", "step", "process", "input", "fed", "llm", ".", "enhances", "relevance", "diversity", "document", "retrieved", "re-ranking", ",", "[", "rag-fusion", "]", "(", "http", ":", "//github.com/raudaschl/rag-fusion", ")", ",", "classification", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "langchain", "-", "query", "construction", "]", "(", "http", ":", "//blog.langchain.dev/query-construction/", ")", ":", "blog", "post", "different", "type", "query", "construction", ".", "*", "[", "langchain", "-", "sql", "]", "(", "http", ":", "//python.langchain.com/docs/use_cases/qa_structured/sql", ")", ":", "tutorial", "interact", "sql", "database", "llm", ",", "involving", "text-to-sql", "optional", "sql", "agent", ".", "*", "[", "pinecone", "-", "llm", "agent", "]", "(", "http", ":", "//www.pinecone.io/learn/series/langchain/langchain-agents/", ")", ":", "introduction", "agent", "tool", "different", "type", ".", "*", "[", "llm", "powered", "autonomous", "agent", "]", "(", "http", ":", "//lilianweng.github.io/posts/2023-06-23-agent/", ")", "lilian", "weng", ":", "theoretical", "article", "llm", "agent", ".", "*", "[", "langchain", "-", "openai", "'s", "rag", "]", "(", "http", ":", "//blog.langchain.dev/applying-openai-rag/", ")", ":", "overview", "rag", "strategy", "employed", "openai", ",", "including", "post-processing", ".", "--", "-" ], [ "4 .", "advanced rag real-life application require complex pipeline , including sql graph database , well automatically selecting relevant tool apis .", "advanced technique improve baseline solution provide additional feature .", "* * * query construction * * : structured data stored traditional database requires specific query language like sql , cypher , metadata , etc .", "directly translate user instruction query access data query construction .", "* * * agent tool * * : agent augment llm automatically selecting relevant tool provide answer .", "tool simple using google wikipedia , complex like python interpreter jira .", "* * * post-processing * * : final step process input fed llm .", "enhances relevance diversity document retrieved re-ranking , [ rag-fusion ] ( http : //github.com/raudaschl/rag-fusion ) , classification .", "๐Ÿ“š * * reference * * : * [ langchain - query construction ] ( http : //blog.langchain.dev/query-construction/ ) : blog post different type query construction .", "* [ langchain - sql ] ( http : //python.langchain.com/docs/use_cases/qa_structured/sql ) : tutorial interact sql database llm , involving text-to-sql optional sql agent .", "* [ pinecone - llm agent ] ( http : 
//www.pinecone.io/learn/series/langchain/langchain-agents/ ) : introduction agent tool different type .", "* [ llm powered autonomous agent ] ( http : //lilianweng.github.io/posts/2023-06-23-agent/ ) lilian weng : theoretical article llm agent .", "* [ langchain - openai 's rag ] ( http : //blog.langchain.dev/applying-openai-rag/ ) : overview rag strategy employed openai , including post-processing .", "-- -" ] ]
4. Advanced RAG Real-life applications can require complex pipelines, including SQL or graph databases, as well as automatically selecting relevant tools and APIs. These advanced techniques can improve a baseline solution and provide additional features. * **Query construction**: Structured data stored in traditional databases requires a specific query language like SQL, Cypher, metadata, etc. With query construction, we can directly translate the user instruction into a query to access the data. * **Agents and tools**: Agents augment LLMs by automatically selecting the most relevant tools to provide an answer. These tools can be as simple as using Google or Wikipedia, or more complex like a Python interpreter or Jira. * **Post-processing**: The final step, which processes the inputs that are fed to the LLM. It enhances the relevance and diversity of the retrieved documents through re-ranking, [RAG-fusion](https://github.com/Raudaschl/rag-fusion), and classification (a re-ranking sketch follows below). 📚 **References**: * [LangChain - Query Construction](https://blog.langchain.dev/query-construction/): Blog post about different types of query construction. * [LangChain - SQL](https://python.langchain.com/docs/use_cases/qa_structured/sql): Tutorial on how to interact with SQL databases with LLMs, involving Text-to-SQL and an optional SQL agent. * [Pinecone - LLM agents](https://www.pinecone.io/learn/series/langchain/langchain-agents/): Introduction to agents and tools with their different types. * [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) by Lilian Weng: A more theoretical article about LLM agents. * [LangChain - OpenAI's RAG](https://blog.langchain.dev/applying-openai-rag/): Overview of the RAG strategies employed by OpenAI, including post-processing. ---
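As one concrete instance of the post-processing step, a cross-encoder can re-rank retrieved documents by query relevance before they reach the LLM. This sketch assumes the `sentence-transformers` library and a public MS MARCO checkpoint; it is one possible re-ranker, not the only strategy mentioned above:

```python
# Re-rank retrieved documents with a cross-encoder: score each (query, doc)
# pair jointly, then keep the highest-scoring documents for the LLM context.
from sentence_transformers import CrossEncoder

reranker = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")

def rerank(query: str, docs: list[str], top_k: int = 3) -> list[str]:
    scores = reranker.predict([(query, doc) for doc in docs])
    ranked = sorted(zip(docs, scores), key=lambda pair: pair[1], reverse=True)
    return [doc for doc, _ in ranked[:top_k]]

docs = [
    "Cypher is the query language used by the Neo4j graph database.",
    "SQL is the standard language for querying relational databases.",
    "Bananas are a good source of potassium.",
]
print(rerank("How do I query a graph database?", docs, top_k=2))
```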
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "5", ".", "inference", "optimization", "text", "generation", "costly", "process", "requires", "expensive", "hardware", ".", "addition", "quantization", ",", "various", "technique", "proposed", "maximize", "throughput", "reduce", "inference", "cost", ".", "*", "*", "*", "flash", "attention", "*", "*", ":", "optimization", "attention", "mechanism", "transform", "complexity", "quadratic", "linear", ",", "speeding", "training", "inference", ".", "*", "*", "*", "key-value", "cache", "*", "*", ":", "understand", "key-value", "cache", "improvement", "introduced", "[", "multi-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/1911.02150", ")", "(", "mqa", ")", "[", "grouped-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/2305.13245", ")", "(", "gqa", ")", ".", "*", "*", "*", "speculative", "decoding", "*", "*", ":", "use", "small", "model", "produce", "draft", "reviewed", "larger", "model", "speed", "text", "generation", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "gpu", "inference", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/perf_infer_gpu_one", ")", "hugging", "face", ":", "explain", "optimize", "inference", "gpus", ".", "*", "[", "llm", "inference", "]", "(", "http", ":", "//www.databricks.com/blog/llm-inference-performance-engineering-best-practices", ")", "databricks", ":", "best", "practice", "optimize", "llm", "inference", "production", ".", "*", "[", "optimizing", "llm", "speed", "memory", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/llm_tutorial_optimization", ")", "hugging", "face", ":", "explain", "three", "main", "technique", "optimize", "speed", "memory", ",", "namely", "quantization", ",", "flash", "attention", ",", "architectural", "innovation", ".", "*", "[", "assisted", "generation", "]", "(", "http", ":", "//huggingface.co/blog/assisted-generation", ")", "hugging", "face", ":", "hf", "'s", "version", "speculative", "decoding", ",", "'s", "interesting", "blog", "post", "work", "code", "implement", ".", "--", "-" ], [ "5 .", "inference optimization text generation costly process requires expensive hardware .", "addition quantization , various technique proposed maximize throughput reduce inference cost .", "* * * flash attention * * : optimization attention mechanism transform complexity quadratic linear , speeding training inference .", "* * * key-value cache * * : understand key-value cache improvement introduced [ multi-query attention ] ( http : //arxiv.org/abs/1911.02150 ) ( mqa ) [ grouped-query attention ] ( http : //arxiv.org/abs/2305.13245 ) ( gqa ) .", "* * * speculative decoding * * : use small model produce draft reviewed larger model speed text generation .", "๐Ÿ“š * * reference * * : * [ gpu inference ] ( http : //huggingface.co/docs/transformers/main/en/perf_infer_gpu_one ) hugging face : explain optimize inference gpus .", "* [ llm inference ] ( http : //www.databricks.com/blog/llm-inference-performance-engineering-best-practices ) databricks : best practice optimize llm inference production .", "* [ optimizing llm speed memory ] ( http : //huggingface.co/docs/transformers/main/en/llm_tutorial_optimization ) hugging face : explain three main technique optimize speed memory , namely quantization , flash attention , architectural innovation .", "* [ assisted generation ] ( http : //huggingface.co/blog/assisted-generation ) hugging face : hf 's version speculative decoding , 's interesting blog post work code implement .", "-- -" ] ]
[ [ "5", ".", "inference", "optimization", "text", "generation", "costly", "process", "requires", "expensive", "hardware", ".", "addition", "quantization", ",", "various", "technique", "proposed", "maximize", "throughput", "reduce", "inference", "cost", ".", "*", "*", "*", "flash", "attention", "*", "*", ":", "optimization", "attention", "mechanism", "transform", "complexity", "quadratic", "linear", ",", "speeding", "training", "inference", ".", "*", "*", "*", "key-value", "cache", "*", "*", ":", "understand", "key-value", "cache", "improvement", "introduced", "[", "multi-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/1911.02150", ")", "(", "mqa", ")", "[", "grouped-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/2305.13245", ")", "(", "gqa", ")", ".", "*", "*", "*", "speculative", "decoding", "*", "*", ":", "use", "small", "model", "produce", "draft", "reviewed", "larger", "model", "speed", "text", "generation", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "gpu", "inference", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/perf_infer_gpu_one", ")", "hugging", "face", ":", "explain", "optimize", "inference", "gpus", ".", "*", "[", "llm", "inference", "]", "(", "http", ":", "//www.databricks.com/blog/llm-inference-performance-engineering-best-practices", ")", "databricks", ":", "best", "practice", "optimize", "llm", "inference", "production", ".", "*", "[", "optimizing", "llm", "speed", "memory", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/llm_tutorial_optimization", ")", "hugging", "face", ":", "explain", "three", "main", "technique", "optimize", "speed", "memory", ",", "namely", "quantization", ",", "flash", "attention", ",", "architectural", "innovation", ".", "*", "[", "assisted", "generation", "]", "(", "http", ":", "//huggingface.co/blog/assisted-generation", ")", "hugging", "face", ":", "hf", "'s", "version", "speculative", "decoding", ",", "'s", "interesting", "blog", "post", "work", "code", "implement", ".", "--", "-" ], [ "5 .", "inference optimization text generation costly process requires expensive hardware .", "addition quantization , various technique proposed maximize throughput reduce inference cost .", "* * * flash attention * * : optimization attention mechanism transform complexity quadratic linear , speeding training inference .", "* * * key-value cache * * : understand key-value cache improvement introduced [ multi-query attention ] ( http : //arxiv.org/abs/1911.02150 ) ( mqa ) [ grouped-query attention ] ( http : //arxiv.org/abs/2305.13245 ) ( gqa ) .", "* * * speculative decoding * * : use small model produce draft reviewed larger model speed text generation .", "๐Ÿ“š * * reference * * : * [ gpu inference ] ( http : //huggingface.co/docs/transformers/main/en/perf_infer_gpu_one ) hugging face : explain optimize inference gpus .", "* [ llm inference ] ( http : //www.databricks.com/blog/llm-inference-performance-engineering-best-practices ) databricks : best practice optimize llm inference production .", "* [ optimizing llm speed memory ] ( http : //huggingface.co/docs/transformers/main/en/llm_tutorial_optimization ) hugging face : explain three main technique optimize speed memory , namely quantization , flash attention , architectural innovation .", "* [ assisted generation ] ( http : //huggingface.co/blog/assisted-generation ) hugging face : hf 's version speculative decoding , 's interesting blog post work code implement .", "-- -" ] ]
5. Inference optimization Text generation is a costly process that requires expensive hardware. In addition to quantization, various techniques have been proposed to maximize throughput and reduce inference costs. * **Flash Attention**: Optimization of the attention mechanism to transform its complexity from quadratic to linear, speeding up both training and inference. * **Key-value cache**: Understand the key-value cache and the improvements introduced in [Multi-Query Attention](https://arxiv.org/abs/1911.02150) (MQA) and [Grouped-Query Attention](https://arxiv.org/abs/2305.13245) (GQA). * **Speculative decoding**: Use a small model to produce drafts that are then reviewed by a larger model to speed up text generation (a minimal sketch follows after this record). 📚 **References**: * [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one) by Hugging Face: Explains how to optimize inference on GPUs. * [LLM Inference](https://www.databricks.com/blog/llm-inference-performance-engineering-best-practices) by Databricks: Best practices for optimizing LLM inference in production. * [Optimizing LLMs for Speed and Memory](https://huggingface.co/docs/transformers/main/en/llm_tutorial_optimization) by Hugging Face: Explains three main techniques to optimize speed and memory, namely quantization, Flash Attention, and architectural innovations. * [Assisted Generation](https://huggingface.co/blog/assisted-generation) by Hugging Face: HF's version of speculative decoding; an interesting blog post about how it works, with code to implement it. ---
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "7", ".", "securing", "llm", "addition", "traditional", "security", "problem", "associated", "software", ",", "llm", "unique", "weakness", "due", "way", "trained", "prompted", ".", "*", "*", "*", "prompt", "hacking", "*", "*", ":", "different", "technique", "related", "prompt", "engineering", ",", "including", "prompt", "injection", "(", "additional", "instruction", "hijack", "model", "'s", "answer", ")", ",", "data/prompt", "leaking", "(", "retrieve", "original", "data/prompt", ")", ",", "jailbreaking", "(", "craft", "prompt", "bypass", "safety", "feature", ")", ".", "*", "*", "*", "backdoor", "*", "*", ":", "attack", "vector", "target", "training", "data", ",", "poisoning", "training", "data", "(", "e.g.", ",", "false", "information", ")", "creating", "backdoor", "(", "secret", "trigger", "change", "model", "'s", "behavior", "inference", ")", ".", "*", "*", "*", "defensive", "measure", "*", "*", ":", "best", "way", "protect", "llm", "application", "test", "vulnerability", "(", "e.g.", ",", "using", "red", "teaming", "check", "like", "[", "garak", "]", "(", "http", ":", "//github.com/leondz/garak/", ")", ")", "observe", "production", "(", "framework", "like", "[", "langfuse", "]", "(", "http", ":", "//github.com/langfuse/langfuse", ")", ")", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "owasp", "llm", "top", "10", "]", "(", "http", ":", "//owasp.org/www-project-top-10-for-large-language-model-applications/", ")", "hego", "wiki", ":", "list", "10", "critic", "vulnerability", "seen", "llm", "application", ".", "*", "[", "prompt", "injection", "primer", "]", "(", "http", ":", "//github.com/jthack/pipe", ")", "joseph", "thacker", ":", "short", "guide", "dedicated", "prompt", "injection", "engineer", ".", "*", "[", "llm", "security", "]", "(", "http", ":", "//llmsecurity.net/", ")", "[", "@", "llm_sec", "]", "(", "http", ":", "//twitter.com/llm_sec", ")", ":", "extensive", "list", "resource", "related", "llm", "security", ".", "*", "[", "red", "teaming", "llm", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming", ")", "microsoft", ":", "guide", "perform", "red", "teaming", "llm", ".", "--", "-" ], [ "7 .", "securing llm addition traditional security problem associated software , llm unique weakness due way trained prompted .", "* * * prompt hacking * * : different technique related prompt engineering , including prompt injection ( additional instruction hijack model 's answer ) , data/prompt leaking ( retrieve original data/prompt ) , jailbreaking ( craft prompt bypass safety feature ) .", "* * * backdoor * * : attack vector target training data , poisoning training data ( e.g. , false information ) creating backdoor ( secret trigger change model 's behavior inference ) .", "* * * defensive measure * * : best way protect llm application test vulnerability ( e.g. 
, using red teaming check like [ garak ] ( http : //github.com/leondz/garak/ ) ) observe production ( framework like [ langfuse ] ( http : //github.com/langfuse/langfuse ) ) .", "๐Ÿ“š * * reference * * : * [ owasp llm top 10 ] ( http : //owasp.org/www-project-top-10-for-large-language-model-applications/ ) hego wiki : list 10 critic vulnerability seen llm application .", "* [ prompt injection primer ] ( http : //github.com/jthack/pipe ) joseph thacker : short guide dedicated prompt injection engineer .", "* [ llm security ] ( http : //llmsecurity.net/ ) [ @ llm_sec ] ( http : //twitter.com/llm_sec ) : extensive list resource related llm security .", "* [ red teaming llm ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming ) microsoft : guide perform red teaming llm .", "-- -" ] ]
[ [ "7", ".", "securing", "llm", "addition", "traditional", "security", "problem", "associated", "software", ",", "llm", "unique", "weakness", "due", "way", "trained", "prompted", ".", "*", "*", "*", "prompt", "hacking", "*", "*", ":", "different", "technique", "related", "prompt", "engineering", ",", "including", "prompt", "injection", "(", "additional", "instruction", "hijack", "model", "'s", "answer", ")", ",", "data/prompt", "leaking", "(", "retrieve", "original", "data/prompt", ")", ",", "jailbreaking", "(", "craft", "prompt", "bypass", "safety", "feature", ")", ".", "*", "*", "*", "backdoor", "*", "*", ":", "attack", "vector", "target", "training", "data", ",", "poisoning", "training", "data", "(", "e.g.", ",", "false", "information", ")", "creating", "backdoor", "(", "secret", "trigger", "change", "model", "'s", "behavior", "inference", ")", ".", "*", "*", "*", "defensive", "measure", "*", "*", ":", "best", "way", "protect", "llm", "application", "test", "vulnerability", "(", "e.g.", ",", "using", "red", "teaming", "check", "like", "[", "garak", "]", "(", "http", ":", "//github.com/leondz/garak/", ")", ")", "observe", "production", "(", "framework", "like", "[", "langfuse", "]", "(", "http", ":", "//github.com/langfuse/langfuse", ")", ")", ".", "๐Ÿ“š", "*", "*", "reference", "*", "*", ":", "*", "[", "owasp", "llm", "top", "10", "]", "(", "http", ":", "//owasp.org/www-project-top-10-for-large-language-model-applications/", ")", "hego", "wiki", ":", "list", "10", "critic", "vulnerability", "seen", "llm", "application", ".", "*", "[", "prompt", "injection", "primer", "]", "(", "http", ":", "//github.com/jthack/pipe", ")", "joseph", "thacker", ":", "short", "guide", "dedicated", "prompt", "injection", "engineer", ".", "*", "[", "llm", "security", "]", "(", "http", ":", "//llmsecurity.net/", ")", "[", "@", "llm_sec", "]", "(", "http", ":", "//twitter.com/llm_sec", ")", ":", "extensive", "list", "resource", "related", "llm", "security", ".", "*", "[", "red", "teaming", "llm", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming", ")", "microsoft", ":", "guide", "perform", "red", "teaming", "llm", ".", "--", "-" ], [ "7 .", "securing llm addition traditional security problem associated software , llm unique weakness due way trained prompted .", "* * * prompt hacking * * : different technique related prompt engineering , including prompt injection ( additional instruction hijack model 's answer ) , data/prompt leaking ( retrieve original data/prompt ) , jailbreaking ( craft prompt bypass safety feature ) .", "* * * backdoor * * : attack vector target training data , poisoning training data ( e.g. , false information ) creating backdoor ( secret trigger change model 's behavior inference ) .", "* * * defensive measure * * : best way protect llm application test vulnerability ( e.g. 
, using red teaming check like [ garak ] ( http : //github.com/leondz/garak/ ) ) observe production ( framework like [ langfuse ] ( http : //github.com/langfuse/langfuse ) ) .", "๐Ÿ“š * * reference * * : * [ owasp llm top 10 ] ( http : //owasp.org/www-project-top-10-for-large-language-model-applications/ ) hego wiki : list 10 critic vulnerability seen llm application .", "* [ prompt injection primer ] ( http : //github.com/jthack/pipe ) joseph thacker : short guide dedicated prompt injection engineer .", "* [ llm security ] ( http : //llmsecurity.net/ ) [ @ llm_sec ] ( http : //twitter.com/llm_sec ) : extensive list resource related llm security .", "* [ red teaming llm ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming ) microsoft : guide perform red teaming llm .", "-- -" ] ]
7. Securing LLMs In addition to traditional security problems associated with software, LLMs have unique weaknesses due to the way they are trained and prompted. * **Prompt hacking**: Different techniques related to prompt engineering, including prompt injection (additional instructions to hijack the model's answer), data/prompt leaking (retrieving its original data/prompt), and jailbreaking (crafting prompts to bypass safety features). * **Backdoors**: Attack vectors can target the training data itself, by poisoning the training data (e.g., with false information) or creating backdoors (secret triggers to change the model's behavior during inference). * **Defensive measures**: The best way to protect your LLM applications is to test them against these vulnerabilities (e.g., using red teaming and checks like [garak](https://github.com/leondz/garak/)) and observe them in production (with a framework like [langfuse](https://github.com/langfuse/langfuse)); a toy pre-filter sketch follows after this record. 📚 **References**: * [OWASP LLM Top 10](https://owasp.org/www-project-top-10-for-large-language-model-applications/) by HEGO Wiki: List of the 10 most critical vulnerabilities seen in LLM applications. * [Prompt Injection Primer](https://github.com/jthack/PIPE) by Joseph Thacker: Short guide dedicated to prompt injection for engineers. * [LLM Security](https://llmsecurity.net/) by [@llm_sec](https://twitter.com/llm_sec): Extensive list of resources related to LLM security. * [Red teaming LLMs](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming) by Microsoft: Guide on how to perform red teaming with LLMs. ---
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/FlowiseAI/Flowise/main/README.md
[ [ "โšกquick", "start", "download", "install", "[", "nodejs", "]", "(", "http", ":", "//nodejs.org/en/download", ")", ">", "=", "18.15.0", "1", ".", "install", "flowise", "``", "`", "bash", "npm", "install", "-g", "flowise", "``", "`", "2", ".", "start", "flowise", "``", "`", "bash", "npx", "flowise", "start", "``", "`", "username", "&", "password", "``", "`", "bash", "npx", "flowise", "start", "--", "flowise_username=user", "--", "flowise_password=1234", "``", "`", "3", ".", "open", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")" ], [ "โšกquick start download install [ nodejs ] ( http : //nodejs.org/en/download ) > = 18.15.0 1 .", "install flowise `` ` bash npm install -g flowise `` ` 2 .", "start flowise `` ` bash npx flowise start `` ` username & password `` ` bash npx flowise start -- flowise_username=user -- flowise_password=1234 `` ` 3 .", "open [ http : //localhost:3000 ] ( http : //localhost:3000 )" ] ]
[ [ "โšกquick", "start", "download", "install", "[", "nodejs", "]", "(", "http", ":", "//nodejs.org/en/download", ")", ">", "=", "18.15.0", "1", ".", "install", "flowise", "``", "`", "bash", "npm", "install", "-g", "flowise", "``", "`", "2", ".", "start", "flowise", "``", "`", "bash", "npx", "flowise", "start", "``", "`", "username", "&", "password", "``", "`", "bash", "npx", "flowise", "start", "--", "flowise_username=user", "--", "flowise_password=1234", "``", "`", "3", ".", "open", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")" ], [ "โšกquick start download install [ nodejs ] ( http : //nodejs.org/en/download ) > = 18.15.0 1 .", "install flowise `` ` bash npm install -g flowise `` ` 2 .", "start flowise `` ` bash npx flowise start `` ` username & password `` ` bash npx flowise start -- flowise_username=user -- flowise_password=1234 `` ` 3 .", "open [ http : //localhost:3000 ] ( http : //localhost:3000 )" ] ]
⚡Quick Start Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0 1. Install Flowise ```bash npm install -g flowise ``` 2. Start Flowise ```bash npx flowise start ``` With username & password ```bash npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234 ``` 3. Open [http://localhost:3000](http://localhost:3000) (a minimal sketch for querying the API follows after this record)
https://github.com/FlowiseAI/Flowise
-1
[ "artificial-intelligence", "chatgpt", "javascript", "large-language-models", "llm", "low-code", "no-code", "react", "typescript" ]
https://raw.githubusercontent.com/FlowiseAI/Flowise/main/README.md
[ [ "prerequisite", "-", "install", "[", "yarn", "v1", "]", "(", "http", ":", "//classic.yarnpkg.com/en/docs/install", ")", "``", "`", "bash", "npm", "-g", "yarn", "``", "`" ], [ "prerequisite - install [ yarn v1 ] ( http : //classic.yarnpkg.com/en/docs/install ) `` ` bash npm -g yarn `` `" ] ]
[ [ "prerequisite", "-", "install", "[", "yarn", "v1", "]", "(", "http", ":", "//classic.yarnpkg.com/en/docs/install", ")", "``", "`", "bash", "npm", "-g", "yarn", "``", "`" ], [ "prerequisite - install [ yarn v1 ] ( http : //classic.yarnpkg.com/en/docs/install ) `` ` bash npm -g yarn `` `" ] ]
Prerequisite - Install [Yarn v1](https://classic.yarnpkg.com/en/docs/install) ```bash npm i -g yarn ```
https://github.com/FlowiseAI/Flowise
-1
[ "artificial-intelligence", "chatgpt", "javascript", "large-language-models", "llm", "low-code", "no-code", "react", "typescript" ]
https://raw.githubusercontent.com/FlowiseAI/Flowise/main/README.md
[ [ "setup", "1", ".", "clone", "repository", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/flowiseai/flowise.git", "``", "`", "2", ".", "go", "repository", "folder", "``", "`", "bash", "cd", "flowise", "``", "`", "3", ".", "install", "dependency", "module", ":", "``", "`", "bash", "yarn", "install", "``", "`", "4", ".", "build", "code", ":", "``", "`", "bash", "yarn", "build", "``", "`", "5", ".", "start", "app", ":", "``", "`", "bash", "yarn", "start", "``", "`", "access", "app", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")", "6", ".", "development", "build", ":", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/ui", "`", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/server", "`", "-", "run", "``", "`", "bash", "yarn", "dev", "``", "`", "code", "change", "reload", "app", "automatically", "[", "http", ":", "//localhost:8080", "]", "(", "http", ":", "//localhost:8080", ")" ], [ "setup 1 .", "clone repository `` ` bash git clone http : //github.com/flowiseai/flowise.git `` ` 2 .", "go repository folder `` ` bash cd flowise `` ` 3 .", "install dependency module : `` ` bash yarn install `` ` 4 .", "build code : `` ` bash yarn build `` ` 5 .", "start app : `` ` bash yarn start `` ` access app [ http : //localhost:3000 ] ( http : //localhost:3000 ) 6 .", "development build : - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/ui ` - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/server ` - run `` ` bash yarn dev `` ` code change reload app automatically [ http : //localhost:8080 ] ( http : //localhost:8080 )" ] ]
[ [ "setup", "1", ".", "clone", "repository", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/flowiseai/flowise.git", "``", "`", "2", ".", "go", "repository", "folder", "``", "`", "bash", "cd", "flowise", "``", "`", "3", ".", "install", "dependency", "module", ":", "``", "`", "bash", "yarn", "install", "``", "`", "4", ".", "build", "code", ":", "``", "`", "bash", "yarn", "build", "``", "`", "5", ".", "start", "app", ":", "``", "`", "bash", "yarn", "start", "``", "`", "access", "app", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")", "6", ".", "development", "build", ":", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/ui", "`", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/server", "`", "-", "run", "``", "`", "bash", "yarn", "dev", "``", "`", "code", "change", "reload", "app", "automatically", "[", "http", ":", "//localhost:8080", "]", "(", "http", ":", "//localhost:8080", ")" ], [ "setup 1 .", "clone repository `` ` bash git clone http : //github.com/flowiseai/flowise.git `` ` 2 .", "go repository folder `` ` bash cd flowise `` ` 3 .", "install dependency module : `` ` bash yarn install `` ` 4 .", "build code : `` ` bash yarn build `` ` 5 .", "start app : `` ` bash yarn start `` ` access app [ http : //localhost:3000 ] ( http : //localhost:3000 ) 6 .", "development build : - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/ui ` - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/server ` - run `` ` bash yarn dev `` ` code change reload app automatically [ http : //localhost:8080 ] ( http : //localhost:8080 )" ] ]
Setup 1. Clone the repository ```bash git clone https://github.com/FlowiseAI/Flowise.git ``` 2. Go into the repository folder ```bash cd Flowise ``` 3. Install all dependencies of all modules: ```bash yarn install ``` 4. Build all the code: ```bash yarn build ``` 5. Start the app: ```bash yarn start ``` You can now access the app on [http://localhost:3000](http://localhost:3000) 6. For a development build: - Create a `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/ui` - Create a `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/server` - Run ```bash yarn dev ``` Any code changes will reload the app automatically on [http://localhost:8080](http://localhost:8080)
https://github.com/FlowiseAI/Flowise
2
[ "artificial-intelligence", "chatgpt", "javascript", "large-language-models", "llm", "low-code", "no-code", "react", "typescript" ]