{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "HLdoj4cz-xal"
},
"source": [
"# Run.c\n",
"\n",
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/karpathy/llama2.c/blob/master/run.ipynb)\n",
"\n",
"More details can be found in the [README.md](README.md) ."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Une3Ozlnu1B7"
},
"outputs": [],
"source": [
"#@title Clone Project\n",
"\n",
"!git clone https://github.com/karpathy/llama2.c.git\n",
"%cd llama2.c"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#@title Build\n",
"\n",
"!make runfast"
]
},
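{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#@title (Optional) Check the Build\n",
"\n",
"# Minimal sanity-check sketch, assuming the Build cell above compiled run.c\n",
"# into a binary named ./run: confirm it exists and is executable before\n",
"# downloading a model.\n",
"import os\n",
"\n",
"assert os.path.isfile(\"./run\") and os.access(\"./run\", os.X_OK), \"./run not found -- re-run the Build cell\"\n",
"print(\"./run is ready\")"
]
},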
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "thm0ZBrtSgoC"
},
"outputs": [],
"source": [
"#@title Pick Your Model\n",
"\n",
"#@markdown Choose model\n",
"model = \"stories15M\" #@param [\"stories15M\", \"stories42M\", \"stories110M\"]\n",
"\n",
"download_url = \"\"\n",
"\n",
"if(model == \"stories15M\"):\n",
" download_url = \"https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin\"\n",
"if(model == \"stories42M\"):\n",
" download_url = \"https://huggingface.co/karpathy/tinyllamas/resolve/main/stories42M.bin\"\n",
"if(model == \"stories110M\"):\n",
" download_url = \"https://huggingface.co/karpathy/tinyllamas/resolve/main/stories110M.bin\"\n",
"\n",
"print(f\"download_url: {download_url}\")\n",
"\n",
"!wget $download_url\n",
"\n",
"model_file = model + \".bin\""
]
},
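{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#@title (Optional) Inspect the Downloaded Checkpoint\n",
"\n",
"# Minimal sketch, assuming the cell above set `model_file` and wget saved the\n",
"# checkpoint into the current directory: report the file size so a failed or\n",
"# truncated download is easy to spot.\n",
"import os\n",
"\n",
"size_mb = os.path.getsize(model_file) / 1e6\n",
"print(f\"{model_file}: {size_mb:.1f} MB\")"
]
},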
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "OgAc3KjuT-NM"
},
"outputs": [],
"source": [
"#@title Generate Stories\n",
"\n",
"# Generate args\n",
"max_token = 256 #@param {type:\"slider\", min:32, max:1024, step:32}\n",
"temperature = 0.8 #@param {type:\"slider\", min:0.0, max:1, step:0.05}\n",
"top_p = 0.9 #@param {type:\"slider\", min:0.0, max:1.0, step:0.05}\n",
"prompt = \"One day, Lily met a Shoggoth\" #@param {type:\"string\"}\n",
"\n",
"print(f\"model: {model_file}, max_token: {max_token}, temperature: {temperature}, top_p: {top_p}, prompt: {prompt}\")\n",
"print(f\"----------------------------\\n\")\n",
"\n",
"cmd = f'./run {model_file} -t {temperature} -p {top_p} -n {max_token} -i \"{prompt}\"'\n",
"!{cmd}"
]
},
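{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#@title (Optional) Capture the Story in Python\n",
"\n",
"# Minimal sketch, reusing the same ./run flags as the cell above (-t, -p, -n, -i):\n",
"# run the generation through subprocess so the story is available as a Python\n",
"# string rather than only printed cell output.\n",
"import subprocess\n",
"\n",
"result = subprocess.run(\n",
"    [\"./run\", model_file, \"-t\", str(temperature), \"-p\", str(top_p), \"-n\", str(max_token), \"-i\", prompt],\n",
"    capture_output=True, text=True, check=True,\n",
")\n",
"story = result.stdout\n",
"print(story)"
]
},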
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#@title Run Meta's Llama 2 models\n",
"\n",
"#@markdown input your huggingface [access token](https://huggingface.co/settings/tokens) to download Meta's Llama 2 models.\n",
"\n",
"from huggingface_hub import snapshot_download\n",
"\n",
"token = \"replace your huggingface access token\" #@param {type:\"string\"}\n",
"path = snapshot_download(repo_id=\"meta-llama/Llama-2-7b\",cache_dir=\"Llama-2-7b\", use_auth_token=token)\n",
"\n",
"!python export_meta_llama_bin.py $path llama2_7b.bin\n",
"\n",
"print(\"./run llama2_7b.bin\\n\")\n",
"!./run llama2_7b.bin"
]
}
],
"metadata": {
"colab": {
"private_outputs": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}