Created using Colaboratory
mixtral_ollama_public_api.ipynb  ADDED  (+170 -0)
@@ -0,0 +1,170 @@
+{
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "view-in-github",
+        "colab_type": "text"
+      },
+      "source": [
+        "<a href=\"https://colab.research.google.com/github/almutareb/InnovationPathfinderAI/blob/agent_notebook/mixtral_ollama_public_api.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "hdHmLCO48n-H"
+      },
+      "outputs": [],
+      "source": [
+        "# https://github.com/jmorganca/ollama/blob/main/examples/jupyter-notebook/ollama.ipynb"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "o5Q6OmcN0mkS",
+        "outputId": "c7a663c4-b1d8-4354-d1f2-fac8f503f4d3"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n",
+            "                                 Dload  Upload   Total   Spent    Left  Speed\n",
+            "100  8423    0  8423    0     0   8293      0 --:--:--  0:00:01 --:--:--  8298>>> Downloading ollama...\n",
+            "100  8423    0  8423    0     0   7756      0 --:--:--  0:00:01 --:--:--  7763\n",
+            "############################################################################################# 100.0%\n",
+            ">>> Installing ollama to /usr/local/bin...\n",
+            ">>> Creating ollama user...\n",
+            ">>> Adding current user to ollama group...\n",
+            ">>> Creating ollama systemd service...\n",
+            "WARNING: Unable to detect NVIDIA GPU. Install lspci or lshw to automatically detect and install NVIDIA CUDA drivers.\n",
+            ">>> The Ollama API is now available at 0.0.0.0:11434.\n",
+            ">>> Install complete. Run \"ollama\" from the command line.\n",
+            "System has not been booted with systemd as init system (PID 1). Can't operate.\n",
+            "Failed to connect to bus: Host is down\n"
+          ]
+        }
+      ],
+      "source": [
+        "!curl https://ollama.ai/install.sh | sh\n",
+        "!command -v systemctl >/dev/null && sudo systemctl stop ollama"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "TXtmzrel194r"
+      },
+      "outputs": [],
+      "source": [
+        "!pip install aiohttp pyngrok -q"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "GLPqSWXT3QBg",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "a7542b67-3287-4ce5-8459-2e86d8ec7dfd"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": []
+        }
+      ],
+      "source": [
+        "from pyngrok import ngrok\n",
+        "from google.colab import userdata\n",
+        "\n",
+        "# https://dashboard.ngrok.com/get-started/your-authtoken\n",
+        "NGROK_TOKEN = userdata.get('NGROK_TOKEN')\n",
+        "ngrok.set_auth_token(NGROK_TOKEN)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "\n",
+        "\n",
+        "import os\n",
+        "import asyncio\n",
+        "from aiohttp import ClientSession\n",
+        "from google.colab import userdata\n",
+        "\n",
+        "# Set LD_LIBRARY_PATH so the system NVIDIA library becomes preferred\n",
+        "# over the built-in library. This is particularly important for\n",
+        "# Google Colab which installs older drivers\n",
+        "os.environ.update({'LD_LIBRARY_PATH': '/usr/lib64-nvidia'})\n",
+        "NGROK_TOKEN = userdata.get('NGROK_TOKEN')\n",
+        "ngrok.set_auth_token(NGROK_TOKEN)\n",
+        "\n",
+        "async def run(cmd):\n",
+        "    '''\n",
+        "    run is a helper function to run subcommands asynchronously.\n",
+        "    '''\n",
+        "    print('>>> starting', *cmd)\n",
+        "    p = await asyncio.subprocess.create_subprocess_exec(\n",
+        "        *cmd,\n",
+        "        stdout=asyncio.subprocess.PIPE,\n",
+        "        stderr=asyncio.subprocess.PIPE,\n",
+        "    )\n",
+        "\n",
+        "    async def pipe(lines):\n",
+        "        async for line in lines:\n",
+        "            print(line.strip().decode('utf-8'))\n",
+        "\n",
+        "    await asyncio.gather(\n",
+        "        pipe(p.stdout),\n",
+        "        pipe(p.stderr),\n",
+        "    )\n",
+        "\n",
+        "\n",
+        "await asyncio.gather(\n",
+        "    run(['ollama', 'serve']),\n",
+        "    run(['ngrok', 'config', 'add-authtoken', NGROK_TOKEN]),\n",
+        "    run(['ngrok', 'http', '--log', 'stderr', '11434']),\n",
+        "    # run([\"ollama\",\"run\",\"llama2\"]),\n",
+        "    run([\"ollama\",\"run\",\"mistral\"]),\n",
+        ")"
+      ],
+      "metadata": {
+        "id": "u1PG3y83Z-u5"
+      },
+      "execution_count": null,
+      "outputs": []
+    }
+  ],
+  "metadata": {
+    "colab": {
+      "provenance": [],
+      "machine_shape": "hm",
+      "gpuType": "T4",
+      "authorship_tag": "ABX9TyMLapRySjkHA++gSoAkRSHn",
+      "include_colab_link": true
+    },
+    "kernelspec": {
+      "display_name": "Python 3",
+      "name": "python3"
+    },
+    "language_info": {
+      "name": "python"
+    },
+    "accelerator": "GPU"
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
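
Once the final cell is running, ngrok prints a forwarding URL on stderr, and the Ollama server on port 11434 becomes reachable through that tunnel. Below is a minimal client-side sketch (not part of this commit) of querying the tunneled API with only the standard library; OLLAMA_URL is a placeholder, so substitute the forwarding URL that ngrok actually prints. It targets /api/generate, Ollama's generation endpoint, with "stream": False so the server returns a single JSON object whose "response" field holds the completion.

    import json
    import urllib.request

    OLLAMA_URL = "https://example.ngrok-free.app"  # placeholder; use the URL from the ngrok log

    payload = json.dumps({
        "model": "mistral",              # the model pulled by the last notebook cell
        "prompt": "Why is the sky blue?",
        "stream": False,                 # one JSON response instead of a token stream
    }).encode("utf-8")

    req = urllib.request.Request(
        OLLAMA_URL + "/api/generate",    # Ollama's text-generation endpoint
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        print(json.loads(resp.read())["response"])

Since the notebook already installs aiohttp and imports ClientSession, the same request could be issued asynchronously instead, which is presumably why that dependency is pulled in.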