lewtun (HF staff) committed on
Commit c05fb53
1 Parent(s): 8494d2c
Files changed (3):
  1. app.ipynb +18 -3
  2. app.py +9 -5
  3. requirements.txt +1 -0
app.ipynb CHANGED
@@ -10,7 +10,22 @@
     "import gradio as gr\n",
     "import requests\n",
     "import json\n",
-    "import requests"
+    "import requests\n",
+    "import os\n",
+    "from pathlib import Path\n",
+    "from dotenv import load_dotenv\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if Path(\".env\").is_file():\n",
+    "    load_dotenv(\".env\")\n",
+    "\n",
+    "HF_TOKEN = os.getenv(\"HF_TOKEN\")\n"
    ]
   },
   {
@@ -27,7 +42,7 @@
     "    top_p\n",
     "):\n",
     "    API_URL = f\"https://api-inference.huggingface.co/models/{model_id}\"\n",
-    "    headers = {\"Authorization\": \"Bearer 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\", \"x-wait-for-model\": \"1\"}\n",
+    "    headers = {\"Authorization\": \"Bearer \", \"x-wait-for-model\": \"1\"}\n",
     "\n",
     "    payload = {\n",
     "        \"inputs\": inputs,\n",
@@ -830,7 +845,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 37,
+   "execution_count": 38,
    "metadata": {},
    "outputs": [],
    "source": [
app.py CHANGED
@@ -8,8 +8,12 @@ import gradio as gr
 import requests
 import json
 import requests
+import os
+from pathlib import Path
+from dotenv import load_dotenv
 
-# %% app.ipynb 1
+
+# %% app.ipynb 2
 def query_chat_api(
     model_id,
     inputs,
@@ -17,7 +21,7 @@ def query_chat_api(
     top_p
 ):
     API_URL = f"https://api-inference.huggingface.co/models/{model_id}"
-    headers = {"Authorization": "Bearer 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", "x-wait-for-model": "1"}
+    headers = {"Authorization": "Bearer ", "x-wait-for-model": "1"}
 
     payload = {
         "inputs": inputs,
@@ -37,7 +41,7 @@ def query_chat_api(
     return "Error: " + response.text
 
 
-# %% app.ipynb 4
+# %% app.ipynb 5
 def inference_chat(
     model_id,
     prompt_template,
@@ -62,7 +66,7 @@ def inference_chat(
     return {chatbot: chat, state: history}
 
 
-# %% app.ipynb 12
+# %% app.ipynb 13
 title = """<h1 align="center">Chatty Language Models</h1>"""
 description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
 
@@ -87,7 +91,7 @@ So far, the following prompts are available:
 As you can see, most of these prompts exceed the maximum context size of models like Flan-T5, so an error usually means the Inference API has timed out.
 """
 
-# %% app.ipynb 13
+# %% app.ipynb 14
 with gr.Blocks(
     css="""
     .message.svelte-w6rprc.svelte-w6rprc.svelte-w6rprc {font-size: 20px; margin-top: 20px}
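app.py mirrors the notebook: the leaked token is removed from the Authorization header, the dotenv imports are added, and the "# %% app.ipynb N" cell markers are renumbered to account for the new notebook cell. Since the token must now come from the environment, a fail-fast guard at startup (a hypothetical addition, not part of this commit) would make a missing HF_TOKEN obvious immediately instead of surfacing as failed API calls:

import os

# Hypothetical guard, placed after the dotenv loading shown in the notebook cell above.
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN is None:
    raise RuntimeError(
        "HF_TOKEN is not set. Add it to a local .env file or export it as an "
        "environment variable before launching the app."
    )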
requirements.txt CHANGED
@@ -1 +1,2 @@
 requests
+python-dotenv
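requirements.txt gains python-dotenv, the package that provides the load_dotenv helper used above. A brief illustration of what the library does with a .env file (the file name and HF_TOKEN key match the ones used in the app; the value is a placeholder):

import os

from dotenv import dotenv_values, load_dotenv

# Given a .env file containing, for example:
#   HF_TOKEN=hf_your_token_here
config = dotenv_values(".env")  # parse KEY=VALUE pairs into a dict, without side effects
load_dotenv(".env")             # additionally export the values into os.environ

print(sorted(config))                                # ['HF_TOKEN']
print(os.getenv("HF_TOKEN") == config["HF_TOKEN"])   # True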