teticio committed
Commit d1cf2b0
Parent: f40e7fa

add colab notebook

Files changed (3):
  1. README.md +1 -1
  2. app.py +30 -31
  3. notebooks/gradio_app.ipynb +99 -0
README.md CHANGED
@@ -10,7 +10,7 @@ pinned: false
 license: gpl-3.0
 ---
 
-# inBERTolate
+# inBERTolate [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/teticio/inBERTolate/blob/master/notebooks/gradio_app.ipynb)
 ## Hit your word count by using BERT to pad out your essays!
 
 Sentences are generated that are in context with both the preceding and following sentences. Models like GPT are not well suited to this task as they are Causal Language Models, or autoregressive models, that generate tokens from left to right, conditional on the text that has come before. The B in BERT, on the other hand, stands for "Bidirectional" and it was trained to be able to fill in the gaps using context on either side. BERT is an example of an autoencoder model.
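
For context on the README text above: BERT's masked-language-model head scores a masked position using the words on both sides of it, which is what makes generating text between sentences possible. Below is a minimal sketch of that idea with the Hugging Face fill-mask pipeline; the checkpoint name is illustrative and not necessarily the `pretrained` model that app.py loads, and the app itself inserts whole sentences iteratively rather than a single token.

```python
# Minimal illustration of bidirectional fill-in with a BERT masked LM.
# Assumption: "bert-base-cased" stands in for whichever checkpoint app.py loads.
from transformers import pipeline

fill = pipeline("fill-mask", model="bert-base-cased")

text = ("The experiment was repeated three times. "
        "The results were [MASK] consistent across runs. "
        "We therefore consider them reliable.")

# Each candidate is chosen using context both before and after the mask,
# which a left-to-right (causal) model cannot do.
for candidate in fill(text, top_k=3):
    print(candidate["token_str"], round(candidate["score"], 3))
```
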
app.py CHANGED
@@ -164,41 +164,40 @@ def inbertolate(doc: str,
         new_doc += '\n'
     return new_doc
 
+demo = gr.Interface(
+    fn=inbertolate,
+    title="inBERTolate",
+    description=f"Hit your word count by using BERT ({pretrained}) to pad out your essays!",
+    inputs=[
+        gr.Textbox(label="Text", lines=10),
+        gr.Slider(label="Maximum length to insert between sentences",
+                  minimum=1,
+                  maximum=40,
+                  step=1,
+                  value=max_len),
+        gr.Slider(label="Top k", minimum=0, maximum=200, value=top_k),
+        gr.Slider(label="Temperature",
+                  minimum=0,
+                  maximum=2,
+                  value=temperature),
+        gr.Slider(label="Typical p",
+                  minimum=0,
+                  maximum=1,
+                  value=typical_p),
+        gr.Slider(label="Maximum iterations",
+                  minimum=0,
+                  maximum=1000,
+                  value=max_iter),
+        gr.Slider(label="Burn-in",
+                  minimum=0,
+                  maximum=500,
+                  value=burnin),
+    ],
+    outputs=gr.Textbox(label="Expanded text", lines=30))
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--port', type=int)
     parser.add_argument('--server', type=int)
     args = parser.parse_args()
-
-    demo = gr.Interface(
-        fn=inbertolate,
-        title="inBERTolate",
-        description=f"Hit your word count by using BERT ({pretrained}) to pad out your essays!",
-        inputs=[
-            gr.Textbox(label="Text", lines=10),
-            gr.Slider(label="Maximum length to insert between sentences",
-                      minimum=1,
-                      maximum=40,
-                      step=1,
-                      value=max_len),
-            gr.Slider(label="Top k", minimum=0, maximum=200, value=top_k),
-            gr.Slider(label="Temperature",
-                      minimum=0,
-                      maximum=2,
-                      value=temperature),
-            gr.Slider(label="Typical p",
-                      minimum=0,
-                      maximum=1,
-                      value=typical_p),
-            gr.Slider(label="Maximum iterations",
-                      minimum=0,
-                      maximum=1000,
-                      value=max_iter),
-            gr.Slider(label="Burn-in",
-                      minimum=0,
-                      maximum=500,
-                      value=burnin),
-        ],
-        outputs=gr.Textbox(label="Expanded text", lines=30))
     demo.launch(server_name=args.server or '0.0.0.0', server_port=args.port)
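
Note on the change above: building `demo` at module level rather than inside the `__main__` guard is what allows the new Colab notebook to reuse the interface without touching argparse. A short sketch of the two resulting entry points:

```python
# From a notebook (this is what gradio_app.ipynb does): import the module,
# which constructs `demo`, then launch with a public share link for Colab.
import app
app.demo.launch(share=True)

# From the command line, app.py still handles its own arguments, e.g.:
#   python app.py --port 7860
```
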
notebooks/gradio_app.ipynb ADDED
@@ -0,0 +1,99 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "bd31ba17",
+   "metadata": {},
+   "source": [
+    "<a href=\"https://colab.research.google.com/github/teticio/inBERTolate/blob/master/notebooks/gradio_app.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "f2fa778e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "try:\n",
+    "    # are we running on Google Colab?\n",
+    "    import google.colab\n",
+    "    !git clone -q https://github.com/teticio/inBERTolate.git\n",
+    "    %cd inBERTolate\n",
+    "    !pip install -q -r requirements.txt\n",
+    "except:\n",
+    "    pass"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "3fd44423",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import sys\n",
+    "sys.path.insert(0, os.path.dirname(os.path.abspath(\"\")))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bb9e5adc",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import app\n",
+    "app.demo.launch(share=True);"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e6aefd43",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "accelerator": "GPU",
+  "colab": {
+   "provenance": []
+  },
+  "gpuClass": "standard",
+  "kernelspec": {
+   "display_name": "huggingface",
+   "language": "python",
+   "name": "huggingface"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.4"
+  },
+  "toc": {
+   "base_numbering": 1,
+   "nav_menu": {},
+   "number_sections": true,
+   "sideBar": true,
+   "skip_h1_title": false,
+   "title_cell": "Table of Contents",
+   "title_sidebar": "Contents",
+   "toc_cell": false,
+   "toc_position": {},
+   "toc_section_display": true,
+   "toc_window_display": false
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}